Cycles: SSS and Volume rendering in split kernel
[blender.git] intern/cycles/kernel/split/kernel_subsurface_scatter.h

CCL_NAMESPACE_BEGIN

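/* Split kernel stage that applies subsurface scattering to active rays.
 * Rays whose main path ends in the scatter step are marked
 * RAY_UPDATE_BUFFER and enqueued, so that a later stage can write their
 * result to the render buffer and regenerate them.
 */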
ccl_device void kernel_subsurface_scatter(KernelGlobals *kg)
{
#ifdef __SUBSURFACE__

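	/* Work group local counter used by enqueue_ray_index_local() below.
	 * The first thread in the work group resets it before any thread
	 * passes the barrier.
	 */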
	ccl_local unsigned int local_queue_atomics;
	if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
		local_queue_atomics = 0;
	}
	ccl_barrier(CCL_LOCAL_MEM_FENCE);

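	/* Flatten the 2D global ID into a linear index, then map it to a ray
	 * through the queue of active and regenerated rays.
	 */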
	int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
	ray_index = get_ray_index(kg, ray_index,
	                          QUEUE_ACTIVE_AND_REGENERATED_RAYS,
	                          kernel_split_state.queue_data,
	                          kernel_split_params.queue_size,
	                          0);

#ifdef __COMPUTE_DEVICE_GPU__
	/* If we are executing on a GPU device, exit all threads that are not
	 * required.
	 *
	 * If we are executing on a CPU device, keep all threads active, since
	 * there are barrier() calls later in the kernel and CPU devices expect
	 * all threads to execute the barrier statement.
	 */
	if(ray_index == QUEUE_EMPTY_SLOT) {
		return;
	}
#endif
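
	/* Marks rays that should be enqueued for a buffer update at the end
	 * of this kernel.
	 */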
	char enqueue_flag = 0;

#ifndef __COMPUTE_DEVICE_GPU__
	if(ray_index != QUEUE_EMPTY_SLOT) {
#endif

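	/* Fetch this ray's data from the split kernel state buffers. */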
	ccl_global char *ray_state = kernel_split_state.ray_state;
	ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
	PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
	ccl_global RNG *rng = &kernel_split_state.rng[ray_index];
	ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
	ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
	ccl_global SubsurfaceIndirectRays *ss_indirect = &kernel_split_state.ss_rays[ray_index];
	ShaderData *sd = &kernel_split_state.sd[ray_index];
	ShaderData *emission_sd = &kernel_split_state.sd_DL_shadow[ray_index];

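	/* Scatter rays that hit a BSSRDF closure. If the scatter step ends
	 * the main path, flag the ray so that its result is written to the
	 * render buffer and the path can be regenerated.
	 */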
	if(IS_STATE(ray_state, ray_index, RAY_ACTIVE)) {
		if(sd->flag & SD_BSSRDF) {
			if(kernel_path_subsurface_scatter(kg,
			                                  sd,
			                                  emission_sd,
			                                  L,
			                                  state,
			                                  rng,
			                                  ray,
			                                  throughput,
			                                  ss_indirect)) {
				ASSIGN_RAY_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER);
				enqueue_flag = 1;
			}
		}
	}

#ifndef __COMPUTE_DEVICE_GPU__
	}
#endif

	/* Enqueue RAY_UPDATE_BUFFER rays. */
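	/* enqueue_ray_index_local() performs work group local atomics and
	 * barriers, so every thread in the work group must reach this call.
	 * This is why threads without a ray are kept active on CPU devices
	 * instead of returning early.
	 */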
	enqueue_ray_index_local(ray_index,
	                        QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
	                        enqueue_flag,
	                        kernel_split_params.queue_size,
	                        &local_queue_atomics,
	                        kernel_split_state.queue_data,
	                        kernel_split_params.queue_index);

#endif  /* __SUBSURFACE__ */

}

CCL_NAMESPACE_END