/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* This kernel sets up the ray for the next iteration of the path-iteration
 * and accumulates the radiance contributed by AO and direct lighting.
 *
 * The ray state of rays terminated in this kernel is changed to
 * RAY_UPDATE_BUFFER.
 *
 * Note on queues:
 * This kernel fetches rays from the QUEUE_ACTIVE_AND_REGENERATED_RAYS queue
 * and processes only the rays in the RAY_ACTIVE state.
 * There are different points in this kernel where a ray may terminate and
 * reach the RAY_UPDATE_BUFFER state. These rays are enqueued into the
 * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. They remain present in the
 * QUEUE_ACTIVE_AND_REGENERATED_RAYS queue as well, but since their ray state
 * has been changed to RAY_UPDATE_BUFFER they are simply skipped there.
 *
 * State of queues when this kernel is called:
 * At entry,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
 * At exit,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and more RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and more RAY_UPDATE_BUFFER rays.
 */
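
/* Informal sketch of the main ray-state transitions performed below (derived
 * from the code in this file; kernel_split_path_end() may do additional
 * bookkeeping for branched-indirect rays):
 *   RAY_ACTIVE -> RAY_UPDATE_BUFFER, when the path terminates here;
 *   RAY_ACTIVE -> RAY_REGENERATED, while branched indirect samples remain;
 *   RAY_LIGHT_INDIRECT_NEXT_ITER -> RAY_REGENERATED, while samples remain.
 */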

#ifdef __BRANCHED_PATH__
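/* Initialize the shared branched-path indirect loop state and flag the ray
 * as sampling indirect light. */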
ccl_device_inline void kernel_split_branched_indirect_light_init(KernelGlobals *kg, int ray_index)
{
        kernel_split_branched_path_indirect_loop_init(kg, ray_index);

        ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_LIGHT_INDIRECT);
}

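/* Finish the branched-path indirect light loop, then either terminate the
 * path or continue it through a transparent surface. */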
ccl_device void kernel_split_branched_indirect_light_end(KernelGlobals *kg, int ray_index)
{
        kernel_split_branched_path_indirect_loop_end(kg, ray_index);

        ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
        ShaderData *sd = &kernel_split_state.sd[ray_index];
        ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
        ccl_global Ray *ray = &kernel_split_state.ray[ray_index];

        /* Continue in case of transparency. */
        *throughput *= shader_bsdf_transparency(kg, sd);

        if(is_zero(*throughput)) {
                kernel_split_path_end(kg, ray_index);
        }
        else {
                /* Update path state. */
                state->flag |= PATH_RAY_TRANSPARENT;
                state->transparent_bounce++;

                ray->P = ray_offset(sd->P, -sd->Ng);
                ray->t -= sd->ray_length; /* clipping works through transparent */

#  ifdef __RAY_DIFFERENTIALS__
                ray->dP = sd->dP;
                ray->dD.dx = -sd->dI.dx;
                ray->dD.dy = -sd->dI.dy;
#  endif  /* __RAY_DIFFERENTIALS__ */

#  ifdef __VOLUME__
                /* Enter/exit volume. */
                kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
#  endif  /* __VOLUME__ */
        }
}
#endif  /* __BRANCHED_PATH__ */

ccl_device void kernel_next_iteration_setup(KernelGlobals *kg,
                                            ccl_local_param unsigned int *local_queue_atomics)
{
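        /* Reset the per-work-group counter used by enqueue_ray_index_local() below. */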
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
                /* If we are here, the scene-intersect kernel has already been
                 * executed at least once. From now on, the scene-intersect
                 * kernel may fetch ray indices from the queues.
                 */
                *kernel_split_params.use_queues_flag = 1;

                /* Reset the indices of the QUEUE_SHADOW_RAY_CAST_AO_RAYS and
                 * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues, which were emptied
                 * during the previous kernel.
                 */
                kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
                kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
        }

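        /* Fetch this thread's ray index from the active-rays queue. */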
        int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        ray_index = get_ray_index(kg, ray_index,
                                  QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                                  kernel_split_state.queue_data,
                                  kernel_split_params.queue_size,
                                  0);

        ccl_global char *ray_state = kernel_split_state.ray_state;

        bool active = IS_STATE(ray_state, ray_index, RAY_ACTIVE);
        if(active) {
                ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
                ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
                ShaderData *sd = &kernel_split_state.sd[ray_index];
                ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
                PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

#ifdef __BRANCHED_PATH__
                if(!kernel_data.integrator.branched || IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
#endif
                        /* Compute direct lighting and next bounce. */
                        if(!kernel_path_surface_bounce(kg, sd, throughput, state, &L->state, ray)) {
                                kernel_split_path_end(kg, ray_index);
                        }
#ifdef __BRANCHED_PATH__
                }
                else {
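                        /* First surface bounce of a branched path: set up the
                         * indirect loop state, then take the first indirect
                         * light sample. The iter function returns false once
                         * all samples have been taken. */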
                        kernel_split_branched_indirect_light_init(kg, ray_index);

                        if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                                  ray_index,
                                                                                  1.0f,
                                                                                  &kernel_split_state.branched_state[ray_index].sd,
                                                                                  true,
                                                                                  true))
                        {
                                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
                        }
                        else {
                                kernel_split_branched_indirect_light_end(kg, ray_index);
                        }
                }
#endif  /* __BRANCHED_PATH__ */
        }

        /* Enqueue rays that reached RAY_UPDATE_BUFFER in this kernel. */
        enqueue_ray_index_local(ray_index,
                                QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                                IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER) && active,
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);

#ifdef __BRANCHED_PATH__
        /* Continue the branched-path indirect light loop for rays queued for
         * another iteration.
         */
        if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
                kernel_split_params.queue_index[QUEUE_LIGHT_INDIRECT_ITER] = 0;
        }

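        /* Fetch a ray index from the indirect light iteration queue; the
         * final argument (1) consumes the queue entry. */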
        ray_index = get_ray_index(kg, ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
                                  QUEUE_LIGHT_INDIRECT_ITER,
                                  kernel_split_state.queue_data,
                                  kernel_split_params.queue_size,
                                  1);

        if(IS_STATE(ray_state, ray_index, RAY_LIGHT_INDIRECT_NEXT_ITER)) {
                /* For render passes, sum and reset the indirect light pass
                 * variables before taking the next sample.
                 */
                PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

                path_radiance_sum_indirect(L);
                path_radiance_reset_indirect(L);

                if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                          ray_index,
                                                                          1.0f,
                                                                          &kernel_split_state.branched_state[ray_index].sd,
                                                                          true,
                                                                          true))
                {
                        ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
                }
                else {
                        kernel_split_branched_indirect_light_end(kg, ray_index);
                }
        }

#  ifdef __VOLUME__
        /* Enqueue RAY_VOLUME_INDIRECT_NEXT_ITER rays. */
        ccl_barrier(CCL_LOCAL_MEM_FENCE);
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        enqueue_ray_index_local(ray_index,
                                QUEUE_VOLUME_INDIRECT_ITER,
                                IS_STATE(kernel_split_state.ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER),
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);

#  endif  /* __VOLUME__ */

#  ifdef __SUBSURFACE__
        /* Enqueue RAY_SUBSURFACE_INDIRECT_NEXT_ITER rays. */
        ccl_barrier(CCL_LOCAL_MEM_FENCE);
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        enqueue_ray_index_local(ray_index,
                                QUEUE_SUBSURFACE_INDIRECT_ITER,
                                IS_STATE(kernel_split_state.ray_state, ray_index, RAY_SUBSURFACE_INDIRECT_NEXT_ITER),
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);
#  endif  /* __SUBSURFACE__ */
#endif  /* __BRANCHED_PATH__ */
}

CCL_NAMESPACE_END