Cycles: don't count volume boundaries as transparent bounces.
[blender-staging.git] intern/cycles/kernel/split/kernel_next_iteration_setup.h
/*
 * Copyright 2011-2015 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* This kernel takes care of setting up the ray for the next iteration of
 * path-iteration and accumulating radiance corresponding to AO and
 * direct-lighting.
 *
 * The ray state of rays that are terminated in this kernel is changed
 * to RAY_UPDATE_BUFFER.
 *
 * Note on queues:
 * This kernel fetches rays from the queue QUEUE_ACTIVE_AND_REGENERATED_RAYS
 * and processes only the rays of state RAY_ACTIVE.
 * There are different points in this kernel where a ray may terminate and
 * reach the RAY_UPDATE_BUFFER state. These rays are enqueued into the
 * QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS queue. They will still be present
 * in the QUEUE_ACTIVE_AND_REGENERATED_RAYS queue, but since their ray state
 * has been changed to RAY_UPDATE_BUFFER, there is no problem.
 *
 * State of queues when this kernel is called:
 * At entry,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and RAY_UPDATE_BUFFER rays.
 * At exit,
 *   - QUEUE_ACTIVE_AND_REGENERATED_RAYS will be filled with RAY_ACTIVE,
 *     RAY_REGENERATED and more RAY_UPDATE_BUFFER rays.
 *   - QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS will be filled with
 *     RAY_TO_REGENERATE and more RAY_UPDATE_BUFFER rays.
 */

#ifdef __BRANCHED_PATH__
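/* Initialize the branched-path indirect light loop state for this ray and
 * flag it as being inside the light-indirect branch. */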
ccl_device_inline void kernel_split_branched_indirect_light_init(KernelGlobals *kg, int ray_index)
{
        kernel_split_branched_path_indirect_loop_init(kg, ray_index);

        ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_LIGHT_INDIRECT);
}

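/* Continue the path through a transparent surface, or pass straight through
 * a volume bounding mesh without counting a transparent bounce, then set up
 * the ray for the next segment. */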
ccl_device void kernel_split_branched_transparent_bounce(KernelGlobals *kg, int ray_index)
{
        ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
        ShaderData *sd = kernel_split_sd(sd, ray_index);
        ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
        ccl_global Ray *ray = &kernel_split_state.ray[ray_index];

#  ifdef __VOLUME__
        if(!(sd->flag & SD_HAS_ONLY_VOLUME)) {
#  endif
                /* continue in case of transparency */
                *throughput *= shader_bsdf_transparency(kg, sd);

                if(is_zero(*throughput)) {
                        kernel_split_path_end(kg, ray_index);
                        return;
                }

                /* Update Path State */
                path_state_next(kg, state, LABEL_TRANSPARENT);
#  ifdef __VOLUME__
        }
        else {
                /* For volume bounding meshes we pass through without counting transparent
                 * bounces, only sanity check in case self intersection gets us stuck. */
                state->volume_bounds_bounce++;
                if(state->volume_bounds_bounce > VOLUME_BOUNDS_MAX) {
                        kernel_split_path_end(kg, ray_index);
                        return;
                }
        }
#  endif

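        /* Offset the new ray origin slightly below the surface, along the negative
         * geometric normal, so the continued ray does not self-intersect it. */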
        ray->P = ray_offset(sd->P, -sd->Ng);
        ray->t -= sd->ray_length; /* clipping works through transparent */

#  ifdef __RAY_DIFFERENTIALS__
        ray->dP = sd->dP;
        ray->dD.dx = -sd->dI.dx;
        ray->dD.dy = -sd->dI.dy;
#  endif  /* __RAY_DIFFERENTIALS__ */

#  ifdef __VOLUME__
        /* enter/exit volume */
        kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
#  endif  /* __VOLUME__ */
}
#endif  /* __BRANCHED_PATH__ */

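/* Kernel entry point: marks that queues are now in use, resets the shadow ray
 * queues, bounces every RAY_ACTIVE ray to its next path segment, enqueues
 * terminated rays for buffer update, and drives the branched-path indirect
 * light iteration. */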
ccl_device void kernel_next_iteration_setup(KernelGlobals *kg,
                                            ccl_local_param unsigned int *local_queue_atomics)
{
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
                /* If we are here, then the scene-intersect kernel has already been
                 * executed at least once. From now on, the scene-intersect kernel
                 * may operate on queues to fetch the ray index.
                 */
                *kernel_split_params.use_queues_flag = 1;

                /* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and
                 * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
                 * previous kernel.
                 */
                kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
                kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
        }

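        /* Map this thread's linear index to a ray through the active rays queue;
         * the trailing 0 leaves the queue slot intact after reading. */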
        int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        ray_index = get_ray_index(kg, ray_index,
                                  QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                                  kernel_split_state.queue_data,
                                  kernel_split_params.queue_size,
                                  0);

        ccl_global char *ray_state = kernel_split_state.ray_state;

#ifdef __VOLUME__
        /* Reactivate only volume rays here, most surface work was skipped. */
        if(IS_STATE(ray_state, ray_index, RAY_HAS_ONLY_VOLUME)) {
                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_ACTIVE);
        }
#endif

        bool active = IS_STATE(ray_state, ray_index, RAY_ACTIVE);
        if(active) {
                ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
                ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
                ShaderData *sd = kernel_split_sd(sd, ray_index);
                ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
                PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

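                /* Three cases: regular path tracing (or a ray already inside a branched
                 * indirect loop) takes an ordinary surface bounce; volume-only boundaries
                 * take a transparent bounce; anything else starts the branched indirect
                 * light loop. */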
#ifdef __BRANCHED_PATH__
                if(!kernel_data.integrator.branched || IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
#endif
                        /* Compute direct lighting and next bounce. */
                        if(!kernel_path_surface_bounce(kg, sd, throughput, state, &L->state, ray)) {
                                kernel_split_path_end(kg, ray_index);
                        }
#ifdef __BRANCHED_PATH__
                }
                else if(sd->flag & SD_HAS_ONLY_VOLUME) {
                        kernel_split_branched_transparent_bounce(kg, ray_index);
                }
                else {
                        kernel_split_branched_indirect_light_init(kg, ray_index);

                        if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                                  ray_index,
                                                                                  1.0f,
                                                                                  kernel_split_sd(branched_state_sd, ray_index),
                                                                                  true,
                                                                                  true))
                        {
                                ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
                        }
                        else {
                                kernel_split_branched_path_indirect_loop_end(kg, ray_index);
                                kernel_split_branched_transparent_bounce(kg, ray_index);
                        }
                }
#endif  /* __BRANCHED_PATH__ */
        }

        /* Enqueue RAY_UPDATE_BUFFER rays. */
        enqueue_ray_index_local(ray_index,
                                QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
                                IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER) && active,
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);

#ifdef __BRANCHED_PATH__
        /* Light indirect iteration loop. */
        if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
                kernel_split_params.queue_index[QUEUE_LIGHT_INDIRECT_ITER] = 0;
        }

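        /* Fetch the ray index from the light indirect iteration queue; the trailing 1
         * clears the queue slot after it is read. */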
        ray_index = get_ray_index(kg, ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
                                  QUEUE_LIGHT_INDIRECT_ITER,
                                  kernel_split_state.queue_data,
                                  kernel_split_params.queue_size,
                                  1);

        if(IS_STATE(ray_state, ray_index, RAY_LIGHT_INDIRECT_NEXT_ITER)) {
                /* For render passes, sum and reset the indirect light pass variables
                 * for the next samples. */
                PathRadiance *L = &kernel_split_state.path_radiance[ray_index];

                path_radiance_sum_indirect(L);
                path_radiance_reset_indirect(L);

                if(kernel_split_branched_path_surface_indirect_light_iter(kg,
                                                                          ray_index,
                                                                          1.0f,
                                                                          kernel_split_sd(branched_state_sd, ray_index),
                                                                          true,
                                                                          true))
                {
                        ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
                }
                else {
                        kernel_split_branched_path_indirect_loop_end(kg, ray_index);
                        kernel_split_branched_transparent_bounce(kg, ray_index);
                }
        }

#  ifdef __VOLUME__
        /* Enqueue RAY_VOLUME_INDIRECT_NEXT_ITER rays */
        ccl_barrier(CCL_LOCAL_MEM_FENCE);
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        enqueue_ray_index_local(ray_index,
                                QUEUE_VOLUME_INDIRECT_ITER,
                                IS_STATE(kernel_split_state.ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER),
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);

#  endif  /* __VOLUME__ */

#  ifdef __SUBSURFACE__
        /* Enqueue RAY_SUBSURFACE_INDIRECT_NEXT_ITER rays */
        ccl_barrier(CCL_LOCAL_MEM_FENCE);
        if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
                *local_queue_atomics = 0;
        }
        ccl_barrier(CCL_LOCAL_MEM_FENCE);

        ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
        enqueue_ray_index_local(ray_index,
                                QUEUE_SUBSURFACE_INDIRECT_ITER,
                                IS_STATE(kernel_split_state.ray_state, ray_index, RAY_SUBSURFACE_INDIRECT_NEXT_ITER),
                                kernel_split_params.queue_size,
                                local_queue_atomics,
                                kernel_split_state.queue_data,
                                kernel_split_params.queue_index);
#  endif  /* __SUBSURFACE__ */
#endif  /* __BRANCHED_PATH__ */
}

CCL_NAMESPACE_END