Cycles: Speedup transparent shadows in split kernel
[blender-staging.git] / intern / cycles / kernel / kernel_shadow.h
/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

/* Attenuate the throughput according to the given intersection event.
 * Returns true if the throughput is zero and traversal can be aborted.
 */
ccl_device_forceinline bool shadow_handle_transparent_isect(
        KernelGlobals *kg,
        ShaderData *shadow_sd,
        ccl_addr_space PathState *state,
#    ifdef __VOLUME__
        ccl_addr_space struct PathState *volume_state,
#    endif
        Intersection *isect,
        Ray *ray,
        float3 *throughput)
{
#ifdef __VOLUME__
        /* Attenuation between last surface and next surface. */
        if(volume_state->volume_stack[0].shader != SHADER_NONE) {
                Ray segment_ray = *ray;
                segment_ray.t = isect->t;
                kernel_volume_shadow(kg,
                                     shadow_sd,
                                     volume_state,
                                     &segment_ray,
                                     throughput);
        }
#endif
        /* Setup shader data at surface. */
        shader_setup_from_ray(kg, shadow_sd, isect, ray);
        /* Attenuation from transparent surface. */
        if(!(shadow_sd->flag & SD_HAS_ONLY_VOLUME)) {
                path_state_modify_bounce(state, true);
                shader_eval_surface(kg,
                                    shadow_sd,
                                    NULL,
                                    state,
                                    0.0f,
                                    PATH_RAY_SHADOW,
                                    SHADER_CONTEXT_SHADOW);
                path_state_modify_bounce(state, false);
                *throughput *= shader_bsdf_transparency(kg, shadow_sd);
        }
        /* Stop if all light is blocked. */
        if(is_zero(*throughput)) {
                return true;
        }
#ifdef __VOLUME__
        /* Exit/enter volume. */
        kernel_volume_stack_enter_exit(kg, shadow_sd, volume_state->volume_stack);
#endif
        return false;
}

/* Special version which only handles opaque shadows. */
ccl_device bool shadow_blocked_opaque(KernelGlobals *kg,
                                      ShaderData *shadow_sd,
                                      ccl_addr_space PathState *state,
                                      Ray *ray,
                                      Intersection *isect,
                                      float3 *shadow)
{
        const bool blocked = scene_intersect(kg,
                                             *ray,
                                             PATH_RAY_SHADOW_OPAQUE,
                                             isect,
                                             NULL,
                                             0.0f, 0.0f);
#ifdef __VOLUME__
        if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
                /* Apply attenuation from current volume shader. */
                kernel_volume_shadow(kg, shadow_sd, state, ray, shadow);
        }
#endif
        return blocked;
}

#ifdef __TRANSPARENT_SHADOWS__
#  ifdef __SHADOW_RECORD_ALL__
/* Shadow function to compute how much light is blocked.
 *
 * We trace a single ray. If it hits any opaque surface, or if more than a
 * given number of transparent surfaces is hit, we consider the geometry to be
 * entirely blocked. If not, all transparent surfaces are recorded and we
 * shade them one by one to determine how much light is blocked. This all
 * happens in one scene intersection function.
 *
 * Recording all hits works well in some cases but may be slower in others.
 * With many semi-transparent hairs, a single intersection pass may be faster,
 * because otherwise we would be re-intersecting the same hairs over and over
 * with each step. If, however, the transparency is mostly binary, we may be
 * recording many unnecessary intersections when one of the first surfaces
 * already blocks all light.
 *
 * From tests in real scenes it seems the performance loss is either minimal,
 * or there is a performance increase anyway due to avoiding the need to send
 * two rays with transparent shadows.
 *
 * On the CPU this handles all transparent bounces (by allocating storage for
 * the intersections when they don't fit into the stack storage).
 *
 * On the GPU this only handles SHADOW_STACK_MAX_HITS - 1 intersections, so
 * it is something to keep an eye on.
 */

#    define SHADOW_STACK_MAX_HITS 64

/* Actual logic of the traversal loop implementation, which is free from
 * device-specific tweaks.
 *
 * Note that the hits array should be at least max_hits + 1 in size.
 */
ccl_device bool shadow_blocked_transparent_all_loop(KernelGlobals *kg,
                                                    ShaderData *shadow_sd,
                                                    ccl_addr_space PathState *state,
                                                    Ray *ray,
                                                    Intersection *hits,
                                                    uint max_hits,
                                                    float3 *shadow)
{
        /* Intersect to find an opaque surface, or record all transparent
         * surface hits.
         */
        uint num_hits;
        const bool blocked = scene_intersect_shadow_all(kg,
                                                        ray,
                                                        hits,
                                                        max_hits,
                                                        &num_hits);
        /* If no opaque surface found but we did find transparent hits,
         * shade them.
         */
        if(!blocked && num_hits > 0) {
                float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
                float3 Pend = ray->P + ray->D*ray->t;
                float last_t = 0.0f;
                int bounce = state->transparent_bounce;
                Intersection *isect = hits;
#    ifdef __VOLUME__
#      ifdef __SPLIT_KERNEL__
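                /* In the split kernel the shadow path state lives in a
                 * preallocated global buffer indexed by the flattened 2D
                 * work-item id, rather than in per-thread private memory. */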
                ccl_addr_space PathState *ps = &kernel_split_state.state_shadow[ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0)];
#      else
                PathState ps_object;
                PathState *ps = &ps_object;
#      endif
                *ps = *state;
#    endif
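                /* Handle hits front-to-back: the marching below relies on
                 * intersections being sorted by increasing distance. */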
                sort_intersections(hits, num_hits);
                for(int hit = 0; hit < num_hits; hit++, isect++) {
                        /* Adjust intersection distance for moving ray forward. */
                        float new_t = isect->t;
                        isect->t -= last_t;
                        /* Skip the hit if we did not move forward; step-by-step
                         * raytracing would have skipped it as well.
                         */
                        if(last_t == new_t) {
                                continue;
                        }
                        last_t = new_t;
                        /* Attenuate the throughput. */
                        if(shadow_handle_transparent_isect(kg,
                                                           shadow_sd,
                                                           state,
#ifdef __VOLUME__
                                                           ps,
#endif
                                                           isect,
                                                           ray,
                                                           &throughput))
                        {
                                return true;
                        }
                        /* Move ray forward. */
                        ray->P = shadow_sd->P;
                        if(ray->t != FLT_MAX) {
                                ray->D = normalize_len(Pend - ray->P, &ray->t);
                        }
                        bounce++;
                }
#    ifdef __VOLUME__
                /* Attenuation for last line segment towards light. */
                if(ps->volume_stack[0].shader != SHADER_NONE) {
                        kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
                }
#    endif
                *shadow = throughput;
                return is_zero(throughput);
        }
#    ifdef __VOLUME__
        if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
                /* Apply attenuation from current volume shader. */
                kernel_volume_shadow(kg, shadow_sd, state, ray, shadow);
        }
#    endif
        return blocked;
}

/* Here we do all the device-specific trickery before invoking the actual
 * traversal loop, to keep the actual logic readable.
 */
ccl_device bool shadow_blocked_transparent_all(KernelGlobals *kg,
                                               ShaderData *shadow_sd,
                                               ccl_addr_space PathState *state,
                                               Ray *ray,
                                               uint max_hits,
                                               float3 *shadow)
{
#    ifdef __SPLIT_KERNEL__
        Intersection hits_[SHADOW_STACK_MAX_HITS];
        Intersection *hits = &hits_[0];
#    elif defined(__KERNEL_CUDA__)
        Intersection *hits = kg->hits_stack;
#    else
        Intersection hits_stack[SHADOW_STACK_MAX_HITS];
        Intersection *hits = hits_stack;
#    endif
#    ifndef __KERNEL_GPU__
        /* Prefer to use stack memory, but fall back to dynamic allocation if
         * max_hits is too large. We need max_hits + 1 storage slots because
         * scene_intersect_shadow_all first stores a hit and only then checks
         * whether the limit has been exceeded.
         *
         * Ignore this on the GPU because of slow/unavailable malloc().
         */
        if(max_hits + 1 > SHADOW_STACK_MAX_HITS) {
                if(kg->transparent_shadow_intersections == NULL) {
                        const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
                        kg->transparent_shadow_intersections =
                                (Intersection*)malloc(sizeof(Intersection)*(transparent_max_bounce + 1));
                }
                hits = kg->transparent_shadow_intersections;
        }
#    endif  /* __KERNEL_GPU__ */
        /* Invoke actual traversal. */
        return shadow_blocked_transparent_all_loop(kg,
                                                   shadow_sd,
                                                   state,
                                                   ray,
                                                   hits,
                                                   max_hits,
                                                   shadow);
}
#  endif  /* __SHADOW_RECORD_ALL__ */

#  if defined(__KERNEL_GPU__) || !defined(__SHADOW_RECORD_ALL__)
/* Shadow function to compute how much light is blocked.
 *
 * Here we raytrace from one transparent surface to the next, step by step.
 * To minimize overhead in cases where we don't need transparent shadows, we
 * first trace a regular shadow ray. We check if the hit primitive was
 * potentially transparent, and only in that case start marching. This gives
 * one extra ray cast for the cases where we do want transparency.
 */

/* This function only implements the device-independent traversal logic,
 * which requires some precalculation to be done by the caller.
 */
ccl_device bool shadow_blocked_transparent_stepped_loop(
        KernelGlobals *kg,
        ShaderData *shadow_sd,
        ccl_addr_space PathState *state,
        Ray *ray,
        Intersection *isect,
        const bool blocked,
        const bool is_transparent_isect,
        float3 *shadow)
{
        if(blocked && is_transparent_isect) {
                float3 throughput = make_float3(1.0f, 1.0f, 1.0f);
                float3 Pend = ray->P + ray->D*ray->t;
                int bounce = state->transparent_bounce;
#    ifdef __VOLUME__
#      ifdef __SPLIT_KERNEL__
                ccl_addr_space PathState *ps = &kernel_split_state.state_shadow[ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0)];
#      else
                PathState ps_object;
                PathState *ps = &ps_object;
#      endif
                *ps = *state;
#    endif
                for(;;) {
                        if(bounce >= kernel_data.integrator.transparent_max_bounce) {
                                return true;
                        }
                        if(!scene_intersect(kg,
                                            *ray,
                                            PATH_RAY_SHADOW_TRANSPARENT,
                                            isect,
                                            NULL,
                                            0.0f, 0.0f))
                        {
                                break;
                        }
                        if(!shader_transparent_shadow(kg, isect)) {
                                return true;
                        }
                        /* Attenuate the throughput. */
                        if(shadow_handle_transparent_isect(kg,
                                                           shadow_sd,
                                                           state,
#ifdef __VOLUME__
                                                           ps,
#endif
                                                           isect,
                                                           ray,
                                                           &throughput))
                        {
                                return true;
                        }
                        /* Move ray forward. */
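                        /* ray_offset() pushes the new start point slightly off
                         * the surface to avoid re-intersecting the surface we
                         * just shaded. */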
                        ray->P = ray_offset(shadow_sd->P, -shadow_sd->Ng);
                        if(ray->t != FLT_MAX) {
                                ray->D = normalize_len(Pend - ray->P, &ray->t);
                        }
                        bounce++;
                }
#    ifdef __VOLUME__
                /* Attenuation for last line segment towards light. */
                if(ps->volume_stack[0].shader != SHADER_NONE) {
                        kernel_volume_shadow(kg, shadow_sd, ps, ray, &throughput);
                }
#    endif
                *shadow *= throughput;
                return is_zero(throughput);
        }
#    ifdef __VOLUME__
        if(!blocked && state->volume_stack[0].shader != SHADER_NONE) {
                /* Apply attenuation from current volume shader. */
                kernel_volume_shadow(kg, shadow_sd, state, ray, shadow);
        }
#    endif
        return blocked;
}

ccl_device bool shadow_blocked_transparent_stepped(
        KernelGlobals *kg,
        ShaderData *shadow_sd,
        ccl_addr_space PathState *state,
        Ray *ray,
        Intersection *isect,
        float3 *shadow)
{
        const bool blocked = scene_intersect(kg,
                                             *ray,
                                             PATH_RAY_SHADOW_OPAQUE,
                                             isect,
                                             NULL,
                                             0.0f, 0.0f);
        const bool is_transparent_isect = blocked
                ? shader_transparent_shadow(kg, isect)
                : false;
        return shadow_blocked_transparent_stepped_loop(kg,
                                                       shadow_sd,
                                                       state,
                                                       ray,
                                                       isect,
                                                       blocked,
                                                       is_transparent_isect,
                                                       shadow);
}

#  endif  /* __KERNEL_GPU__ || !__SHADOW_RECORD_ALL__ */
#endif /* __TRANSPARENT_SHADOWS__ */

ccl_device_inline bool shadow_blocked(KernelGlobals *kg,
                                      ShaderData *shadow_sd,
                                      ccl_addr_space PathState *state,
                                      Ray *ray_input,
                                      float3 *shadow)
{
        Ray *ray = ray_input;
        Intersection isect;
        /* Some common early checks. */
        *shadow = make_float3(1.0f, 1.0f, 1.0f);
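        /* A zero-length shadow ray can never be blocked. */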
        if(ray->t == 0.0f) {
                return false;
        }
        /* Do actual shadow shading. */
        /* First of all, we check whether the integrator requires transparent
         * shadows. If not, we use the simplest and fastest way to calculate
         * the occlusion.
         */
#ifdef __TRANSPARENT_SHADOWS__
        if(!kernel_data.integrator.transparent_shadows)
#endif
        {
                return shadow_blocked_opaque(kg,
                                             shadow_sd,
                                             state,
                                             ray,
                                             &isect,
                                             shadow);
        }
#ifdef __TRANSPARENT_SHADOWS__
#  ifdef __SHADOW_RECORD_ALL__
        /* For transparent shadows we try to use the record-all logic on the
         * devices which support it.
         */
        const int transparent_max_bounce = kernel_data.integrator.transparent_max_bounce;
        /* Check transparent bounces here, for volume scatter which can do
         * lighting before surface path termination is checked.
         */
        if(state->transparent_bounce >= transparent_max_bounce) {
                return true;
        }
        const uint max_hits = transparent_max_bounce - state->transparent_bounce - 1;
#    ifdef __KERNEL_GPU__
        /* On the GPU we use the trick of tracing an opaque ray first; this
         * avoids speed regressions in some files.
         *
         * TODO(sergey): Check why using record-all behavior causes a slowdown
         * in such cases. Could that be caused by a higher spill pressure?
         */
        const bool blocked = scene_intersect(kg,
                                             *ray,
                                             PATH_RAY_SHADOW_OPAQUE,
                                             &isect,
                                             NULL,
                                             0.0f, 0.0f);
        const bool is_transparent_isect = blocked
                ? shader_transparent_shadow(kg, &isect)
                : false;
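        /* Use the stepped loop when the first hit already resolves the query
         * (no hit at all, or an opaque blocker), or when more transparent hits
         * could occur than fit into the fixed-size stack storage; otherwise
         * record all hits in a single traversal. */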
        if(!blocked || !is_transparent_isect ||
           max_hits + 1 >= SHADOW_STACK_MAX_HITS)
        {
                return shadow_blocked_transparent_stepped_loop(kg,
                                                               shadow_sd,
                                                               state,
                                                               ray,
                                                               &isect,
                                                               blocked,
                                                               is_transparent_isect,
                                                               shadow);
        }
#    endif  /* __KERNEL_GPU__ */
        return shadow_blocked_transparent_all(kg,
                                              shadow_sd,
                                              state,
                                              ray,
                                              max_hits,
                                              shadow);
#  else  /* __SHADOW_RECORD_ALL__ */
        /* Fall back to the slowest version, which works on all devices. */
        return shadow_blocked_transparent_stepped(kg,
                                                  shadow_sd,
                                                  state,
                                                  ray,
                                                  &isect,
                                                  shadow);
#  endif  /* __SHADOW_RECORD_ALL__ */
#endif  /* __TRANSPARENT_SHADOWS__ */
}

#undef SHADOW_STACK_MAX_HITS

CCL_NAMESPACE_END