/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* This is a template BVH traversal function, where various features can be
 * enabled/disabled. This way we can compile optimized versions for each case
 * without new features slowing things down.
 *
 * BVH_INSTANCING: object instancing
 * BVH_HAIR: hair curve rendering
 * BVH_HAIR_MINIMUM_WIDTH: hair curve rendering with minimum width
 * BVH_MOTION: motion blur rendering
 */
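/* A minimal sketch of how this template is typically instantiated; the exact
 * macro names besides BVH_FEATURE()/BVH_FUNCTION_FULL_NAME() come from the
 * surrounding kernel headers (bvh.h / bvh_types.h) and may differ between
 * versions:
 *
 *   #define BVH_FUNCTION_NAME bvh_intersect_hair
 *   #define BVH_FUNCTION_FEATURES BVH_INSTANCING|BVH_HAIR
 *   #include "qbvh_traversal.h"
 *
 * BVH_FEATURE(f) tests a bit in BVH_FUNCTION_FEATURES at preprocessing time,
 * so code for disabled features is compiled out entirely rather than branched
 * over at runtime.
 */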
#if BVH_FEATURE(BVH_HAIR)
#  define NODE_INTERSECT qbvh_node_intersect
#  define NODE_INTERSECT_ROBUST qbvh_node_intersect_robust
#else
#  define NODE_INTERSECT qbvh_aligned_node_intersect
#  define NODE_INTERSECT_ROBUST qbvh_aligned_node_intersect_robust
#endif
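/* With hair enabled, nodes may store unaligned (oriented) bounding boxes that
 * fit curves more tightly, so the generic intersectors are selected here;
 * otherwise the cheaper axis-aligned-only variants suffice. */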
ccl_device bool BVH_FUNCTION_FULL_NAME(QBVH)(KernelGlobals *kg,
                                             const Ray *ray,
                                             Intersection *isect,
                                             const uint visibility
#if BVH_FEATURE(BVH_HAIR_MINIMUM_WIDTH)
                                             , uint *lcg_state,
                                             float difl,
                                             float extmax
#endif
                                             )
{
  /* TODO(sergey):
   *  - Test if pushing distance on the stack helps (for non-shadow rays).
   *  - Separate version for shadow rays.
   *  - Likely and unlikely for if() statements.
   *  - Test restrict attribute for pointers.
   */

  /* Traversal stack in CUDA thread-local memory. */
  QBVHStackItem traversal_stack[BVH_QSTACK_SIZE];
  traversal_stack[0].addr = ENTRYPOINT_SENTINEL;
  traversal_stack[0].dist = -FLT_MAX;
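  /* The bottom-of-stack sentinel terminates traversal: popping it ends the
   * loops below. */
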
  /* Traversal variables in registers. */
  int stack_ptr = 0;
  int node_addr = kernel_data.bvh.root;
  float node_dist = -FLT_MAX;

  /* Ray parameters in registers. */
  float3 P = ray->P;
  float3 dir = bvh_clamp_direction(ray->D);
  float3 idir = bvh_inverse_direction(dir);
  int object = OBJECT_NONE;

#if BVH_FEATURE(BVH_MOTION)
  Transform ob_itfm;
#endif

#ifndef __KERNEL_SSE41__
  if(!isfinite(P.x)) {
    return false;
  }
#endif

  isect->t = ray->t;
  isect->u = 0.0f;
  isect->v = 0.0f;
  isect->prim = PRIM_NONE;
  isect->object = OBJECT_NONE;

  BVH_DEBUG_INIT();

  ssef tnear(0.0f), tfar(ray->t);
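  /* tnear/tfar hold the ray's active [t_min, t_max] interval replicated
   * across SIMD lanes for the 4-wide node tests; tfar is shrunk every time a
   * closer hit is recorded. */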
#if BVH_FEATURE(BVH_HAIR)
  sse3f dir4(ssef(dir.x), ssef(dir.y), ssef(dir.z));
#endif
  sse3f idir4(ssef(idir.x), ssef(idir.y), ssef(idir.z));

#ifdef __KERNEL_AVX2__
  float3 P_idir = P*idir;
  sse3f P_idir4 = sse3f(P_idir.x, P_idir.y, P_idir.z);
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
  sse3f org4 = sse3f(ssef(P.x), ssef(P.y), ssef(P.z));
#endif

  /* Offsets to select the side that becomes the lower or upper bound. */
  int near_x, near_y, near_z;
  int far_x, far_y, far_z;
  qbvh_near_far_idx_calc(idir,
                         &near_x, &near_y, &near_z,
                         &far_x, &far_y, &far_z);
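  /* qbvh_near_far_idx_calc() picks, per axis and based on the sign of the ray
   * direction, which of the pre-swizzled node bounds serves as the slab entry
   * (near) and which as the exit (far), so the node test below needs no
   * per-axis branching. */
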
  /* Traversal loop. */
  do {
    do {
      /* Traverse internal nodes. */
      while(node_addr >= 0 && node_addr != ENTRYPOINT_SENTINEL) {
        float4 inodes = kernel_tex_fetch(__bvh_nodes, node_addr+0);

        if(UNLIKELY(node_dist > isect->t)
#if BVH_FEATURE(BVH_MOTION)
           || UNLIKELY(ray->time < inodes.y)
           || UNLIKELY(ray->time > inodes.z)
#endif
#ifdef __VISIBILITY_FLAG__
           || (__float_as_uint(inodes.x) & visibility) == 0
#endif
        )
        {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        int child_mask;
        ssef dist;

        BVH_DEBUG_NEXT_NODE();

#if BVH_FEATURE(BVH_HAIR_MINIMUM_WIDTH)
        if(difl != 0.0f) {
          /* NOTE: We extend all the child bounding boxes instead of fetching
           * and checking visibility flags for each of them.
           *
           * Need to test if doing the opposite would be any faster.
           */
          child_mask = NODE_INTERSECT_ROBUST(kg,
                                             tnear,
                                             tfar,
#  ifdef __KERNEL_AVX2__
                                             P_idir4,
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
                                             org4,
#  endif
#  if BVH_FEATURE(BVH_HAIR)
                                             dir4,
#  endif
                                             idir4,
                                             near_x, near_y, near_z,
                                             far_x, far_y, far_z,
                                             node_addr,
                                             difl,
                                             &dist);
        }
        else
#endif  /* BVH_HAIR_MINIMUM_WIDTH */
        {
          child_mask = NODE_INTERSECT(kg,
                                      tnear,
                                      tfar,
#ifdef __KERNEL_AVX2__
                                      P_idir4,
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
                                      org4,
#endif
#if BVH_FEATURE(BVH_HAIR)
                                      dir4,
#endif
                                      idir4,
                                      near_x, near_y, near_z,
                                      far_x, far_y, far_z,
                                      node_addr,
                                      &dist);
        }
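        /* child_mask has one bit set per child whose bounds the ray enters
         * within [tnear, tfar]; dist holds the corresponding entry distances
         * in the matching SIMD lanes. */
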
        if(child_mask != 0) {
          float4 cnodes;
          /* TODO(sergey): Investigate whether moving cnodes upwards
           * gives a speedup (will be a different cache pattern but will
           * avoid an extra check here).
           */
#if BVH_FEATURE(BVH_HAIR)
          if(__float_as_uint(inodes.x) & PATH_RAY_NODE_UNALIGNED) {
            cnodes = kernel_tex_fetch(__bvh_nodes, node_addr+13);
          }
          else
#endif
          {
            cnodes = kernel_tex_fetch(__bvh_nodes, node_addr+7);
          }

          /* One child is hit, continue with that child. */
          int r = __bscf(child_mask);
          float d0 = ((float*)&dist)[r];
          if(child_mask == 0) {
            node_addr = __float_as_int(cnodes[r]);
            node_dist = d0;
            continue;
          }

          /* Two children are hit, push the far child, and continue with the
           * closer child.
           */
          int c0 = __float_as_int(cnodes[r]);
          r = __bscf(child_mask);
          int c1 = __float_as_int(cnodes[r]);
          float d1 = ((float*)&dist)[r];
          if(child_mask == 0) {
            if(d1 < d0) {
              node_addr = c1;
              node_dist = d1;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c0;
              traversal_stack[stack_ptr].dist = d0;
              continue;
            }
            else {
              node_addr = c0;
              node_dist = d0;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c1;
              traversal_stack[stack_ptr].dist = d1;
              continue;
            }
          }

          /* Here starts the slow path for 3 or 4 hit children. We push
           * all nodes onto the stack to sort them there.
           */
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c1;
          traversal_stack[stack_ptr].dist = d1;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c0;
          traversal_stack[stack_ptr].dist = d0;

          /* Three children are hit, push all onto the stack, sort the 3
           * stack items, and continue with the closest child.
           */
          r = __bscf(child_mask);
          int c2 = __float_as_int(cnodes[r]);
          float d2 = ((float*)&dist)[r];
          if(child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c2;
            traversal_stack[stack_ptr].dist = d2;
            qbvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Four children are hit, push all onto the stack, sort the 4
           * stack items, and continue with the closest child.
           */
          r = __bscf(child_mask);
          int c3 = __float_as_int(cnodes[r]);
          float d3 = ((float*)&dist)[r];
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c3;
          traversal_stack[stack_ptr].dist = d3;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c2;
          traversal_stack[stack_ptr].dist = d2;
          qbvh_stack_sort(&traversal_stack[stack_ptr],
                          &traversal_stack[stack_ptr - 1],
                          &traversal_stack[stack_ptr - 2],
                          &traversal_stack[stack_ptr - 3]);
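          /* After qbvh_stack_sort() the nearest of the pushed children sits
           * on top of the stack, so it is popped and traversed first. */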
        }

        node_addr = traversal_stack[stack_ptr].addr;
        node_dist = traversal_stack[stack_ptr].dist;
        --stack_ptr;
      }

      /* If node is leaf, fetch triangle list. */
      if(node_addr < 0) {
        float4 leaf = kernel_tex_fetch(__bvh_leaf_nodes, (-node_addr-1));

#ifdef __VISIBILITY_FLAG__
        if(UNLIKELY((node_dist > isect->t) ||
                    ((__float_as_uint(leaf.z) & visibility) == 0)))
#else
        if(UNLIKELY((node_dist > isect->t)))
#endif
        {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        int prim_addr = __float_as_int(leaf.x);
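        /* A negative primitive offset marks an instance leaf: the object
         * index is encoded as -prim_addr-1 and handled in the instancing
         * branch below. */
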
#if BVH_FEATURE(BVH_INSTANCING)
        if(prim_addr >= 0) {
#endif
          int prim_addr2 = __float_as_int(leaf.y);
          const uint type = __float_as_int(leaf.w);

          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;

          /* Primitive intersection. */
          switch(type & PRIMITIVE_ALL) {
            case PRIMITIVE_TRIANGLE: {
              for(; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if(triangle_intersect(kg, isect, P, dir, visibility, object, prim_addr)) {
                  tfar = ssef(isect->t);
                  /* Shadow ray early termination. */
                  if(visibility & PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#if BVH_FEATURE(BVH_MOTION)
            case PRIMITIVE_MOTION_TRIANGLE: {
              for(; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if(motion_triangle_intersect(kg, isect, P, dir, ray->time, visibility,
                                             object, prim_addr)) {
                  tfar = ssef(isect->t);
                  /* Shadow ray early termination. */
                  if(visibility & PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif  /* BVH_FEATURE(BVH_MOTION) */
#if BVH_FEATURE(BVH_HAIR)
            case PRIMITIVE_CURVE:
            case PRIMITIVE_MOTION_CURVE: {
              for(; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                const uint curve_type = kernel_tex_fetch(__prim_type, prim_addr);
                kernel_assert((curve_type & PRIMITIVE_ALL) == (type & PRIMITIVE_ALL));
                bool hit;
                if(kernel_data.curve.curveflags & CURVE_KN_INTERPOLATE) {
                  hit = cardinal_curve_intersect(kg, isect, P, dir, visibility, object,
                                                 prim_addr, ray->time, curve_type,
                                                 lcg_state, difl, extmax);
                }
                else {
                  hit = curve_intersect(kg, isect, P, dir, visibility, object,
                                        prim_addr, ray->time, curve_type,
                                        lcg_state, difl, extmax);
                }
                if(hit) {
                  tfar = ssef(isect->t);
                  /* Shadow ray early termination. */
                  if(visibility & PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif  /* BVH_FEATURE(BVH_HAIR) */
          }
#if BVH_FEATURE(BVH_INSTANCING)
        }
        else {
          /* Instance push. */
          object = kernel_tex_fetch(__prim_object, -prim_addr-1);

#  if BVH_FEATURE(BVH_MOTION)
          qbvh_instance_motion_push(kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist, &ob_itfm);
#  else
          qbvh_instance_push(kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist);
#  endif
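          /* The ray is now in object space (the motion variant also saved the
           * inverse object transform in ob_itfm), so all cached SIMD ray
           * values are rebuilt below from the updated P/dir/idir. */
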
          qbvh_near_far_idx_calc(idir,
                                 &near_x, &near_y, &near_z,
                                 &far_x, &far_y, &far_z);
          tfar = ssef(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
          dir4 = sse3f(ssef(dir.x), ssef(dir.y), ssef(dir.z));
#  endif
          idir4 = sse3f(ssef(idir.x), ssef(idir.y), ssef(idir.z));
#  ifdef __KERNEL_AVX2__
          P_idir = P*idir;
          P_idir4 = sse3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
          org4 = sse3f(ssef(P.x), ssef(P.y), ssef(P.z));
#  endif

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_QSTACK_SIZE);
          traversal_stack[stack_ptr].addr = ENTRYPOINT_SENTINEL;
          traversal_stack[stack_ptr].dist = -FLT_MAX;

          node_addr = kernel_tex_fetch(__object_node, object);
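          /* __object_node maps the object index to the root node of that
           * object's BVH subtree, where traversal continues. */
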
          BVH_DEBUG_NEXT_INSTANCE();
        }
#endif  /* FEATURE(BVH_INSTANCING) */
      }
    } while(node_addr != ENTRYPOINT_SENTINEL);

#if BVH_FEATURE(BVH_INSTANCING)
    if(stack_ptr >= 0) {
      kernel_assert(object != OBJECT_NONE);

      /* Instance pop. */
#  if BVH_FEATURE(BVH_MOTION)
      isect->t = bvh_instance_motion_pop(kg, object, ray, &P, &dir, &idir, isect->t, &ob_itfm);
#  else
      isect->t = bvh_instance_pop(kg, object, ray, &P, &dir, &idir, isect->t);
#  endif
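      /* The pop restores the world-space ray and converts the hit distance
       * back from object space; cached SIMD ray values are rebuilt below. */
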
      qbvh_near_far_idx_calc(idir,
                             &near_x, &near_y, &near_z,
                             &far_x, &far_y, &far_z);
      tfar = ssef(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
      dir4 = sse3f(ssef(dir.x), ssef(dir.y), ssef(dir.z));
#  endif
      idir4 = sse3f(ssef(idir.x), ssef(idir.y), ssef(idir.z));
#  ifdef __KERNEL_AVX2__
      P_idir = P*idir;
      P_idir4 = sse3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
      org4 = sse3f(ssef(P.x), ssef(P.y), ssef(P.z));
#  endif

      object = OBJECT_NONE;
      node_addr = traversal_stack[stack_ptr].addr;
      node_dist = traversal_stack[stack_ptr].dist;
      --stack_ptr;
    }
#endif  /* FEATURE(BVH_INSTANCING) */
  } while(node_addr != ENTRYPOINT_SENTINEL);

  return (isect->prim != PRIM_NONE);
}

#undef NODE_INTERSECT
#undef NODE_INTERSECT_ROBUST