/*
 * Copyright 2011-2013 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This is a template BVH traversal function, where various features can be
 * enabled/disabled. This way we can compile optimized versions for each case
 * without new features slowing things down.
 *
 * BVH_INSTANCING: object instancing
 * BVH_HAIR: hair curve rendering
 * BVH_HAIR_MINIMUM_WIDTH: hair curve rendering with minimum width
 * BVH_MOTION: motion blur rendering
 */

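/* OBVH is the 8-wide ("oct") BVH layout, traversed here with AVX vectors
 * (avxf): every traversal step intersects the ray against up to eight child
 * bounding boxes at once.
 *
 * When hair is enabled the tree may contain unaligned (oriented) nodes, so
 * the generic node intersection functions are used below; otherwise the
 * cheaper axis-aligned-only variants suffice. */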
#if BVH_FEATURE(BVH_HAIR)
#  define NODE_INTERSECT obvh_node_intersect
#  define NODE_INTERSECT_ROBUST obvh_node_intersect_robust
#else
#  define NODE_INTERSECT obvh_aligned_node_intersect
#  define NODE_INTERSECT_ROBUST obvh_aligned_node_intersect_robust
#endif

ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg,
                                             const Ray *ray,
                                             Intersection *isect,
                                             const uint visibility
#if BVH_FEATURE(BVH_HAIR_MINIMUM_WIDTH)
                                             ,
                                             uint *lcg_state,
                                             float difl,
                                             float extmax
#endif
)
{
  /* Traversal stack in local memory. */
  OBVHStackItem traversal_stack[BVH_OSTACK_SIZE];
  traversal_stack[0].addr = ENTRYPOINT_SENTINEL;
  traversal_stack[0].dist = -FLT_MAX;
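  /* The sentinel entry at the bottom of the stack terminates the traversal
   * loops below once it is popped. */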

  /* Traversal variables in registers. */
  int stack_ptr = 0;
  int node_addr = kernel_data.bvh.root;
  float node_dist = -FLT_MAX;

  /* Ray parameters in registers. */
  float3 P = ray->P;
  float3 dir = bvh_clamp_direction(ray->D);
  float3 idir = bvh_inverse_direction(dir);
  int object = OBJECT_NONE;

#if BVH_FEATURE(BVH_MOTION)
  Transform ob_itfm;
#endif

  isect->t = ray->t;
  isect->u = 0.0f;
  isect->v = 0.0f;
  isect->prim = PRIM_NONE;
  isect->object = OBJECT_NONE;

  BVH_DEBUG_INIT();
  avxf tnear(0.0f), tfar(ray->t);
#if BVH_FEATURE(BVH_HAIR)
  avx3f dir4(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#endif
  avx3f idir4(avxf(idir.x), avxf(idir.y), avxf(idir.z));

#ifdef __KERNEL_AVX2__
  float3 P_idir = P * idir;
  avx3f P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
  avx3f org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#endif

  /* Offsets to select the side that becomes the lower or upper bound. */
  int near_x, near_y, near_z;
  int far_x, far_y, far_z;
  obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);
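  /* For each axis, near_x/y/z and far_x/y/z select which of the packed lower
   * and upper bounds acts as the slab entry and exit plane, depending on the
   * sign of the ray direction, so node intersection needs no branching. */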
  /* Traversal loop. */
  do {
    do {
      /* Traverse internal nodes. */
      while (node_addr >= 0 && node_addr != ENTRYPOINT_SENTINEL) {
        float4 inodes = kernel_tex_fetch(__bvh_nodes, node_addr + 0);
        (void)inodes;
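        /* Silences unused-variable warnings when none of the features that
         * read inodes below (motion blur, visibility flags, hair) are
         * compiled in. */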

        if (UNLIKELY(node_dist > isect->t)
#if BVH_FEATURE(BVH_MOTION)
            || UNLIKELY(ray->time < inodes.y) || UNLIKELY(ray->time > inodes.z)
#endif
#ifdef __VISIBILITY_FLAG__
            || (__float_as_uint(inodes.x) & visibility) == 0
#endif
        ) {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        int child_mask;
        avxf dist;

        BVH_DEBUG_NEXT_NODE();

#if BVH_FEATURE(BVH_HAIR_MINIMUM_WIDTH)
        if (difl != 0.0f) {
          /* NOTE: We extend all the child bounding boxes instead of fetching
           * and checking visibility flags for each of them.
           *
           * Need to test whether doing the opposite would be any faster.
           */
          child_mask = NODE_INTERSECT_ROBUST(kg,
                                             tnear,
                                             tfar,
#  ifdef __KERNEL_AVX2__
                                             P_idir4,
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
                                             org4,
#  endif
#  if BVH_FEATURE(BVH_HAIR)
                                             dir4,
#  endif
                                             idir4,
                                             near_x,
                                             near_y,
                                             near_z,
                                             far_x,
                                             far_y,
                                             far_z,
                                             node_addr,
                                             difl,
                                             &dist);
        }
        else
#endif /* BVH_HAIR_MINIMUM_WIDTH */
        {
          child_mask = NODE_INTERSECT(kg,
                                      tnear,
                                      tfar,
#ifdef __KERNEL_AVX2__
                                      P_idir4,
#endif
#if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
                                      org4,
#endif
#if BVH_FEATURE(BVH_HAIR)
                                      dir4,
#endif
                                      idir4,
                                      near_x,
                                      near_y,
                                      near_z,
                                      far_x,
                                      far_y,
                                      far_z,
                                      node_addr,
                                      &dist);
        }

        if (child_mask != 0) {
          avxf cnodes;
          /* TODO(sergey): Investigate whether moving cnodes upwards
           * gives a speedup (will be different cache pattern but will
           * avoid extra check here).
           */
#if BVH_FEATURE(BVH_HAIR)
          if (__float_as_uint(inodes.x) & PATH_RAY_NODE_UNALIGNED) {
            cnodes = kernel_tex_fetch_avxf(__bvh_nodes, node_addr + 26);
          }
          else
#endif
          {
            cnodes = kernel_tex_fetch_avxf(__bvh_nodes, node_addr + 14);
          }

          /* One child is hit, continue with that child. */
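          /* __bscf() returns the index of the lowest set bit of child_mask
           * and clears that bit, so each call below consumes one hit child. */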
          int r = __bscf(child_mask);
          float d0 = ((float *)&dist)[r];
          if (child_mask == 0) {
            node_addr = __float_as_int(cnodes[r]);
            node_dist = d0;
            continue;
          }

          /* Two children are hit, push far child, and continue with
           * closer child.
           */
          int c0 = __float_as_int(cnodes[r]);
          r = __bscf(child_mask);
          int c1 = __float_as_int(cnodes[r]);
          float d1 = ((float *)&dist)[r];
          if (child_mask == 0) {
            if (d1 < d0) {
              node_addr = c1;
              node_dist = d1;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c0;
              traversal_stack[stack_ptr].dist = d0;
              continue;
            }
            else {
              node_addr = c0;
              node_dist = d0;
              ++stack_ptr;
              kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
              traversal_stack[stack_ptr].addr = c1;
              traversal_stack[stack_ptr].dist = d1;
              continue;
            }
          }

          /* Here starts the slow path for three or more hit children. We push
           * all nodes onto the stack to sort them there.
           */
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c1;
          traversal_stack[stack_ptr].dist = d1;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c0;
          traversal_stack[stack_ptr].dist = d0;

          /* Three children are hit, push all onto stack and sort 3
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c2 = __float_as_int(cnodes[r]);
          float d2 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c2;
            traversal_stack[stack_ptr].dist = d2;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Four children are hit, push all onto stack and sort 4
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c3 = __float_as_int(cnodes[r]);
          float d3 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c3;
            traversal_stack[stack_ptr].dist = d3;
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c2;
            traversal_stack[stack_ptr].dist = d2;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c3;
          traversal_stack[stack_ptr].dist = d3;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c2;
          traversal_stack[stack_ptr].dist = d2;

          /* Five children are hit, push all onto stack and sort 5
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c4 = __float_as_int(cnodes[r]);
          float d4 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c4;
            traversal_stack[stack_ptr].dist = d4;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Six children are hit, push all onto stack and sort 6
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c5 = __float_as_int(cnodes[r]);
          float d5 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c5;
            traversal_stack[stack_ptr].dist = d5;
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c4;
            traversal_stack[stack_ptr].dist = d4;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4],
                            &traversal_stack[stack_ptr - 5]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c5;
          traversal_stack[stack_ptr].dist = d5;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c4;
          traversal_stack[stack_ptr].dist = d4;

          /* Seven children are hit, push all onto stack and sort 7
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c6 = __float_as_int(cnodes[r]);
          float d6 = ((float *)&dist)[r];
          if (child_mask == 0) {
            ++stack_ptr;
            kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
            traversal_stack[stack_ptr].addr = c6;
            traversal_stack[stack_ptr].dist = d6;
            obvh_stack_sort(&traversal_stack[stack_ptr],
                            &traversal_stack[stack_ptr - 1],
                            &traversal_stack[stack_ptr - 2],
                            &traversal_stack[stack_ptr - 3],
                            &traversal_stack[stack_ptr - 4],
                            &traversal_stack[stack_ptr - 5],
                            &traversal_stack[stack_ptr - 6]);
            node_addr = traversal_stack[stack_ptr].addr;
            node_dist = traversal_stack[stack_ptr].dist;
            --stack_ptr;
            continue;
          }

          /* Eight children are hit, push all onto stack and sort 8
           * stack items, continue with closest child.
           */
          r = __bscf(child_mask);
          int c7 = __float_as_int(cnodes[r]);
          float d7 = ((float *)&dist)[r];
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c7;
          traversal_stack[stack_ptr].dist = d7;
          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = c6;
          traversal_stack[stack_ptr].dist = d6;
          obvh_stack_sort(&traversal_stack[stack_ptr],
                          &traversal_stack[stack_ptr - 1],
                          &traversal_stack[stack_ptr - 2],
                          &traversal_stack[stack_ptr - 3],
                          &traversal_stack[stack_ptr - 4],
                          &traversal_stack[stack_ptr - 5],
                          &traversal_stack[stack_ptr - 6],
                          &traversal_stack[stack_ptr - 7]);
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }

        node_addr = traversal_stack[stack_ptr].addr;
        node_dist = traversal_stack[stack_ptr].dist;
        --stack_ptr;
      }

      /* If node is leaf, fetch triangle list. */
      if (node_addr < 0) {
        float4 leaf = kernel_tex_fetch(__bvh_leaf_nodes, (-node_addr - 1));
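        /* Leaf layout as read below: leaf.x and leaf.y give the primitive
         * index range, leaf.z the visibility flags and leaf.w the primitive
         * type; a negative primitive index (leaf.x reinterpreted as int)
         * marks an instance leaf. */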

#ifdef __VISIBILITY_FLAG__
        if (UNLIKELY((node_dist > isect->t) || ((__float_as_uint(leaf.z) & visibility) == 0)))
#else
        if (UNLIKELY((node_dist > isect->t)))
#endif
        {
          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;
          continue;
        }
        int prim_addr = __float_as_int(leaf.x);

#if BVH_FEATURE(BVH_INSTANCING)
        if (prim_addr >= 0) {
#endif
          int prim_addr2 = __float_as_int(leaf.y);
          const uint type = __float_as_int(leaf.w);

          /* Pop. */
          node_addr = traversal_stack[stack_ptr].addr;
          node_dist = traversal_stack[stack_ptr].dist;
          --stack_ptr;

          /* Primitive intersection. */
          switch (type & PRIMITIVE_ALL) {
            case PRIMITIVE_TRIANGLE: {
              int prim_count = prim_addr2 - prim_addr;
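              /* Leaves with fewer than three triangles are intersected one by
               * one with the scalar path, larger ones go through the 8-wide
               * triangle_intersect8() path below. */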
              if (prim_count < 3) {
                for (; prim_addr < prim_addr2; prim_addr++) {
                  BVH_DEBUG_NEXT_INTERSECTION();
                  kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                  if (triangle_intersect(kg, isect, P, dir, visibility, object, prim_addr)) {
                    tfar = avxf(isect->t);
                    /* Shadow ray early termination. */
                    if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                      return true;
                    }
                  }
                }  //for
              }
              else {
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if (triangle_intersect8(kg,
                                        &isect,
                                        P,
                                        dir,
                                        visibility,
                                        object,
                                        prim_addr,
                                        prim_count,
                                        0,
                                        0,
                                        NULL,
                                        0.0f)) {
                  tfar = avxf(isect->t);
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }  //prim count
              break;
            }
#if BVH_FEATURE(BVH_MOTION)
            case PRIMITIVE_MOTION_TRIANGLE: {
              for (; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
                if (motion_triangle_intersect(
                        kg, isect, P, dir, ray->time, visibility, object, prim_addr)) {
                  tfar = avxf(isect->t);
                  /* Shadow ray early termination. */
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif /* BVH_FEATURE(BVH_MOTION) */
#if BVH_FEATURE(BVH_HAIR)
            case PRIMITIVE_CURVE:
            case PRIMITIVE_MOTION_CURVE: {
              for (; prim_addr < prim_addr2; prim_addr++) {
                BVH_DEBUG_NEXT_INTERSECTION();
                const uint curve_type = kernel_tex_fetch(__prim_type, prim_addr);
                kernel_assert((curve_type & PRIMITIVE_ALL) == (type & PRIMITIVE_ALL));
                bool hit;
                if (kernel_data.curve.curveflags & CURVE_KN_INTERPOLATE) {
                  hit = cardinal_curve_intersect(kg,
                                                 isect,
                                                 P,
                                                 dir,
                                                 visibility,
                                                 object,
                                                 prim_addr,
                                                 ray->time,
                                                 curve_type,
                                                 lcg_state,
                                                 difl,
                                                 extmax);
                }
                else {
                  hit = curve_intersect(kg,
                                        isect,
                                        P,
                                        dir,
                                        visibility,
                                        object,
                                        prim_addr,
                                        ray->time,
                                        curve_type,
                                        lcg_state,
                                        difl,
                                        extmax);
                }
                if (hit) {
                  tfar = avxf(isect->t);
                  /* Shadow ray early termination. */
                  if (visibility == PATH_RAY_SHADOW_OPAQUE) {
                    return true;
                  }
                }
              }
              break;
            }
#endif /* BVH_FEATURE(BVH_HAIR) */
          }
        }
#if BVH_FEATURE(BVH_INSTANCING)
        else {
          /* Instance push. */
          object = kernel_tex_fetch(__prim_object, -prim_addr - 1);

#  if BVH_FEATURE(BVH_MOTION)
          qbvh_instance_motion_push(
              kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist, &ob_itfm);
#  else
          qbvh_instance_push(kg, object, ray, &P, &dir, &idir, &isect->t, &node_dist);
#  endif

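          /* The ray was transformed into object space above, so all
           * precomputed SIMD ray quantities have to be rebuilt. */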
          obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);
          tfar = avxf(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
          dir4 = avx3f(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#  endif
          idir4 = avx3f(avxf(idir.x), avxf(idir.y), avxf(idir.z));
#  ifdef __KERNEL_AVX2__
          P_idir = P * idir;
          P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
          org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#  endif

          ++stack_ptr;
          kernel_assert(stack_ptr < BVH_OSTACK_SIZE);
          traversal_stack[stack_ptr].addr = ENTRYPOINT_SENTINEL;
          traversal_stack[stack_ptr].dist = -FLT_MAX;
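          /* Pushing the sentinel makes the inner loop exit once the instance
           * sub-tree has been traversed, so that the instance pop below runs. */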

          node_addr = kernel_tex_fetch(__object_node, object);

          BVH_DEBUG_NEXT_INSTANCE();
        }
      }
#endif /* FEATURE(BVH_INSTANCING) */
    } while (node_addr != ENTRYPOINT_SENTINEL);

#if BVH_FEATURE(BVH_INSTANCING)
    if (stack_ptr >= 0) {
      kernel_assert(object != OBJECT_NONE);

      /* Instance pop. */
#  if BVH_FEATURE(BVH_MOTION)
      isect->t = bvh_instance_motion_pop(kg, object, ray, &P, &dir, &idir, isect->t, &ob_itfm);
#  else
      isect->t = bvh_instance_pop(kg, object, ray, &P, &dir, &idir, isect->t);
#  endif

      obvh_near_far_idx_calc(idir, &near_x, &near_y, &near_z, &far_x, &far_y, &far_z);
      tfar = avxf(isect->t);
#  if BVH_FEATURE(BVH_HAIR)
      dir4 = avx3f(avxf(dir.x), avxf(dir.y), avxf(dir.z));
#  endif
      idir4 = avx3f(avxf(idir.x), avxf(idir.y), avxf(idir.z));
#  ifdef __KERNEL_AVX2__
      P_idir = P * idir;
      P_idir4 = avx3f(P_idir.x, P_idir.y, P_idir.z);
#  endif
#  if BVH_FEATURE(BVH_HAIR) || !defined(__KERNEL_AVX2__)
      org4 = avx3f(avxf(P.x), avxf(P.y), avxf(P.z));
#  endif

      object = OBJECT_NONE;
      node_addr = traversal_stack[stack_ptr].addr;
      node_dist = traversal_stack[stack_ptr].dist;
      --stack_ptr;
    }
#endif /* FEATURE(BVH_INSTANCING) */
  } while (node_addr != ENTRYPOINT_SENTINEL);

  return (isect->prim != PRIM_NONE);
}

#undef NODE_INTERSECT
#undef NODE_INTERSECT_ROBUST