Cycles: Cleanup
author    Sergey Sharybin <sergey.vfx@gmail.com>
          Thu, 27 Sep 2018 12:47:52 +0000 (14:47 +0200)
committer Sergey Sharybin <sergey.vfx@gmail.com>
          Thu, 27 Sep 2018 12:49:37 +0000 (14:49 +0200)
intern/cycles/bvh/bvh8.cpp
intern/cycles/kernel/bvh/obvh_traversal.h
intern/cycles/kernel/bvh/qbvh_traversal.h

diff --git a/intern/cycles/bvh/bvh8.cpp b/intern/cycles/bvh/bvh8.cpp
index 8db06c318632d421615b60e8fe54a85cde263207..b95fe572e27986f3a9187b182cb1bc3fc564ffb5 100644
@@ -124,6 +124,7 @@ void BVH8::pack_aligned_node(int idx,
        data[0].a = __uint_as_float(visibility & ~PATH_RAY_NODE_UNALIGNED);
        data[0].b = time_from;
        data[0].c = time_to;
+
        for(int i = 0; i < num; i++) {
                float3 bb_min = bounds[i].min;
                float3 bb_max = bounds[i].max;
@@ -140,8 +141,8 @@ void BVH8::pack_aligned_node(int idx,
 
        for(int i = num; i < 8; i++) {
                /* We store BB which would never be recorded as intersection
-               * so kernel might safely assume there are always 4 child nodes.
-               */
+                * so the kernel can safely assume there are always 8 child nodes.
+                */
                data[1][i] = FLT_MAX;
                data[2][i] = -FLT_MAX;
 
@@ -153,6 +154,7 @@ void BVH8::pack_aligned_node(int idx,
 
                data[7][i] = __int_as_float(0);
        }
+
        memcpy(&pack.nodes[idx], data, sizeof(float4)*BVH_ONODE_SIZE);
 }
 
@@ -189,6 +191,7 @@ void BVH8::pack_unaligned_node(int idx,
 {
        float8 data[BVH_UNALIGNED_ONODE_SIZE];
        memset(data, 0, sizeof(data));
+
        data[0].a = __uint_as_float(visibility | PATH_RAY_NODE_UNALIGNED);
        data[0].b = time_from;
        data[0].c = time_to;
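For context on the padding in pack_aligned_node() above: unused child slots get an inverted bounding box (min = FLT_MAX on every axis, max = -FLT_MAX), so the traversal kernel can test all 8 slots unconditionally and the empty ones can never report a hit. Below is a minimal standalone sketch of why that works, assuming a sign-aware slab test in the style of the QBVH/OBVH kernels; the function names and scalar layout are illustrative, not the actual kernel code.

#include <algorithm>
#include <cfloat>
#include <cstdio>

/* Sign-aware slab test: the near/far bound on each axis is picked from
 * the sign of the (inverse) ray direction instead of swapping t0/t1,
 * which is what makes the inverted-box padding trick work. */
static bool ray_aabb_hit(const float org[3], const float idir[3],
                         float t_near, float t_far,
                         const float bb_min[3], const float bb_max[3])
{
        for(int axis = 0; axis < 3; axis++) {
                const float lo = (idir[axis] >= 0.0f)? bb_min[axis]: bb_max[axis];
                const float hi = (idir[axis] >= 0.0f)? bb_max[axis]: bb_min[axis];
                t_near = std::max(t_near, (lo - org[axis]) * idir[axis]);
                t_far = std::min(t_far, (hi - org[axis]) * idir[axis]);
        }
        return t_near <= t_far;
}

int main()
{
        const float org[3] = {0.0f, 0.0f, 0.0f};
        const float idir[3] = {1.0f, 1.0f, 1.0f};  /* 1 / ray direction */
        /* A padded slot as written above: data[1][i] = FLT_MAX (min),
         * data[2][i] = -FLT_MAX (max). The near plane lands at +FLT_MAX and
         * the far plane at -FLT_MAX, so t_near > t_far and it never hits. */
        const float pad_min[3] = {FLT_MAX, FLT_MAX, FLT_MAX};
        const float pad_max[3] = {-FLT_MAX, -FLT_MAX, -FLT_MAX};
        printf("padded slot hit: %d\n",
               ray_aabb_hit(org, idir, 0.0f, FLT_MAX, pad_min, pad_max));
        return 0;
}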
diff --git a/intern/cycles/kernel/bvh/obvh_traversal.h b/intern/cycles/kernel/bvh/obvh_traversal.h
index 2021d8e1143b47c8a08b98d5261c5c36301b9073..98cd8f000ba12dee5564d14f1d35476e5e7afd3b 100644
@@ -179,7 +179,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(OBVH)(KernelGlobals *kg,
                                        avxf cnodes;
                                        /* TODO(sergey): Investigate whether moving cnodes upwards
                                         * gives a speedup (will be different cache pattern but will
-                                        * avoid extra check here),
+                                        * avoid extra check here).
                                         */
 #if BVH_FEATURE(BVH_HAIR)
                                        if(__float_as_uint(inodes.x) & PATH_RAY_NODE_UNALIGNED) {
diff --git a/intern/cycles/kernel/bvh/qbvh_traversal.h b/intern/cycles/kernel/bvh/qbvh_traversal.h
index 335a4afd47a3957ef2c02486f029b8b4ab11377f..b36689b2a2ee02bc67d76333efc8a666fa122199 100644
@@ -188,7 +188,7 @@ ccl_device bool BVH_FUNCTION_FULL_NAME(QBVH)(KernelGlobals *kg,
                                        float4 cnodes;
                                        /* TODO(sergey): Investigate whether moving cnodes upwards
                                         * gives a speedup (will be different cache pattern but will
-                                        * avoid extra check here),
+                                        * avoid extra check here).
                                         */
 #if BVH_FEATURE(BVH_HAIR)
                                        if(__float_as_uint(inodes.x) & PATH_RAY_NODE_UNALIGNED) {
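Tying the two kernel hunks back to the packing code: pack_aligned_node() clears PATH_RAY_NODE_UNALIGNED in the visibility mask before bit-casting it into data[0].a, while pack_unaligned_node() sets it, and the traversal loops above recover the bits with __float_as_uint() to pick the aligned or unaligned intersection path. A minimal sketch of that round trip, with memcpy-based stand-ins for the kernel's __uint_as_float()/__float_as_uint() helpers and a hypothetical flag value (the real one is defined in the Cycles path state enums):

#include <cstdint>
#include <cstdio>
#include <cstring>

/* Stand-ins for the kernel's bit-cast helpers. */
static float uint_as_float(uint32_t u) { float f; memcpy(&f, &u, sizeof(f)); return f; }
static uint32_t float_as_uint(float f) { uint32_t u; memcpy(&u, &f, sizeof(u)); return u; }

/* Hypothetical bit value, for illustration only. */
static const uint32_t PATH_RAY_NODE_UNALIGNED = (1u << 20);

int main()
{
        const uint32_t visibility = 0xffu;  /* example visibility mask */

        /* Packing side: tag the node type in a spare bit of the mask,
         * then store the bits in the float node data. */
        const float aligned = uint_as_float(visibility & ~PATH_RAY_NODE_UNALIGNED);
        const float unaligned = uint_as_float(visibility | PATH_RAY_NODE_UNALIGNED);

        /* Traversal side: recover the bits and branch on the node type,
         * like the BVH_HAIR checks in the hunks above. */
        printf("aligned flagged: %d\n",
               (float_as_uint(aligned) & PATH_RAY_NODE_UNALIGNED) != 0);
        printf("unaligned flagged: %d\n",
               (float_as_uint(unaligned) & PATH_RAY_NODE_UNALIGNED) != 0);
        return 0;
}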