ClangFormat: apply to source, most of intern
[blender.git] / intern/cycles/kernel/split/kernel_next_iteration_setup.h
index e388955..781ce86 100644
@@ -48,217 +48,211 @@ CCL_NAMESPACE_BEGIN
 #ifdef __BRANCHED_PATH__
 ccl_device_inline void kernel_split_branched_indirect_light_init(KernelGlobals *kg, int ray_index)
 {
-       kernel_split_branched_path_indirect_loop_init(kg, ray_index);
+  kernel_split_branched_path_indirect_loop_init(kg, ray_index);
 
-       ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_LIGHT_INDIRECT);
+  ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_LIGHT_INDIRECT);
 }
 
 ccl_device void kernel_split_branched_transparent_bounce(KernelGlobals *kg, int ray_index)
 {
-       ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
-       ShaderData *sd = kernel_split_sd(sd, ray_index);
-       ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
-       ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
+  ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
+  ShaderData *sd = kernel_split_sd(sd, ray_index);
+  ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+  ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
 
 #  ifdef __VOLUME__
-       if(!(sd->flag & SD_HAS_ONLY_VOLUME)) {
+  if (!(sd->flag & SD_HAS_ONLY_VOLUME)) {
 #  endif
-               /* continue in case of transparency */
-               *throughput *= shader_bsdf_transparency(kg, sd);
+    /* continue in case of transparency */
+    *throughput *= shader_bsdf_transparency(kg, sd);
 
-               if(is_zero(*throughput)) {
-                       kernel_split_path_end(kg, ray_index);
-                       return;
-               }
+    if (is_zero(*throughput)) {
+      kernel_split_path_end(kg, ray_index);
+      return;
+    }
 
-               /* Update Path State */
-               path_state_next(kg, state, LABEL_TRANSPARENT);
+    /* Update Path State */
+    path_state_next(kg, state, LABEL_TRANSPARENT);
 #  ifdef __VOLUME__
-       }
-       else {
-               if(!path_state_volume_next(kg, state)) {
-                       kernel_split_path_end(kg, ray_index);
-                       return;
-               }
-       }
+  }
+  else {
+    if (!path_state_volume_next(kg, state)) {
+      kernel_split_path_end(kg, ray_index);
+      return;
+    }
+  }
 #  endif
 
-       ray->P = ray_offset(sd->P, -sd->Ng);
-       ray->t -= sd->ray_length; /* clipping works through transparent */
+  ray->P = ray_offset(sd->P, -sd->Ng);
+  ray->t -= sd->ray_length; /* clipping works through transparent */
 
 #  ifdef __RAY_DIFFERENTIALS__
-       ray->dP = sd->dP;
-       ray->dD.dx = -sd->dI.dx;
-       ray->dD.dy = -sd->dI.dy;
-#  endif  /* __RAY_DIFFERENTIALS__ */
+  ray->dP = sd->dP;
+  ray->dD.dx = -sd->dI.dx;
+  ray->dD.dy = -sd->dI.dy;
+#  endif /* __RAY_DIFFERENTIALS__ */
 
 #  ifdef __VOLUME__
-       /* enter/exit volume */
-       kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
-#  endif  /* __VOLUME__ */
+  /* enter/exit volume */
+  kernel_volume_stack_enter_exit(kg, sd, state->volume_stack);
+#  endif /* __VOLUME__ */
 }
-#endif  /* __BRANCHED_PATH__ */
+#endif /* __BRANCHED_PATH__ */
 
 ccl_device void kernel_next_iteration_setup(KernelGlobals *kg,
                                             ccl_local_param unsigned int *local_queue_atomics)
 {
-       if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
-               *local_queue_atomics = 0;
-       }
-       ccl_barrier(CCL_LOCAL_MEM_FENCE);
-
-       if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
-               /* If we are here, then it means that scene-intersect kernel
-               * has already been executed atleast once. From the next time,
-               * scene-intersect kernel may operate on queues to fetch ray index
-               */
-               *kernel_split_params.use_queues_flag = 1;
-
-               /* Mark queue indices of QUEUE_SHADOW_RAY_CAST_AO_RAYS and
-                * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues that were made empty during the
-                * previous kernel.
-                */
-               kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
-               kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
-       }
-
-       int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
-       ray_index = get_ray_index(kg, ray_index,
-                                 QUEUE_ACTIVE_AND_REGENERATED_RAYS,
-                                 kernel_split_state.queue_data,
-                                 kernel_split_params.queue_size,
-                                 0);
-
-       ccl_global char *ray_state = kernel_split_state.ray_state;
-
-#  ifdef __VOLUME__
-       /* Reactivate only volume rays here, most surface work was skipped. */
-       if(IS_STATE(ray_state, ray_index, RAY_HAS_ONLY_VOLUME)) {
-               ASSIGN_RAY_STATE(ray_state, ray_index, RAY_ACTIVE);
-       }
-#  endif
+  if (ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+    *local_queue_atomics = 0;
+  }
+  ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+  if (ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
+    /* If we are here, then it means that the scene-intersect kernel
+     * has already been executed at least once. From now on, the
+     * scene-intersect kernel may operate on queues to fetch the ray index.
+     */
+    *kernel_split_params.use_queues_flag = 1;
+
+    /* Reset the queue indices of the QUEUE_SHADOW_RAY_CAST_AO_RAYS and
+     * QUEUE_SHADOW_RAY_CAST_DL_RAYS queues, which were emptied during the
+     * previous kernel.
+     */
+    kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_AO_RAYS] = 0;
+    kernel_split_params.queue_index[QUEUE_SHADOW_RAY_CAST_DL_RAYS] = 0;
+  }
+
+  int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+  ray_index = get_ray_index(kg,
+                            ray_index,
+                            QUEUE_ACTIVE_AND_REGENERATED_RAYS,
+                            kernel_split_state.queue_data,
+                            kernel_split_params.queue_size,
+                            0);
+
+  ccl_global char *ray_state = kernel_split_state.ray_state;
+
+#ifdef __VOLUME__
+  /* Reactivate only volume rays here, most surface work was skipped. */
+  if (IS_STATE(ray_state, ray_index, RAY_HAS_ONLY_VOLUME)) {
+    ASSIGN_RAY_STATE(ray_state, ray_index, RAY_ACTIVE);
+  }
+#endif
 
-       bool active = IS_STATE(ray_state, ray_index, RAY_ACTIVE);
-       if(active) {
-               ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
-               ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
-               ShaderData *sd = kernel_split_sd(sd, ray_index);
-               ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
-               PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
+  bool active = IS_STATE(ray_state, ray_index, RAY_ACTIVE);
+  if (active) {
+    ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
+    ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
+    ShaderData *sd = kernel_split_sd(sd, ray_index);
+    ccl_global PathState *state = &kernel_split_state.path_state[ray_index];
+    PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
 
 #ifdef __BRANCHED_PATH__
-               if(!kernel_data.integrator.branched || IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
+    if (!kernel_data.integrator.branched || IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
 #endif
-                       /* Compute direct lighting and next bounce. */
-                       if(!kernel_path_surface_bounce(kg, sd, throughput, state, &L->state, ray)) {
-                               kernel_split_path_end(kg, ray_index);
-                       }
+      /* Compute direct lighting and next bounce. */
+      if (!kernel_path_surface_bounce(kg, sd, throughput, state, &L->state, ray)) {
+        kernel_split_path_end(kg, ray_index);
+      }
 #ifdef __BRANCHED_PATH__
-               }
-               else if(sd->flag & SD_HAS_ONLY_VOLUME) {
-                       kernel_split_branched_transparent_bounce(kg, ray_index);
-               }
-               else {
-                       kernel_split_branched_indirect_light_init(kg, ray_index);
-
-                       if(kernel_split_branched_path_surface_indirect_light_iter(kg,
-                                                                                 ray_index,
-                                                                                 1.0f,
-                                                                                 kernel_split_sd(branched_state_sd, ray_index),
-                                                                                 true,
-                                                                                 true))
-                       {
-                               ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
-                       }
-                       else {
-                               kernel_split_branched_path_indirect_loop_end(kg, ray_index);
-                               kernel_split_branched_transparent_bounce(kg, ray_index);
-                       }
-               }
-#endif  /* __BRANCHED_PATH__ */
-       }
-
-       /* Enqueue RAY_UPDATE_BUFFER rays. */
-       enqueue_ray_index_local(ray_index,
-                               QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
-                               IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER) && active,
-                               kernel_split_params.queue_size,
-                               local_queue_atomics,
-                               kernel_split_state.queue_data,
-                               kernel_split_params.queue_index);
+    }
+    else if (sd->flag & SD_HAS_ONLY_VOLUME) {
+      kernel_split_branched_transparent_bounce(kg, ray_index);
+    }
+    else {
+      kernel_split_branched_indirect_light_init(kg, ray_index);
+
+      if (kernel_split_branched_path_surface_indirect_light_iter(
+              kg, ray_index, 1.0f, kernel_split_sd(branched_state_sd, ray_index), true, true)) {
+        ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
+      }
+      else {
+        kernel_split_branched_path_indirect_loop_end(kg, ray_index);
+        kernel_split_branched_transparent_bounce(kg, ray_index);
+      }
+    }
+#endif /* __BRANCHED_PATH__ */
+  }
+
+  /* Enqueue RAY_UPDATE_BUFFER rays. */
+  enqueue_ray_index_local(ray_index,
+                          QUEUE_HITBG_BUFF_UPDATE_TOREGEN_RAYS,
+                          IS_STATE(ray_state, ray_index, RAY_UPDATE_BUFFER) && active,
+                          kernel_split_params.queue_size,
+                          local_queue_atomics,
+                          kernel_split_state.queue_data,
+                          kernel_split_params.queue_index);
 
 #ifdef __BRANCHED_PATH__
-       /* iter loop */
-       if(ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
-               kernel_split_params.queue_index[QUEUE_LIGHT_INDIRECT_ITER] = 0;
-       }
-
-       ray_index = get_ray_index(kg, ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
-                                 QUEUE_LIGHT_INDIRECT_ITER,
-                                 kernel_split_state.queue_data,
-                                 kernel_split_params.queue_size,
-                                 1);
-
-       if(IS_STATE(ray_state, ray_index, RAY_LIGHT_INDIRECT_NEXT_ITER)) {
-               /* for render passes, sum and reset indirect light pass variables
-                * for the next samples */
-               PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
-
-               path_radiance_sum_indirect(L);
-               path_radiance_reset_indirect(L);
-
-               if(kernel_split_branched_path_surface_indirect_light_iter(kg,
-                                                                         ray_index,
-                                                                         1.0f,
-                                                                         kernel_split_sd(branched_state_sd, ray_index),
-                                                                         true,
-                                                                         true))
-               {
-                       ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
-               }
-               else {
-                       kernel_split_branched_path_indirect_loop_end(kg, ray_index);
-                       kernel_split_branched_transparent_bounce(kg, ray_index);
-               }
-       }
+  /* iter loop */
+  if (ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
+    kernel_split_params.queue_index[QUEUE_LIGHT_INDIRECT_ITER] = 0;
+  }
+
+  ray_index = get_ray_index(kg,
+                            ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
+                            QUEUE_LIGHT_INDIRECT_ITER,
+                            kernel_split_state.queue_data,
+                            kernel_split_params.queue_size,
+                            1);
+
+  if (IS_STATE(ray_state, ray_index, RAY_LIGHT_INDIRECT_NEXT_ITER)) {
+    /* for render passes, sum and reset indirect light pass variables
+     * for the next samples */
+    PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
+
+    path_radiance_sum_indirect(L);
+    path_radiance_reset_indirect(L);
+
+    if (kernel_split_branched_path_surface_indirect_light_iter(
+            kg, ray_index, 1.0f, kernel_split_sd(branched_state_sd, ray_index), true, true)) {
+      ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
+    }
+    else {
+      kernel_split_branched_path_indirect_loop_end(kg, ray_index);
+      kernel_split_branched_transparent_bounce(kg, ray_index);
+    }
+  }
 
 #  ifdef __VOLUME__
-       /* Enqueue RAY_VOLUME_INDIRECT_NEXT_ITER rays */
-       ccl_barrier(CCL_LOCAL_MEM_FENCE);
-       if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
-               *local_queue_atomics = 0;
-       }
-       ccl_barrier(CCL_LOCAL_MEM_FENCE);
-
-       ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
-       enqueue_ray_index_local(ray_index,
-                               QUEUE_VOLUME_INDIRECT_ITER,
-                               IS_STATE(kernel_split_state.ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER),
-                               kernel_split_params.queue_size,
-                               local_queue_atomics,
-                               kernel_split_state.queue_data,
-                               kernel_split_params.queue_index);
-
-#  endif  /* __VOLUME__ */
+  /* Enqueue RAY_VOLUME_INDIRECT_NEXT_ITER rays */
+  ccl_barrier(CCL_LOCAL_MEM_FENCE);
+  if (ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+    *local_queue_atomics = 0;
+  }
+  ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+  ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+  enqueue_ray_index_local(
+      ray_index,
+      QUEUE_VOLUME_INDIRECT_ITER,
+      IS_STATE(kernel_split_state.ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER),
+      kernel_split_params.queue_size,
+      local_queue_atomics,
+      kernel_split_state.queue_data,
+      kernel_split_params.queue_index);
+
+#  endif /* __VOLUME__ */
 
 #  ifdef __SUBSURFACE__
-       /* Enqueue RAY_SUBSURFACE_INDIRECT_NEXT_ITER rays */
-       ccl_barrier(CCL_LOCAL_MEM_FENCE);
-       if(ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
-               *local_queue_atomics = 0;
-       }
-       ccl_barrier(CCL_LOCAL_MEM_FENCE);
-
-       ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
-       enqueue_ray_index_local(ray_index,
-                               QUEUE_SUBSURFACE_INDIRECT_ITER,
-                               IS_STATE(kernel_split_state.ray_state, ray_index, RAY_SUBSURFACE_INDIRECT_NEXT_ITER),
-                               kernel_split_params.queue_size,
-                               local_queue_atomics,
-                               kernel_split_state.queue_data,
-                               kernel_split_params.queue_index);
-#  endif  /* __SUBSURFACE__ */
-#endif  /* __BRANCHED_PATH__ */
+  /* Enqueue RAY_SUBSURFACE_INDIRECT_NEXT_ITER rays */
+  ccl_barrier(CCL_LOCAL_MEM_FENCE);
+  if (ccl_local_id(0) == 0 && ccl_local_id(1) == 0) {
+    *local_queue_atomics = 0;
+  }
+  ccl_barrier(CCL_LOCAL_MEM_FENCE);
+
+  ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);
+  enqueue_ray_index_local(
+      ray_index,
+      QUEUE_SUBSURFACE_INDIRECT_ITER,
+      IS_STATE(kernel_split_state.ray_state, ray_index, RAY_SUBSURFACE_INDIRECT_NEXT_ITER),
+      kernel_split_params.queue_size,
+      local_queue_atomics,
+      kernel_split_state.queue_data,
+      kernel_split_params.queue_index);
+#  endif /* __SUBSURFACE__ */
+#endif   /* __BRANCHED_PATH__ */
 }
 
 CCL_NAMESPACE_END