Cycles: use defensive sampling for picking BSDFs and BSSRDFs.
author     Brecht Van Lommel <brechtvanlommel@gmail.com>
           Wed, 13 Sep 2017 16:28:31 +0000 (18:28 +0200)
committer  Brecht Van Lommel <brechtvanlommel@gmail.com>
           Wed, 20 Sep 2017 17:38:08 +0000 (19:38 +0200)
For the first bounce we now give each BSDF or BSSRDF a minimum sample
weight, which helps reduce noise in a typical case: a glossy BSDF whose
sample weight is small due to Fresnel, but whose contribution is not
necessarily small relative to a diffuse or transmission BSDF below it.

We can probably find a better heuristic that also enables this on further
bounces, for example when looking through a perfect mirror, but I haven't
found a robust one so far.
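
As a worked example of the clamp (numbers made up for illustration):
suppose the first bounce hits a shader with a glossy closure at
sample_weight 0.02 (small due to Fresnel) and a diffuse closure at 0.98.
Then:

    sum     = 0.02 + 0.98 = 1.0
    glossy  = max(0.02, 0.125 * 1.0) = 0.125
    diffuse = max(0.98, 0.125 * 1.0) = 0.98

so the glossy closure is now picked with probability 0.125 / 1.105, about
11% of samples instead of 2%, at the cost of slightly fewer diffuse samples.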

intern/cycles/kernel/kernel_path.h
intern/cycles/kernel/kernel_shader.h
intern/cycles/kernel/split/kernel_shader_eval.h

diff --git a/intern/cycles/kernel/kernel_path.h b/intern/cycles/kernel/kernel_path.h
index 2df21f1cda36bf90cb177cdd15b3ff64d7197b9e..d43d6374c13f85d9f21d74dd29476505e2b2fbe4 100644
@@ -435,9 +435,7 @@ ccl_device void kernel_path_indirect(KernelGlobals *kg,
                                      &isect,
                                      ray);
                shader_eval_surface(kg, sd, state, state->flag);
-#ifdef __BRANCHED_PATH__
-               shader_merge_closures(sd);
-#endif  /* __BRANCHED_PATH__ */
+               shader_prepare_closures(sd, state);
 
                /* Apply shadow catcher, holdout, emission. */
                if(!kernel_path_shader_apply(kg,
@@ -588,6 +586,7 @@ ccl_device_forceinline void kernel_path_integrate(
                /* Setup and evaluate shader. */
                shader_setup_from_ray(kg, &sd, &isect, ray);
                shader_eval_surface(kg, &sd, state, state->flag);
+               shader_prepare_closures(&sd, state);
 
                /* Apply shadow catcher, holdout, emission. */
                if(!kernel_path_shader_apply(kg,
diff --git a/intern/cycles/kernel/kernel_shader.h b/intern/cycles/kernel/kernel_shader.h
index 5ef4475e25930f18aea205fc8164ece30a42eece..bb3add5d7cae4214fde81d534dd2057faca70102 100644
@@ -494,6 +494,34 @@ ccl_device_inline void shader_merge_closures(ShaderData *sd)
 }
 #endif
 
+/* Defensive sampling. */
+
+ccl_device_inline void shader_prepare_closures(ShaderData *sd,
+                                               ccl_addr_space PathState *state)
+{
+       /* We can likely also do defensive sampling at deeper bounces, particularly
+        * for cases like a perfect mirror but possibly also others. This will need
+        * a good heuristic. */
+       if(state->bounce + state->transparent_bounce == 0 && sd->num_closure > 1) {
+               float sum = 0.0f;
+
+               for(int i = 0; i < sd->num_closure; i++) {
+                       ShaderClosure *sc = &sd->closure[i];
+                       if(CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
+                               sum += sc->sample_weight;
+                       }
+               }
+
+               for(int i = 0; i < sd->num_closure; i++) {
+                       ShaderClosure *sc = &sd->closure[i];
+                       if(CLOSURE_IS_BSDF_OR_BSSRDF(sc->type)) {
+                               sc->sample_weight = max(sc->sample_weight, 0.125f * sum);
+                       }
+               }
+       }
+}
+
+
 /* BSDF */
 
 ccl_device_inline void _shader_bsdf_multi_eval(KernelGlobals *kg, ShaderData *sd, const float3 omega_in, float *pdf,
diff --git a/intern/cycles/kernel/split/kernel_shader_eval.h b/intern/cycles/kernel/split/kernel_shader_eval.h
index c792c4adb0e4d723dbca0ce179c57b39adc91807..7032461b04a83ef294353b6307c76116ee7edd6d 100644
@@ -52,8 +52,14 @@ ccl_device void kernel_shader_eval(KernelGlobals *kg)
 
                shader_eval_surface(kg, &kernel_split_state.sd[ray_index], state, state->flag);
 #ifdef __BRANCHED_PATH__
-               shader_merge_closures(&kernel_split_state.sd[ray_index]);
-#endif  /* __BRANCHED_PATH__ */
+               if(kernel_data.integrator.branched) {
+                       shader_merge_closures(&kernel_split_state.sd[ray_index]);
+               }
+               else
+#endif
+               {
+                       shader_prepare_closures(&kernel_split_state.sd[ray_index], state);
+               }
        }
 }
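
For context, below is a minimal standalone sketch (not the actual Cycles
implementation; Closure and pick_closure() are made up for illustration)
of why the clamp matters: a closure is picked with probability proportional
to its sample_weight, so raising every BSDF/BSSRDF to at least 0.125 * sum
guarantees each one a minimum share of the samples.

#include <stdio.h>

typedef struct Closure {
	const char *name;
	float sample_weight;
} Closure;

/* Pick a closure index from a uniform random number r in [0, 1), with
 * probability proportional to sample_weight. */
static int pick_closure(const Closure *closures, int num, float r)
{
	float sum = 0.0f;
	for(int i = 0; i < num; i++)
		sum += closures[i].sample_weight;

	float partial = 0.0f;
	for(int i = 0; i < num; i++) {
		partial += closures[i].sample_weight;
		if(r * sum <= partial)
			return i;
	}
	return num - 1;
}

int main(void)
{
	Closure closures[2] = {
		{"glossy", 0.02f},   /* small sample weight due to Fresnel */
		{"diffuse", 0.98f},
	};

	/* Defensive clamp, as in shader_prepare_closures() above. */
	float sum = closures[0].sample_weight + closures[1].sample_weight;
	for(int i = 0; i < 2; i++) {
		float w = closures[i].sample_weight;
		closures[i].sample_weight = (w > 0.125f * sum) ? w : 0.125f * sum;
	}

	/* With the clamp, r = 0.1 now lands on the glossy closure
	 * (0.1 * 1.105 = 0.1105 <= 0.125); without it, it would pick diffuse. */
	printf("picked: %s\n", closures[pick_closure(closures, 2, 0.1f)].name);
	return 0;
}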