5c65cf78820e0c49ac4d2c73810c30dc9ed49512
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file workbench_deferred.c
23  *  \ingroup draw_engine
24  */
25
26 #include "workbench_private.h"
27
28 #include "BIF_gl.h"
29
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
33 #include "BLI_rand.h"
34
35 #include "BKE_node.h"
36 #include "BKE_modifier.h"
37 #include "BKE_particle.h"
38
39 #include "DNA_image_types.h"
40 #include "DNA_mesh_types.h"
41 #include "DNA_modifier_types.h"
42 #include "DNA_node_types.h"
43
44 #include "ED_uvedit.h"
45
46 #include "GPU_shader.h"
47 #include "GPU_texture.h"
48
49 #include "../eevee/eevee_lut.h" /* TODO find somewhere to share blue noise Table */
50
51 /* *********** STATIC *********** */
52
53 /* #define DEBUG_SHADOW_VOLUME */
54
55 #ifdef DEBUG_SHADOW_VOLUME
56 #  include "draw_debug.h"
57 #endif
58
/* Engine-wide static state for the deferred workbench engine.
 * Shared across all viewports; shaders/UBO/jitter texture are freed in
 * workbench_deferred_engine_free(), pool textures are viewport-pool owned. */
static struct {
        /* Shader caches indexed by the material shader index
         * (see workbench_material_get_shader_index). Lazily compiled. */
        struct GPUShader *prepass_sh_cache[MAX_SHADERS];
        struct GPUShader *composite_sh_cache[MAX_SHADERS];
        struct GPUShader *cavity_sh;
        struct GPUShader *ghost_resolve_sh;
        /* Stencil-shadow volume shaders: pass/fail variants, with and
         * without the manifold (DOUBLE_MANIFOLD) define, plus cap shaders. */
        struct GPUShader *shadow_fail_sh;
        struct GPUShader *shadow_fail_manifold_sh;
        struct GPUShader *shadow_pass_sh;
        struct GPUShader *shadow_pass_manifold_sh;
        struct GPUShader *shadow_caps_sh;
        struct GPUShader *shadow_caps_manifold_sh;

        struct GPUTexture *ghost_depth_tx; /* ref only, not alloced */
        struct GPUTexture *object_id_tx; /* ref only, not alloced */
        struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *cavity_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *specular_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

        SceneDisplay display; /* world light direction for shadows */
        /* Monotonic id handed to objects for the object-id pass; 0 doubles as
         * "static data not yet initialized" (see engine_init). */
        int next_object_id;

        /* SSAO sampling resources, rebuilt when the sample count changes. */
        struct GPUUniformBuffer *sampling_ubo;
        struct GPUTexture *jitter_tx;
        int cached_sample_num;
} e_data = {{NULL}};
86
87 /* Shaders */
88 extern char datatoc_common_hair_lib_glsl[];
89
90 extern char datatoc_workbench_prepass_vert_glsl[];
91 extern char datatoc_workbench_prepass_frag_glsl[];
92 extern char datatoc_workbench_cavity_frag_glsl[];
93 extern char datatoc_workbench_deferred_composite_frag_glsl[];
94 extern char datatoc_workbench_ghost_resolve_frag_glsl[];
95
96 extern char datatoc_workbench_shadow_vert_glsl[];
97 extern char datatoc_workbench_shadow_geom_glsl[];
98 extern char datatoc_workbench_shadow_caps_geom_glsl[];
99 extern char datatoc_workbench_shadow_debug_frag_glsl[];
100
101 extern char datatoc_workbench_background_lib_glsl[];
102 extern char datatoc_workbench_cavity_lib_glsl[];
103 extern char datatoc_workbench_common_lib_glsl[];
104 extern char datatoc_workbench_data_lib_glsl[];
105 extern char datatoc_workbench_object_outline_lib_glsl[];
106 extern char datatoc_workbench_curvature_lib_glsl[];
107 extern char datatoc_workbench_world_light_lib_glsl[];
108
109 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
110
111 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
112 {
113         char *str = NULL;
114
115         DynStr *ds = BLI_dynstr_new();
116
117         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
118         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
119         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
120
121         if ((wpd->shading.light & V3D_LIGHTING_MATCAP) || (wpd->shading.light & V3D_LIGHTING_STUDIO) || (wpd->shading.flag & V3D_SHADING_SPECULAR_HIGHLIGHT)) {
122                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
123         }
124         if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
125                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
126         }
127         if (CURVATURE_ENABLED(wpd)) {
128                 BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
129         }
130
131         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
132
133         str = BLI_dynstr_get_cstring(ds);
134         BLI_dynstr_free(ds);
135         return str;
136 }
137
138 static char *workbench_build_prepass_frag(void)
139 {
140         char *str = NULL;
141
142         DynStr *ds = BLI_dynstr_new();
143
144         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
145         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
146         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
147
148         str = BLI_dynstr_get_cstring(ds);
149         BLI_dynstr_free(ds);
150         return str;
151 }
152
153 static char *workbench_build_prepass_vert(bool is_hair)
154 {
155         char *str = NULL;
156         if (!is_hair) {
157                 return BLI_strdup(datatoc_workbench_prepass_vert_glsl);
158         }
159
160         DynStr *ds = BLI_dynstr_new();
161
162         BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
163         BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
164
165         str = BLI_dynstr_get_cstring(ds);
166         BLI_dynstr_free(ds);
167         return str;
168 }
169
170 static char *workbench_build_cavity_frag(void)
171 {
172         char *str = NULL;
173
174         DynStr *ds = BLI_dynstr_new();
175
176         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
177         BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
178         BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
179
180         str = BLI_dynstr_get_cstring(ds);
181         BLI_dynstr_free(ds);
182         return str;
183 }
184
185 static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, bool use_textures, bool is_hair)
186 {
187         if (e_data.prepass_sh_cache[index] == NULL) {
188                 char *defines = workbench_material_build_defines(wpd, use_textures, is_hair);
189                 char *composite_frag = workbench_build_composite_frag(wpd);
190                 char *prepass_vert = workbench_build_prepass_vert(is_hair);
191                 char *prepass_frag = workbench_build_prepass_frag();
192                 e_data.prepass_sh_cache[index] = DRW_shader_create(
193                         prepass_vert, NULL,
194                         prepass_frag, defines);
195                 if (!use_textures && !is_hair) {
196                         e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
197                 }
198                 MEM_freeN(prepass_vert);
199                 MEM_freeN(prepass_frag);
200                 MEM_freeN(composite_frag);
201                 MEM_freeN(defines);
202         }
203 }
204
205 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
206 {
207         int index_solid = workbench_material_get_shader_index(wpd, false, false);
208         int index_solid_hair = workbench_material_get_shader_index(wpd, false, true);
209         int index_texture = workbench_material_get_shader_index(wpd, true, false);
210         int index_texture_hair = workbench_material_get_shader_index(wpd, true, true);
211
212         ensure_deferred_shaders(wpd, index_solid, false, false);
213         ensure_deferred_shaders(wpd, index_solid_hair, false, true);
214         ensure_deferred_shaders(wpd, index_texture, true, false);
215         ensure_deferred_shaders(wpd, index_texture_hair, true, true);
216
217         wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
218         wpd->prepass_solid_hair_sh = e_data.prepass_sh_cache[index_solid_hair];
219         wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
220         wpd->prepass_texture_hair_sh = e_data.prepass_sh_cache[index_texture_hair];
221         wpd->composite_sh = e_data.composite_sh_cache[index_solid];
222 }
223
224
225 /* Using Hammersley distribution */
226 static float *create_disk_samples(int num_samples, int num_iterations)
227 {
228         /* vec4 to ensure memory alignment. */
229         const int total_samples = num_samples * num_iterations;
230         float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * total_samples, __func__);
231         const float num_samples_inv = 1.0f / num_samples;
232
233         for (int i = 0; i < total_samples; i++) {
234                 float it_add = (i / num_samples) * 0.499f;
235                 float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
236                 double dphi;
237                 BLI_hammersley_1D(i, &dphi);
238
239                 float phi = (float)dphi * 2.0f * M_PI + it_add;
240                 texels[i][0] = cosf(phi);
241                 texels[i][1] = sinf(phi);
242                 /* This deliberately distribute more samples
243                  * at the center of the disk (and thus the shadow). */
244                 texels[i][2] = r;
245         }
246
247         return (float *)texels;
248 }
249
250 static struct GPUTexture *create_jitter_texture(int num_samples)
251 {
252         float jitter[64 * 64][3];
253         const float num_samples_inv = 1.0f / num_samples;
254
255         for (int i = 0; i < 64 * 64; i++) {
256                 float phi = blue_noise[i][0] * 2.0f * M_PI;
257                 /* This rotate the sample per pixels */
258                 jitter[i][0] = cosf(phi);
259                 jitter[i][1] = sinf(phi);
260                 /* This offset the sample along it's direction axis (reduce banding) */
261                 float bn = blue_noise[i][1] - 0.5f;
262                 CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
263                 jitter[i][2] = bn * num_samples_inv;
264         }
265
266         UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
267
268         return DRW_texture_create_2D(64, 64, GPU_RGB16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
269 }
270 /* Functions */
271
272
273 static void workbench_init_object_data(DrawData *dd)
274 {
275         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
276         data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
277         data->shadow_bbox_dirty = true;
278 }
279
/* Per-redraw engine init: allocates transient storage, compiles the static
 * shaders on first run, (re)acquires the viewport-sized G-buffer textures and
 * framebuffers, refreshes the SSAO sampling resources, and creates the
 * prepass/ghost/cavity passes. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
        const DRWContextState *draw_ctx = DRW_context_state_get();

        if (!stl->g_data) {
                /* Alloc transient pointers */
                stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
        }
        if (!stl->effects) {
                stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
                workbench_effect_info_init(stl->effects);
        }

        /* next_object_id == 0 doubles as "static shaders not yet compiled". */
        if (!e_data.next_object_id) {
                memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
                const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
                const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
#endif
                /* Stencil-shadow volume shaders: depth-pass and depth-fail
                 * variants, each with and without DOUBLE_MANIFOLD, plus the
                 * cap shaders used by the depth-fail method. */
                e_data.shadow_pass_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_PASS\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_pass_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_PASS\n");
                e_data.shadow_fail_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_fail_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n");
                e_data.shadow_caps_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_caps_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_caps_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_caps_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n");

                char *cavity_frag = workbench_build_cavity_frag();
                e_data.cavity_sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
                MEM_freeN(cavity_frag);

                e_data.ghost_resolve_sh = DRW_shader_create_fullscreen(datatoc_workbench_ghost_resolve_frag_glsl, NULL);
        }
        workbench_volume_engine_init();
        workbench_fxaa_engine_init();
        workbench_taa_engine_init(vedata);

        WORKBENCH_PrivateData *wpd = stl->g_data;
        workbench_private_data_init(wpd);

        /* G-buffer textures and framebuffers (viewport-pool owned). */
        {
                const float *viewport_size = DRW_viewport_size_get();
                const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
                e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
                e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
                e_data.cavity_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
                e_data.specular_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
                e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
                        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

                /* Normals can be packed into two channels when encoding is
                 * enabled, otherwise use a full float RGBA target. */
                if (NORMAL_ENCODING_ENABLED()) {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
                }
                else {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
                }

                GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.specular_buffer_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->cavity_fb, {
                        GPU_ATTACHMENT_NONE,
                        GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->composite_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->volume_fb, {
                        GPU_ATTACHMENT_NONE,
                        GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->effect_fb, {
                        GPU_ATTACHMENT_NONE,
                        GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                });
        }

        /* SSAO sampling resources: rebuild the sample UBO and jitter texture
         * whenever the effective sample count changes. */
        {
                Scene *scene = draw_ctx->scene;
                /* AO Samples Tex */
                int num_iterations = workbench_taa_calculate_num_iterations(vedata);

                const int ssao_samples_single_iteration = scene->display.matcap_ssao_samples;
                /* Total sample count is capped at 500. */
                const int ssao_samples = MIN2(num_iterations * ssao_samples_single_iteration, 500);

                if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
                        DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
                        DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
                }

                if (e_data.sampling_ubo == NULL) {
                        float *samples = create_disk_samples(ssao_samples_single_iteration, num_iterations);
                        e_data.jitter_tx = create_jitter_texture(ssao_samples);
                        e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
                        e_data.cached_sample_num = ssao_samples;
                        MEM_freeN(samples);
                }
        }

        /* Prepass */
        {
                DRWShadingGroup *grp;
                const bool do_cull = (draw_ctx->v3d && (draw_ctx->v3d->flag2 & V3D_BACKFACE_CULLING));

                int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
                /* Hair passes never use backface culling. */
                psl->prepass_pass = DRW_pass_create("Prepass", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
                psl->prepass_hair_pass = DRW_pass_create("Prepass", state);

                psl->ghost_prepass_pass = DRW_pass_create("Prepass Ghost", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
                psl->ghost_prepass_hair_pass = DRW_pass_create("Prepass Ghost", state);

                /* Fullscreen pass that copies the ghost depth into the main
                 * depth buffer (depth test always passes, depth write on). */
                psl->ghost_resolve_pass = DRW_pass_create("Resolve Ghost Depth", DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
                grp = DRW_shgroup_create(e_data.ghost_resolve_sh, psl->ghost_resolve_pass);
                DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &e_data.ghost_depth_tx);
                DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
        }

        {
                workbench_aa_create_pass(vedata, &e_data.color_buffer_tx);
        }

        /* Cavity (SSAO) fullscreen pass. */
        {
                int state = DRW_STATE_WRITE_COLOR;
                psl->cavity_pass = DRW_pass_create("Cavity", state);
                DRWShadingGroup *grp = DRW_shgroup_create(e_data.cavity_sh, psl->cavity_pass);
                DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
                DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
                DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);

                DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
                DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
                DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
                DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
                DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
                DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
                DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);
                DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
        }
}
459
460 static void workbench_setup_ghost_framebuffer(WORKBENCH_FramebufferList *fbl)
461 {
462         const float *viewport_size = DRW_viewport_size_get();
463         const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
464
465         e_data.ghost_depth_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_DEPTH_COMPONENT24, &draw_engine_workbench_solid);
466         GPU_framebuffer_ensure_config(&fbl->ghost_prepass_fb, {
467                 GPU_ATTACHMENT_TEXTURE(e_data.ghost_depth_tx),
468                 GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
469                 GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
470                 GPU_ATTACHMENT_TEXTURE(e_data.specular_buffer_tx),
471                 GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
472         });
473 }
474
475 void workbench_deferred_engine_free(void)
476 {
477         for (int index = 0; index < MAX_SHADERS; index++) {
478                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
479                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
480         }
481         DRW_SHADER_FREE_SAFE(e_data.cavity_sh);
482         DRW_SHADER_FREE_SAFE(e_data.ghost_resolve_sh);
483         DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
484         DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
485
486         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
487         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
488         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
489         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
490         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
491         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
492
493         workbench_volume_engine_free();
494         workbench_fxaa_engine_free();
495         workbench_taa_engine_free();
496 }
497
498 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
499 {
500         DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
501         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
502         if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
503                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
504         }
505         if (SSAO_ENABLED(wpd)) {
506                 DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
507         }
508         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || MATCAP_ENABLED(wpd)) {
509                 DRW_shgroup_uniform_texture_ref(grp, "specularBuffer", &e_data.specular_buffer_tx);
510                 DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
511         }
512         DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
513         DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
514
515         if (STUDIOLIGHT_ORIENTATION_VIEWNORMAL_ENABLED(wpd)) {
516                 BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
517                 DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture );
518         }
519 }
520
/* Per-redraw cache init: selects the deferred shaders for the current shading
 * settings and creates the composite pass, plus the stencil shadow-volume
 * passes when shadows are enabled. */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DRWShadingGroup *grp;
        const DRWContextState *draw_ctx = DRW_context_state_get();

        Scene *scene = draw_ctx->scene;

        workbench_volume_cache_init(vedata);

        select_deferred_shaders(wpd);

        /* Deferred Mix Pass */
        {
                workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);
                studiolight_update_light(wpd, e_data.display.light_direction);

                e_data.display.shadow_shift = scene->display.shadow_shift;

                if (SHADOW_ENABLED(wpd)) {
                        /* Lit composite: only where the shadow stencil was NOT
                         * marked (stencil test against reference 0x00). */
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        DRW_shgroup_uniform_float_copy(grp, "lightMultiplier", 1.0f);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

                        /* Stencil Shadow passes. */
#ifdef DEBUG_SHADOW_VOLUME
                        /* Debug: draw the volumes additively in color instead
                         * of writing the stencil. */
                        DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
                        DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
#else
                        DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
                        DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
                        psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
                        psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
                        psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
                        psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
                        psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
                        psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
                        /* One shading group per shadow volume variant; all
                         * write the full stencil mask. */
                        grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);

                        /* Shadowed composite: where the stencil WAS marked;
                         * light is scaled down by the shadow multiplier. */
                        psl->composite_shadow_pass = DRW_pass_create("Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif

                }
                else {
                        /* No shadows: single composite over the whole frame. */
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
                }
        }
}
602
603 static WORKBENCH_MaterialData *get_or_create_material_data(
604         WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int color_type)
605 {
606         WORKBENCH_StorageList *stl = vedata->stl;
607         WORKBENCH_PassList *psl = vedata->psl;
608         WORKBENCH_PrivateData *wpd = stl->g_data;
609         WORKBENCH_MaterialData *material;
610         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
611                 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
612         WORKBENCH_MaterialData material_template;
613         const bool is_ghost = (ob->dtx & OB_DRAWXRAY);
614
615         /* Solid */
616         workbench_material_update_data(wpd, ob, mat, &material_template);
617         material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
618         material_template.color_type = color_type;
619         material_template.ima = ima;
620         uint hash = workbench_material_get_hash(&material_template, is_ghost);
621
622         material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
623         if (material == NULL) {
624                 material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
625                 material->shgrp = DRW_shgroup_create(
626                         (color_type == V3D_SHADING_TEXTURE_COLOR) ? wpd->prepass_texture_sh: wpd->prepass_solid_sh,
627                         (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
628                 workbench_material_copy(material, &material_template);
629                 DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
630                 DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
631                 workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob);
632
633                 BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
634         }
635         return material;
636 }
637
638 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
639 {
640         WORKBENCH_StorageList *stl = vedata->stl;
641         WORKBENCH_PassList *psl = vedata->psl;
642         WORKBENCH_PrivateData *wpd = stl->g_data;
643
644         for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
645                 if (md->type != eModifierType_ParticleSystem) {
646                         continue;
647                 }
648                 ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
649                 if (!psys_check_enabled(ob, psys, false)) {
650                         continue;
651                 }
652                 if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
653                         continue;
654                 }
655                 ParticleSettings *part = psys->part;
656                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
657
658                 if (draw_as == PART_DRAW_PATH) {
659                         Image *image = NULL;
660                         Material *mat = give_current_material(ob, part->omat);
661                         ED_object_get_active_image(ob, part->omat, &image, NULL, NULL, NULL);
662                         int color_type = workbench_material_determine_color_type(wpd, image, ob);
663                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, color_type);
664
665                         struct GPUShader *shader = (color_type != V3D_SHADING_TEXTURE_COLOR) ?
666                                 wpd->prepass_solid_hair_sh :
667                                 wpd->prepass_texture_hair_sh;
668                         DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
669                                 ob, psys, md,
670                                 (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
671                                 shader);
672                         DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
673                         DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
674                         workbench_material_shgroup_uniform(wpd, shgrp, material, ob);
675                 }
676         }
677 }
678
679 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
680 {
681         WORKBENCH_StorageList *stl = vedata->stl;
682         WORKBENCH_PassList *psl = vedata->psl;
683         WORKBENCH_PrivateData *wpd = stl->g_data;
684         const DRWContextState *draw_ctx = DRW_context_state_get();
685         Scene *scene = draw_ctx->scene;
686
687         if (!DRW_object_is_renderable(ob))
688                 return;
689
690         if (ob->type == OB_MESH) {
691                 workbench_cache_populate_particles(vedata, ob);
692         }
693
694         ModifierData *md;
695         if (((ob->base_flag & BASE_FROMDUPLI) == 0) &&
696             (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
697             (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
698             (((SmokeModifierData *)md)->domain != NULL))
699         {
700                 workbench_volume_cache_populate(vedata, scene, ob, md);
701                 return; /* Do not draw solid in this case. */
702         }
703
704         if (!DRW_object_is_visible_in_active_context(ob) || (ob->dt < OB_SOLID)) {
705                 return;
706         }
707
708         WORKBENCH_MaterialData *material;
709         if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
710                 const bool is_active = (ob == draw_ctx->obact);
711                 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
712                 const bool use_hide = is_active && DRW_object_use_hide_faces(ob);
713                 bool is_drawn = false;
714                 if (!is_sculpt_mode && TEXTURE_DRAWING_ENABLED(wpd) && ELEM(ob->type, OB_MESH)) {
715                         const Mesh *me = ob->data;
716                         if (me->mloopuv) {
717                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
718                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
719                                 struct GPUBatch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob, use_hide) : NULL;
720                                 if (materials_len > 0 && geom_array) {
721                                         for (int i = 0; i < materials_len; i++) {
722                                                 if (geom_array[i] == NULL) {
723                                                         continue;
724                                                 }
725
726                                                 Material *mat = give_current_material(ob, i + 1);
727                                                 Image *image;
728                                                 ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
729                                                 int color_type = workbench_material_determine_color_type(wpd, image, ob);
730                                                 material = get_or_create_material_data(vedata, ob, mat, image, color_type);
731                                                 DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
732                                         }
733                                         is_drawn = true;
734                                 }
735                         }
736                 }
737
738                 /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
739                 if (!is_drawn) {
740                         if (ELEM(wpd->shading.color_type, V3D_SHADING_SINGLE_COLOR, V3D_SHADING_RANDOM_COLOR)) {
741                                 /* No material split needed */
742                                 struct GPUBatch *geom = DRW_cache_object_surface_get_ex(ob, use_hide);
743                                 if (geom) {
744                                         material = get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type);
745                                         if (is_sculpt_mode) {
746                                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
747                                         }
748                                         else {
749                                                 DRW_shgroup_call_object_add(material->shgrp, geom, ob);
750                                         }
751                                 }
752                         }
753                         else { /* MATERIAL colors */
754                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
755                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
756                                 for (int i = 0; i < materials_len; i++) {
757                                         gpumat_array[i] = NULL;
758                                 }
759
760                                 struct GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
761                                         ob, gpumat_array, materials_len, use_hide, NULL, NULL, NULL);
762                                 if (mat_geom) {
763                                         for (int i = 0; i < materials_len; ++i) {
764                                                 if (mat_geom[i] == NULL) {
765                                                         continue;
766                                                 }
767
768                                                 Material *mat = give_current_material(ob, i + 1);
769                                                 material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR);
770                                                 if (is_sculpt_mode) {
771                                                         DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
772                                                 }
773                                                 else {
774                                                         DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
775                                                 }
776                                         }
777                                 }
778                         }
779                 }
780
781                 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW)) {
782                         bool is_manifold;
783                         struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
784                         if (geom_shadow) {
785                                 if (is_sculpt_mode || use_hide) {
786                                         /* Currently unsupported in sculpt mode. We could revert to the slow
787                                          * method in this case but I'm not sure if it's a good idea given that
788                                          * sculpted meshes are heavy to begin with. */
789                                         // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
790                                 }
791                                 else {
792                                         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
793                                                 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
794
795                                         if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
796
797                                                 invert_m4_m4(ob->imat, ob->obmat);
798                                                 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
799
800                                                 DRWShadingGroup *grp;
801                                                 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
802
803                                                 if (use_shadow_pass_technique) {
804                                                         if (is_manifold) {
805                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
806                                                         }
807                                                         else {
808                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
809                                                         }
810                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
811                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
812                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
813 #ifdef DEBUG_SHADOW_VOLUME
814                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
815 #endif
816                                                 }
817                                                 else {
818                                                         float extrude_distance = studiolight_object_shadow_distance(wpd, ob, engine_object_data);
819
820                                                         /* TODO(fclem): only use caps if they are in the view frustum. */
821                                                         const bool need_caps = true;
822                                                         if (need_caps) {
823                                                                 if (is_manifold) {
824                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
825                                                                 }
826                                                                 else {
827                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
828                                                                 }
829                                                                 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
830                                                                 DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
831                                                                 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
832                                                         }
833
834                                                         if (is_manifold) {
835                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
836                                                         }
837                                                         else {
838                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
839                                                         }
840                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
841                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
842                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
843 #ifdef DEBUG_SHADOW_VOLUME
844                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
845 #endif
846                                                 }
847                                         }
848                                 }
849                         }
850                 }
851         }
852 }
853
/* No per-frame finalization needed after cache population for the deferred
 * pipeline; kept as an (intentionally empty) engine callback. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
857
858 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
859 {
860         WORKBENCH_StorageList *stl = vedata->stl;
861         WORKBENCH_FramebufferList *fbl = vedata->fbl;
862         WORKBENCH_PrivateData *wpd = stl->g_data;
863         const float clear_depth = 1.0f;
864         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
865         uint clear_stencil = 0x00;
866
867         DRW_stats_group_start("Clear Background");
868         GPU_framebuffer_bind(fbl->prepass_fb);
869         int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
870         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
871         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
872         DRW_stats_group_end();
873 }
874
/* Execute the deferred pipeline for one sample: prepass (solid + hair),
 * ghost prepass/resolve, cavity (SSAO), stencil shadow volumes, composite,
 * volumes, and finally anti-aliasing resolve.
 * NOTE: the pass order here is load-bearing — later passes read the
 * depth/stencil state the earlier ones left behind. */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

	if (TAA_ENABLED(wpd)) {
		workbench_taa_draw_scene_start(vedata);
	}

	/* clear in background */
	GPU_framebuffer_bind(fbl->prepass_fb);
	DRW_draw_pass(psl->prepass_pass);
	DRW_draw_pass(psl->prepass_hair_pass);

	if (GHOST_ENABLED(psl)) {
		/* meh, late init to not request a depth buffer we won't use. */
		workbench_setup_ghost_framebuffer(fbl);

		/* Draw ghost objects into their own depth buffer, then resolve
		 * them into the viewport depth buffer. */
		GPU_framebuffer_bind(fbl->ghost_prepass_fb);
		GPU_framebuffer_clear_depth(fbl->ghost_prepass_fb, 1.0f);
		DRW_draw_pass(psl->ghost_prepass_pass);
		DRW_draw_pass(psl->ghost_prepass_hair_pass);

		GPU_framebuffer_bind(dfbl->depth_only_fb);
		DRW_draw_pass(psl->ghost_resolve_pass);
	}

	if (SSAO_ENABLED(wpd)) {
		GPU_framebuffer_bind(fbl->cavity_fb);
		DRW_draw_pass(psl->cavity_pass);
	}

	if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
		/* Composite first so the debug shadow volumes overlay the result. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
#else
		GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
		/* Stencil shadow volumes: depth-pass and depth-fail variants, each
		 * split into manifold / non-manifold (and capped) passes. */
		DRW_draw_pass(psl->shadow_depth_pass_pass);
		DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_pass);
		DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);

		if (GHOST_ENABLED(psl)) {
			/* We need to set the stencil buffer to 0 where Ghost objects
			 * else they will get shadow and even badly shadowed. */
			DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
			DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

			DRW_draw_pass(psl->ghost_prepass_pass);
			DRW_draw_pass(psl->ghost_prepass_hair_pass);
		}
#ifndef DEBUG_SHADOW_VOLUME
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
		DRW_draw_pass(psl->composite_shadow_pass);
#endif
	}
	else {
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
	}

	/* TODO(fclem): only enable when needed (when there is overlays). */
	if (GHOST_ENABLED(psl)) {
		/* In order to not draw on top of ghost objects, we clear the stencil
		 * to 0xFF and the ghost object to 0x00 and only draw overlays on top if
		 * stencil is not 0. */
		GPU_framebuffer_bind(dfbl->depth_only_fb);
		GPU_framebuffer_clear_stencil(dfbl->depth_only_fb, 0xFF);

		DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
		DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

		DRW_draw_pass(psl->ghost_prepass_pass);
		DRW_draw_pass(psl->ghost_prepass_hair_pass);
	}

	if (wpd->volumes_do) {
		GPU_framebuffer_bind(fbl->volume_fb);
		DRW_draw_pass(psl->volume_pass);
	}

	/* Anti-aliasing resolve (TAA/FXAA) of the composited buffer. */
	workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
}
966
967 void workbench_deferred_draw_finish(WORKBENCH_Data *vedata)
968 {
969         WORKBENCH_StorageList *stl = vedata->stl;
970         WORKBENCH_PrivateData *wpd = stl->g_data;
971
972         workbench_private_data_free(wpd);
973         workbench_volume_smoke_textures_free(wpd);
974 }