e6771ddc5621af51e6153af2162570834e9170db
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Copyright 2016, Blender Foundation.
19  * Contributor(s): Blender Institute
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  *
23  */
24
25 /** \file workbench_deferred.c
26  *  \ingroup draw_engine
27  */
28
29 #include "workbench_private.h"
30
31
32 #include "BLI_alloca.h"
33 #include "BLI_dynstr.h"
34 #include "BLI_utildefines.h"
35 #include "BLI_rand.h"
36 #include "BLI_string_utils.h"
37
38 #include "BKE_modifier.h"
39 #include "BKE_object.h"
40 #include "BKE_particle.h"
41
42 #include "DNA_image_types.h"
43 #include "DNA_mesh_types.h"
44 #include "DNA_modifier_types.h"
45 #include "DNA_node_types.h"
46
47
48 #include "GPU_shader.h"
49 #include "GPU_texture.h"
50 #include "GPU_extensions.h"
51
52 #include "../eevee/eevee_lut.h" /* TODO find somewhere to share blue noise Table */
53
54 /* *********** STATIC *********** */
55
56 /* #define DEBUG_SHADOW_VOLUME */
57
58 #ifdef DEBUG_SHADOW_VOLUME
59 #  include "draw_debug.h"
60 #endif
61
62 static struct {
63         struct GPUShader *prepass_sh_cache[MAX_PREPASS_SHADERS];
64         struct GPUShader *composite_sh_cache[MAX_COMPOSITE_SHADERS];
65         struct GPUShader *cavity_sh[MAX_CAVITY_SHADERS];
66         struct GPUShader *background_sh[2];
67         struct GPUShader *ghost_resolve_sh;
68         struct GPUShader *shadow_fail_sh;
69         struct GPUShader *shadow_fail_manifold_sh;
70         struct GPUShader *shadow_pass_sh;
71         struct GPUShader *shadow_pass_manifold_sh;
72         struct GPUShader *shadow_caps_sh;
73         struct GPUShader *shadow_caps_manifold_sh;
74
75         struct GPUTexture *ghost_depth_tx; /* ref only, not alloced */
76         struct GPUTexture *object_id_tx; /* ref only, not alloced */
77         struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
78         struct GPUTexture *cavity_buffer_tx; /* ref only, not alloced */
79         struct GPUTexture *metallic_buffer_tx; /* ref only, not alloced */
80         struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
81         struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */
82
83         SceneDisplay display; /* world light direction for shadows */
84         int next_object_id;
85
86         struct GPUUniformBuffer *sampling_ubo;
87         struct GPUTexture *jitter_tx;
88         int cached_sample_num;
89 } e_data = {{NULL}};
90
91 /* Shaders */
92 extern char datatoc_common_hair_lib_glsl[];
93 extern char datatoc_common_world_clip_lib_glsl[];
94
95 extern char datatoc_workbench_prepass_vert_glsl[];
96 extern char datatoc_workbench_prepass_frag_glsl[];
97 extern char datatoc_workbench_cavity_frag_glsl[];
98 extern char datatoc_workbench_deferred_composite_frag_glsl[];
99 extern char datatoc_workbench_deferred_background_frag_glsl[];
100 extern char datatoc_workbench_ghost_resolve_frag_glsl[];
101
102 extern char datatoc_workbench_shadow_vert_glsl[];
103 extern char datatoc_workbench_shadow_geom_glsl[];
104 extern char datatoc_workbench_shadow_caps_geom_glsl[];
105 extern char datatoc_workbench_shadow_debug_frag_glsl[];
106
107 extern char datatoc_workbench_background_lib_glsl[];
108 extern char datatoc_workbench_cavity_lib_glsl[];
109 extern char datatoc_workbench_common_lib_glsl[];
110 extern char datatoc_workbench_data_lib_glsl[];
111 extern char datatoc_workbench_object_outline_lib_glsl[];
112 extern char datatoc_workbench_curvature_lib_glsl[];
113 extern char datatoc_workbench_world_light_lib_glsl[];
114
115 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
116
117 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
118 {
119         char *str = NULL;
120
121         DynStr *ds = BLI_dynstr_new();
122
123         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
124         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
125         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
126
127         if (!FLAT_ENABLED(wpd)) {
128                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
129         }
130         if (OBJECT_OUTLINE_ENABLED(wpd)) {
131                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
132         }
133         if (CURVATURE_ENABLED(wpd)) {
134                 BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
135         }
136
137         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
138
139         str = BLI_dynstr_get_cstring(ds);
140         BLI_dynstr_free(ds);
141         return str;
142 }
143
144 static char *workbench_build_prepass_frag(void)
145 {
146         char *str = NULL;
147
148         DynStr *ds = BLI_dynstr_new();
149
150         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
151         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
152         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
153
154         str = BLI_dynstr_get_cstring(ds);
155         BLI_dynstr_free(ds);
156         return str;
157 }
158
159 static char *workbench_build_prepass_vert(bool is_hair)
160 {
161         char *str = NULL;
162         if (!is_hair) {
163                 return BLI_string_joinN(
164                         datatoc_common_world_clip_lib_glsl,
165                         datatoc_workbench_prepass_vert_glsl);
166         }
167
168         DynStr *ds = BLI_dynstr_new();
169
170         BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
171         BLI_dynstr_append(ds, datatoc_common_world_clip_lib_glsl);
172         BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
173
174         str = BLI_dynstr_get_cstring(ds);
175         BLI_dynstr_free(ds);
176         return str;
177 }
178
179 static char *workbench_build_cavity_frag(bool cavity, bool curvature, bool high_dpi)
180 {
181         char *str = NULL;
182
183         DynStr *ds = BLI_dynstr_new();
184
185         if (cavity) {
186                 BLI_dynstr_append(ds, "#define USE_CAVITY\n");
187         }
188         if (curvature) {
189                 BLI_dynstr_append(ds, "#define USE_CURVATURE\n");
190         }
191         if (high_dpi) {
192                 BLI_dynstr_append(ds, "#define CURVATURE_OFFSET 2\n");
193         }
194         if (NORMAL_ENCODING_ENABLED()) {
195                 BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
196         }
197         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
198         BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
199         BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
200         BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
201
202         str = BLI_dynstr_get_cstring(ds);
203         BLI_dynstr_free(ds);
204         return str;
205 }
206
207 static GPUShader *workbench_cavity_shader_get(bool cavity, bool curvature)
208 {
209         const bool high_dpi = (U.pixelsize > 1.5f);
210         int index = 0;
211         SET_FLAG_FROM_TEST(index, cavity, 1 << 0);
212         SET_FLAG_FROM_TEST(index, curvature, 1 << 1);
213         SET_FLAG_FROM_TEST(index, high_dpi, 1 << 2);
214
215         GPUShader **sh = &e_data.cavity_sh[index];
216         if (*sh == NULL) {
217                 char *cavity_frag = workbench_build_cavity_frag(cavity, curvature, high_dpi);
218                 *sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
219                 MEM_freeN(cavity_frag);
220         }
221         return *sh;
222 }
223
224 static GPUShader *ensure_deferred_prepass_shader(WORKBENCH_PrivateData *wpd, bool use_textures, bool is_hair)
225 {
226         int index = workbench_material_get_prepass_shader_index(wpd, use_textures, is_hair);
227         if (e_data.prepass_sh_cache[index] == NULL) {
228                 char *defines = workbench_material_build_defines(wpd, use_textures, is_hair);
229                 char *prepass_vert = workbench_build_prepass_vert(is_hair);
230                 char *prepass_frag = workbench_build_prepass_frag();
231                 e_data.prepass_sh_cache[index] = DRW_shader_create(
232                         prepass_vert, NULL,
233                         prepass_frag, defines);
234                 MEM_freeN(prepass_vert);
235                 MEM_freeN(prepass_frag);
236                 MEM_freeN(defines);
237         }
238         return e_data.prepass_sh_cache[index];
239 }
240
241 static GPUShader *ensure_deferred_composite_shader(WORKBENCH_PrivateData *wpd)
242 {
243         int index = workbench_material_get_composite_shader_index(wpd);
244         if (e_data.composite_sh_cache[index] == NULL) {
245                 char *defines = workbench_material_build_defines(wpd, false, false);
246                 char *composite_frag = workbench_build_composite_frag(wpd);
247                 e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
248                 MEM_freeN(composite_frag);
249                 MEM_freeN(defines);
250         }
251         return e_data.composite_sh_cache[index];
252 }
253
254 static GPUShader *ensure_background_shader(WORKBENCH_PrivateData *wpd)
255 {
256         const int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
257         if (e_data.background_sh[index] == NULL) {
258                 const char *defines = (index) ? "#define V3D_SHADING_OBJECT_OUTLINE\n" : NULL;
259                 char *frag = BLI_string_joinN(
260                         datatoc_workbench_data_lib_glsl,
261                         datatoc_workbench_common_lib_glsl,
262                         datatoc_workbench_background_lib_glsl,
263                         datatoc_workbench_object_outline_lib_glsl,
264                         datatoc_workbench_deferred_background_frag_glsl);
265                 e_data.background_sh[index] = DRW_shader_create_fullscreen(frag, defines);
266                 MEM_freeN(frag);
267         }
268         return e_data.background_sh[index];
269 }
270
/* Resolve every shader needed for the current shading configuration and
 * store them on the private data. The ensure_* helpers lazy-compile and
 * cache in e_data, so repeated calls are cheap. */
static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
{
	wpd->prepass_solid_sh = ensure_deferred_prepass_shader(wpd, false, false);
	wpd->prepass_solid_hair_sh = ensure_deferred_prepass_shader(wpd, false, true);
	wpd->prepass_texture_sh = ensure_deferred_prepass_shader(wpd, true, false);
	wpd->prepass_texture_hair_sh = ensure_deferred_prepass_shader(wpd, true, true);
	wpd->composite_sh = ensure_deferred_composite_shader(wpd);
	wpd->background_sh = ensure_background_shader(wpd);
}
280
281 /* Using Hammersley distribution */
282 static float *create_disk_samples(int num_samples, int num_iterations)
283 {
284         /* vec4 to ensure memory alignment. */
285         const int total_samples = num_samples * num_iterations;
286         float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * total_samples, __func__);
287         const float num_samples_inv = 1.0f / num_samples;
288
289         for (int i = 0; i < total_samples; i++) {
290                 float it_add = (i / num_samples) * 0.499f;
291                 float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
292                 double dphi;
293                 BLI_hammersley_1D(i, &dphi);
294
295                 float phi = (float)dphi * 2.0f * M_PI + it_add;
296                 texels[i][0] = cosf(phi);
297                 texels[i][1] = sinf(phi);
298                 /* This deliberately distribute more samples
299                  * at the center of the disk (and thus the shadow). */
300                 texels[i][2] = r;
301         }
302
303         return (float *)texels;
304 }
305
306 static struct GPUTexture *create_jitter_texture(int num_samples)
307 {
308         float jitter[64 * 64][3];
309         const float num_samples_inv = 1.0f / num_samples;
310
311         for (int i = 0; i < 64 * 64; i++) {
312                 float phi = blue_noise[i][0] * 2.0f * M_PI;
313                 /* This rotate the sample per pixels */
314                 jitter[i][0] = cosf(phi);
315                 jitter[i][1] = sinf(phi);
316                 /* This offset the sample along it's direction axis (reduce banding) */
317                 float bn = blue_noise[i][1] - 0.5f;
318                 CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
319                 jitter[i][2] = bn * num_samples_inv;
320         }
321
322         UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
323
324         return DRW_texture_create_2D(64, 64, GPU_RGB16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
325 }
326 /* Functions */
327
328
/* DrawData init callback: assign this object a small id used by the
 * object-outline / id passes, and flag its shadow bounding box for
 * recomputation. */
static void workbench_init_object_data(DrawData *dd)
{
	WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
	/* Ids wrap at 8 bits and are offset by 1, so the range is 1..256 and
	 * 0 stays available as "no object". */
	data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
	data->shadow_bbox_dirty = true;
}
335
/* Per-frame/viewport initialization of the deferred (solid mode) engine:
 * lazily compiles the statically shared shaders, queries the render target
 * textures from the pool, configures the framebuffers, rebuilds the AO
 * sampling UBO/jitter texture when the sample count changed, and creates
 * the fixed passes (prepass, ghost resolve, AA, DoF, cavity). */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
	const DRWContextState *draw_ctx = DRW_context_state_get();
	RegionView3D *rv3d = draw_ctx->rv3d;
	View3D *v3d = draw_ctx->v3d;
	Object *camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;

	if (!stl->g_data) {
		/* Alloc transient pointers */
		stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
	}
	if (!stl->effects) {
		stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
		workbench_effect_info_init(stl->effects);
	}

	/* next_object_id == 0 doubles as "static shaders not compiled yet". */
	if (!e_data.next_object_id) {
		memset(e_data.prepass_sh_cache,   0, sizeof(e_data.prepass_sh_cache));
		memset(e_data.composite_sh_cache, 0, sizeof(e_data.composite_sh_cache));
		e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
		const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
		const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
#endif
		/* TODO only compile on demand */
		/* Six shadow-volume shader variants: depth-pass vs depth-fail
		 * (with caps), each with a double-manifold and a manifold version. */
		e_data.shadow_pass_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_pass_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n");
		e_data.shadow_fail_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_fail_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
		e_data.shadow_caps_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_caps_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");

		e_data.ghost_resolve_sh = DRW_shader_create_fullscreen(datatoc_workbench_ghost_resolve_frag_glsl, NULL);
	}
	workbench_volume_engine_init();
	workbench_fxaa_engine_init();
	workbench_taa_engine_init(vedata);

	WORKBENCH_PrivateData *wpd = stl->g_data;
	workbench_private_data_init(wpd);

	workbench_dof_engine_init(vedata, camera);

	/* Render target textures and framebuffers. */
	{
		const float *viewport_size = DRW_viewport_size_get();
		const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
		const eGPUTextureFormat nor_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16 : GPU_RGBA32F;
		const eGPUTextureFormat comp_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F : GPU_R11F_G11F_B10F;
		const eGPUTextureFormat id_tex_format = OBJECT_ID_PASS_ENABLED(wpd) ? GPU_R32UI : GPU_R8;

		e_data.object_id_tx = NULL;
		e_data.color_buffer_tx = NULL;
		e_data.composite_buffer_tx = NULL;
		e_data.normal_buffer_tx = NULL;
		e_data.cavity_buffer_tx = NULL;

		e_data.composite_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], comp_tex_format, &draw_engine_workbench_solid);

		/* Only request the optional targets that the current shading
		 * configuration actually reads. */
		if (MATDATA_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
			e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		}
		if (OBJECT_ID_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
			e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], id_tex_format, &draw_engine_workbench_solid);
		}
		if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], nor_tex_format, &draw_engine_workbench_solid);
		}
		if (CAVITY_ENABLED(wpd)) {
			e_data.cavity_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R16, &draw_engine_workbench_solid);
		}

		GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->cavity_fb, {
			GPU_ATTACHMENT_NONE,
			GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->composite_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->color_only_fb, {
			GPU_ATTACHMENT_NONE,
			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
		});

		/* The effect framebuffer needs a color buffer even when the
		 * matdata pass did not request one above. */
		if (!MATDATA_PASS_ENABLED(wpd) && !GPU_unused_fb_slot_workaround()) {
			e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		}

		GPU_framebuffer_ensure_config(&fbl->effect_fb, {
			GPU_ATTACHMENT_NONE,
			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
		});

		if (OBJECT_ID_PASS_ENABLED(wpd)) {
			GPU_framebuffer_ensure_config(&fbl->id_clear_fb, {
				GPU_ATTACHMENT_NONE,
				GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
			});
		}
	}

	{
		Scene *scene = draw_ctx->scene;
		/* AO Samples Tex */
		int num_iterations = workbench_taa_calculate_num_iterations(vedata);

		const int ssao_samples_single_iteration = scene->display.matcap_ssao_samples;
		const int ssao_samples = MIN2(num_iterations * ssao_samples_single_iteration, 500);

		/* Rebuild the sampling UBO and jitter texture when the effective
		 * sample count changed since they were last created. */
		if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
			DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
			DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
		}

		if (e_data.sampling_ubo == NULL) {
			float *samples = create_disk_samples(ssao_samples_single_iteration, num_iterations);
			e_data.jitter_tx = create_jitter_texture(ssao_samples);
			e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
			e_data.cached_sample_num = ssao_samples;
			MEM_freeN(samples);
		}
	}

	/* Prepass */
	{
		DRWShadingGroup *grp;
		const bool do_cull = (draw_ctx->v3d && (draw_ctx->v3d->shading.flag & V3D_SHADING_BACKFACE_CULLING));

		int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
		/* Hair passes never cull: hair strands have no meaningful backface. */
		psl->prepass_pass = DRW_pass_create("Prepass", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
		psl->prepass_hair_pass = DRW_pass_create("Prepass", state);

		psl->ghost_prepass_pass = DRW_pass_create("Prepass Ghost", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
		psl->ghost_prepass_hair_pass = DRW_pass_create("Prepass Ghost", state);

		/* Fullscreen pass copying the ghost depth buffer into the main one. */
		psl->ghost_resolve_pass = DRW_pass_create("Resolve Ghost Depth", DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
		grp = DRW_shgroup_create(e_data.ghost_resolve_sh, psl->ghost_resolve_pass);
		DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &e_data.ghost_depth_tx);
		DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
	}

	{
		workbench_aa_create_pass(vedata, &e_data.color_buffer_tx);
	}

	{
		workbench_dof_create_pass(vedata, &e_data.composite_buffer_tx);
	}

	/* Cavity pass: fullscreen SSAO and/or curvature. */
	if (CAVITY_ENABLED(wpd)) {
		int state = DRW_STATE_WRITE_COLOR;
		GPUShader *shader = workbench_cavity_shader_get(SSAO_ENABLED(wpd), CURVATURE_ENABLED(wpd));
		psl->cavity_pass = DRW_pass_create("Cavity", state);
		DRWShadingGroup *grp = DRW_shgroup_create(shader, psl->cavity_pass);
		DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
		DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);

		if (SSAO_ENABLED(wpd)) {
			DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
			DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
			DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
			DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
			DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
			DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
			DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
		}

		if (CURVATURE_ENABLED(wpd)) {
			DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
			DRW_shgroup_uniform_vec2(grp, "curvature_settings", &wpd->world_data.curvature_ridge, 1);
		}

		DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
	}
}
549
/* Set up the framebuffer used for "ghost" (x-ray) objects: it reuses the
 * regular prepass color attachments but draws into a separate depth
 * texture, which the ghost-resolve pass later copies into the main depth
 * buffer. */
static void workbench_setup_ghost_framebuffer(WORKBENCH_FramebufferList *fbl)
{
	const float *viewport_size = DRW_viewport_size_get();
	const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};

	e_data.ghost_depth_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_DEPTH_COMPONENT24, &draw_engine_workbench_solid);

	GPU_framebuffer_ensure_config(&fbl->ghost_prepass_fb, {
		GPU_ATTACHMENT_TEXTURE(e_data.ghost_depth_tx),
		GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
		GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
		GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
	});
}
564
565 void workbench_deferred_engine_free(void)
566 {
567         for (int index = 0; index < MAX_PREPASS_SHADERS; index++) {
568                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
569         }
570         for (int index = 0; index < MAX_COMPOSITE_SHADERS; index++) {
571                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
572         }
573         for (int index = 0; index < MAX_CAVITY_SHADERS; ++index) {
574                 DRW_SHADER_FREE_SAFE(e_data.cavity_sh[index]);
575         }
576         DRW_SHADER_FREE_SAFE(e_data.ghost_resolve_sh);
577         DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
578         DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
579         DRW_SHADER_FREE_SAFE(e_data.background_sh[0]);
580         DRW_SHADER_FREE_SAFE(e_data.background_sh[1]);
581
582         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
583         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
584         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
585         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
586         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
587         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
588
589         workbench_volume_engine_free();
590         workbench_fxaa_engine_free();
591         workbench_taa_engine_free();
592         workbench_dof_engine_free();
593 }
594
595 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
596 {
597         DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
598         if (MATDATA_PASS_ENABLED(wpd)) {
599                 DRW_shgroup_uniform_texture_ref(grp, "materialBuffer", &e_data.color_buffer_tx);
600         }
601         else {
602                 DRW_shgroup_uniform_vec3(grp, "materialSingleColor", wpd->shading.single_color, 1);
603         }
604         if (OBJECT_OUTLINE_ENABLED(wpd)) {
605                 DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
606         }
607         if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
608                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
609         }
610         if (CAVITY_ENABLED(wpd)) {
611                 DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
612         }
613         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
614                 DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
615         }
616         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
617                 DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
618         }
619         if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
620                 BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
621                 DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
622         }
623 }
624
625 void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
626 {
627         WORKBENCH_StorageList *stl = vedata->stl;
628         WORKBENCH_PassList *psl = vedata->psl;
629         WORKBENCH_PrivateData *wpd = stl->g_data;
630         DRWShadingGroup *grp;
631         const DRWContextState *draw_ctx = DRW_context_state_get();
632
633         Scene *scene = draw_ctx->scene;
634
635         workbench_volume_cache_init(vedata);
636
637         select_deferred_shaders(wpd);
638
639         /* Background Pass */
640         {
641                 psl->background_pass = DRW_pass_create(
642                         "Background", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
643                 grp = DRW_shgroup_create(wpd->background_sh, psl->background_pass);
644                 DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
645                 DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
646                 if (OBJECT_OUTLINE_ENABLED(wpd)) {
647                         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
648                 }
649                 DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
650
651                 if (draw_ctx->rv3d && (draw_ctx->rv3d->rflag & RV3D_CLIPPING) && draw_ctx->rv3d->clipbb) {
652                         GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_3D_UNIFORM_COLOR_BACKGROUND);
653                         grp = DRW_shgroup_create(shader, psl->background_pass);
654                         wpd->world_clip_planes_batch = DRW_draw_background_clipping_batch_from_rv3d(draw_ctx->rv3d);
655                         DRW_shgroup_call_add(grp, wpd->world_clip_planes_batch, NULL);
656                         DRW_shgroup_uniform_vec4(grp, "color", &wpd->world_clip_planes_color[0], 1);
657                 }
658         }
659
660         /* Deferred Mix Pass */
661         {
662                 workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);
663                 studiolight_update_light(wpd, e_data.display.light_direction);
664
665                 float shadow_focus = scene->display.shadow_focus;
666                 /* Clamp to avoid overshadowing and shading errors. */
667                 CLAMP(shadow_focus, 0.0001f, 0.99999f);
668                 shadow_focus = 1.0f - shadow_focus * (1.0f - scene->display.shadow_shift);
669
670                 if (SHADOW_ENABLED(wpd)) {
671                         psl->composite_pass = DRW_pass_create(
672                                 "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL | DRW_STATE_DEPTH_GREATER);
673                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
674                         workbench_composite_uniforms(wpd, grp);
675                         DRW_shgroup_stencil_mask(grp, 0x00);
676                         DRW_shgroup_uniform_float_copy(grp, "lightMultiplier", 1.0f);
677                         DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
678                         DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
679                         DRW_shgroup_uniform_float_copy(grp, "shadowFocus", shadow_focus);
680                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
681
682                         /* Stencil Shadow passes. */
683 #ifdef DEBUG_SHADOW_VOLUME
684                         DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
685                         DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
686 #else
687                         DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
688                         DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
689 #endif
690                         psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
691                         psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
692                         psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
693                         psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
694                         psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
695                         psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);
696
697 #ifndef DEBUG_SHADOW_VOLUME
698                         grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
699                         DRW_shgroup_stencil_mask(grp, 0xFF);
700                         grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
701                         DRW_shgroup_stencil_mask(grp, 0xFF);
702                         grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
703                         DRW_shgroup_stencil_mask(grp, 0xFF);
704                         grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
705                         DRW_shgroup_stencil_mask(grp, 0xFF);
706                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
707                         DRW_shgroup_stencil_mask(grp, 0xFF);
708                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
709                         DRW_shgroup_stencil_mask(grp, 0xFF);
710
711                         psl->composite_shadow_pass = DRW_pass_create(
712                                 "Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL | DRW_STATE_DEPTH_GREATER);
713                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
714                         DRW_shgroup_stencil_mask(grp, 0x00);
715                         workbench_composite_uniforms(wpd, grp);
716                         DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
717                         DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
718                         DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
719                         DRW_shgroup_uniform_float_copy(grp, "shadowFocus", shadow_focus);
720                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
721 #endif
722                 }
723                 else {
724                         psl->composite_pass = DRW_pass_create(
725                                 "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_GREATER);
726                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
727                         workbench_composite_uniforms(wpd, grp);
728                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
729                 }
730         }
731 }
732
733 static WORKBENCH_MaterialData *get_or_create_material_data(
734         WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int color_type, int interp)
735 {
736         WORKBENCH_StorageList *stl = vedata->stl;
737         WORKBENCH_PassList *psl = vedata->psl;
738         WORKBENCH_PrivateData *wpd = stl->g_data;
739         WORKBENCH_MaterialData *material;
740         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
741                 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
742         WORKBENCH_MaterialData material_template;
743         const bool is_ghost = (ob->dtx & OB_DRAWXRAY);
744
745         /* Solid */
746         workbench_material_update_data(wpd, ob, mat, &material_template);
747         material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
748         material_template.color_type = color_type;
749         material_template.ima = ima;
750         material_template.interp = interp;
751         uint hash = workbench_material_get_hash(&material_template, is_ghost);
752
753         material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
754         if (material == NULL) {
755                 material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
756                 material->shgrp = DRW_shgroup_create(
757                         (color_type == V3D_SHADING_TEXTURE_COLOR) ? wpd->prepass_texture_sh: wpd->prepass_solid_sh,
758                         (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
759                 workbench_material_copy(material, &material_template);
760                 DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
761                 DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
762                 workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, true, interp);
763                 if (wpd->world_clip_planes) {
764                         const DRWContextState *draw_ctx = DRW_context_state_get();
765                         RegionView3D *rv3d = draw_ctx->rv3d;
766                         DRW_shgroup_world_clip_planes_from_rv3d(material->shgrp, rv3d);
767                 }
768                 BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
769         }
770         return material;
771 }
772
773 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
774 {
775         WORKBENCH_StorageList *stl = vedata->stl;
776         WORKBENCH_PassList *psl = vedata->psl;
777         WORKBENCH_PrivateData *wpd = stl->g_data;
778
779         for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
780                 if (md->type != eModifierType_ParticleSystem) {
781                         continue;
782                 }
783                 ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
784                 if (!psys_check_enabled(ob, psys, false)) {
785                         continue;
786                 }
787                 if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
788                         continue;
789                 }
790                 ParticleSettings *part = psys->part;
791                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
792
793                 if (draw_as == PART_DRAW_PATH) {
794                         Material *mat;
795                         Image *image;
796                         int interp;
797                         workbench_material_get_image_and_mat(ob, part->omat, &image, &interp, &mat);
798                         int color_type = workbench_material_determine_color_type(wpd, image, ob);
799                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, color_type, interp);
800
801                         struct GPUShader *shader = (color_type != V3D_SHADING_TEXTURE_COLOR) ?
802                                 wpd->prepass_solid_hair_sh :
803                                 wpd->prepass_texture_hair_sh;
804                         DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
805                                 ob, psys, md,
806                                 (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
807                                 shader);
808                         DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
809                         DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
810                         workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, true, interp);
811                 }
812         }
813 }
814
/**
 * Populate the solid prepass for one object.
 *
 * Registers the object's geometry in the matching shading group(s): textured,
 * single/object/random color, or per-material color. Also handles the special
 * cases: particle hair, smoke domains (drawn as volumes instead of solid) and
 * stencil shadow volume geometry when shadows are enabled.
 */
void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	const DRWContextState *draw_ctx = DRW_context_state_get();
	Scene *scene = draw_ctx->scene;

	if (!DRW_object_is_renderable(ob)) {
		return;
	}

	/* Hair is added even if the emitter surface itself is skipped below. */
	if (ob->type == OB_MESH) {
		workbench_cache_populate_particles(vedata, ob);
	}

	/* Smoke domains (on real objects, not dupli instances) are drawn as
	 * volumes only. */
	ModifierData *md;
	if (((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
	    (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
	    (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
	    (((SmokeModifierData *)md)->domain != NULL))
	{
		workbench_volume_cache_populate(vedata, scene, ob, md);
		return; /* Do not draw solid in this case. */
	}

	if (!(DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF)) {
		return;
	}
	/* Skip bounding-box / wire display modes. */
	if (ob->dt < OB_SOLID) {
		return;
	}

	WORKBENCH_MaterialData *material;
	if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
		const bool is_active = (ob == draw_ctx->obact);
		const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
		const bool use_hide = is_active && DRW_object_use_hide_faces(ob);
		/* Sculpt mode only supports a single material (see below). */
		const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
		const Mesh *me = (ob->type == OB_MESH) ? ob->data : NULL;

		if (!is_sculpt_mode && me && me->mloopuv && TEXTURE_DRAWING_ENABLED(wpd)) {
			/* Draw textured */
			struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
			for (int i = 0; i < materials_len; i++) {
				if (geom_array != NULL && geom_array[i] != NULL) {
					Material *mat;
					Image *image;
					int interp;
					/* Material slots are 1-based. */
					workbench_material_get_image_and_mat(ob, i + 1, &image, &interp, &mat);
					int color_type = workbench_material_determine_color_type(wpd, image, ob);
					material = get_or_create_material_data(vedata, ob, mat, image, color_type, interp);
					DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
				}
			}
		}
		else if (ELEM(wpd->shading.color_type,
		              V3D_SHADING_SINGLE_COLOR, V3D_SHADING_OBJECT_COLOR, V3D_SHADING_RANDOM_COLOR))
		{
			/* Draw solid color */
			material = get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type, 0);
			if (is_sculpt_mode) {
				DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
			}
			else {
				struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
				if (geom) {
					DRW_shgroup_call_object_add(material->shgrp, geom, ob);
				}
			}
		}
		else {
			/* Draw material color */
			if (is_sculpt_mode) {
				/* Multiple materials are not supported in sculpt mode yet. */
				Material *mat = give_current_material(ob, 1);
				material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
				DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
			}
			else {
				struct GPUBatch **geoms;
				struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
				memset(gpumat_array, 0, sizeof(*gpumat_array) * materials_len);

				geoms = DRW_cache_object_surface_material_get(ob, gpumat_array, materials_len, NULL, NULL, NULL);
				for (int i = 0; i < materials_len; ++i) {
					if (geoms != NULL && geoms[i] != NULL) {
						Material *mat = give_current_material(ob, i + 1);
						material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
						DRW_shgroup_call_object_add(material->shgrp, geoms[i], ob);
					}
				}
			}
		}

		/* Stencil shadow volume extrusion. */
		if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW)) {
			bool is_manifold;
			struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
			if (geom_shadow) {
				if (is_sculpt_mode || use_hide) {
					/* Currently unsupported in sculpt mode. We could revert to the slow
					 * method in this case but I'm not sure if it's a good idea given that
					 * sculpted meshes are heavy to begin with. */
					// DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
				}
				else {
					WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
						&ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);

					if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {

						/* Transform the light direction into object space. */
						invert_m4_m4(ob->imat, ob->obmat);
						mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);

						DRWShadingGroup *grp;
						/* The depth-pass technique is cheaper but only valid when the
						 * camera is outside the shadow volume; otherwise use the
						 * depth-fail passes (with caps). */
						bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);

						if (use_shadow_pass_technique) {
							if (is_manifold) {
								grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
							}
							else {
								grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
							}
							DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
							/* Extrude "to infinity"; any distance past the far clip works. */
							DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
							DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
#ifdef DEBUG_SHADOW_VOLUME
							DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
#endif
						}
						else {
							float extrude_distance = studiolight_object_shadow_distance(wpd, ob, engine_object_data);

							/* TODO(fclem): only use caps if they are in the view frustum. */
							const bool need_caps = true;
							if (need_caps) {
								if (is_manifold) {
									grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
								}
								else {
									grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
								}
								DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
								DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
								DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
							}

							if (is_manifold) {
								grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
							}
							else {
								grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
							}
							DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
							DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
							DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
#ifdef DEBUG_SHADOW_VOLUME
							DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
#endif
						}
					}
				}
			}
		}
	}
}
982
/* Nothing to finalize for the deferred engine: all shading groups are fully
 * set up during cache population. Kept to satisfy the engine callback API. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
986
987 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
988 {
989         WORKBENCH_StorageList *stl = vedata->stl;
990         WORKBENCH_FramebufferList *fbl = vedata->fbl;
991         WORKBENCH_PrivateData *wpd = stl->g_data;
992         const float clear_depth = 1.0f;
993         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
994         uint clear_stencil = 0x00;
995
996         DRW_stats_group_start("Clear Background");
997
998         if (OBJECT_ID_PASS_ENABLED(wpd)) {
999                 /* From all the color buffers, only object id needs to be cleared. */
1000                 GPU_framebuffer_bind(fbl->id_clear_fb);
1001                 GPU_framebuffer_clear_color(fbl->id_clear_fb, clear_color);
1002         }
1003
1004         GPU_framebuffer_bind(fbl->prepass_fb);
1005         int clear_bits = GPU_DEPTH_BIT;
1006         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
1007         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
1008         DRW_stats_group_end();
1009 }
1010
/**
 * Execute all deferred passes for the frame, in order: prepass (+ ghost
 * objects), cavity, stencil shadow volumes, deferred composite, background,
 * volumes, and finally depth-of-field and anti-aliasing resolve.
 *
 * NOTE(review): the pass/framebuffer ordering here is deliberate and
 * state-dependent — do not reorder without checking the stencil interactions.
 */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

	if (TAA_ENABLED(wpd)) {
		workbench_taa_draw_scene_start(vedata);
	}

	/* clear in background */
	GPU_framebuffer_bind(fbl->prepass_fb);
	DRW_draw_pass(psl->prepass_pass);
	DRW_draw_pass(psl->prepass_hair_pass);

	if (GHOST_ENABLED(psl)) {
		/* meh, late init to not request a depth buffer we won't use. */
		workbench_setup_ghost_framebuffer(fbl);

		GPU_framebuffer_bind(fbl->ghost_prepass_fb);
		GPU_framebuffer_clear_depth(fbl->ghost_prepass_fb, 1.0f);
		DRW_draw_pass(psl->ghost_prepass_pass);
		DRW_draw_pass(psl->ghost_prepass_hair_pass);

		/* Merge ghost depth back into the main depth buffer. */
		GPU_framebuffer_bind(dfbl->depth_only_fb);
		DRW_draw_pass(psl->ghost_resolve_pass);
	}

	if (CAVITY_ENABLED(wpd)) {
		GPU_framebuffer_bind(fbl->cavity_fb);
		DRW_draw_pass(psl->cavity_pass);
	}

	if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
		/* Debug mode draws the shadow volumes additively on the composite. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
#else
		GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
		/* Build the shadow stencil from all six volume pass variants. */
		DRW_draw_pass(psl->shadow_depth_pass_pass);
		DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_pass);
		DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);

		if (GHOST_ENABLED(psl)) {
			/* We need to set the stencil buffer to 0 where Ghost objects
			 * else they will get shadow and even badly shadowed. */
			DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
			DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

			DRW_draw_pass(psl->ghost_prepass_pass);
			DRW_draw_pass(psl->ghost_prepass_hair_pass);
		}
#ifndef DEBUG_SHADOW_VOLUME
		/* Composite lit (stencil == 0) and shadowed (stencil != 0) areas. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
		DRW_draw_pass(psl->composite_shadow_pass);
#endif
	}
	else {
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
	}

	/* TODO(fclem): only enable when needed (when there is overlays). */
	if (GHOST_ENABLED(psl)) {
		/* In order to not draw on top of ghost objects, we clear the stencil
		 * to 0xFF and the ghost object to 0x00 and only draw overlays on top if
		 * stencil is not 0. */
		GPU_framebuffer_bind(dfbl->depth_only_fb);
		GPU_framebuffer_clear_stencil(dfbl->depth_only_fb, 0xFF);

		DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
		DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

		DRW_draw_pass(psl->ghost_prepass_pass);
		DRW_draw_pass(psl->ghost_prepass_hair_pass);
	}

	GPU_framebuffer_bind(fbl->composite_fb);
	DRW_draw_pass(psl->background_pass);

	if (wpd->volumes_do) {
		GPU_framebuffer_bind(fbl->color_only_fb);
		DRW_draw_pass(psl->volume_pass);
	}

	/* Post processing: depth of field, then anti-aliasing resolve. */
	workbench_dof_draw_pass(vedata);
	workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
}
1106
/**
 * Per-draw cleanup: release the private data (UBOs etc.) and any smoke
 * volume textures created for this frame.
 */
void workbench_deferred_draw_finish(WORKBENCH_Data *vedata)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PrivateData *wpd = stl->g_data;

	/* XXX TODO(fclem) do not discard UBOS after drawing! Store them per viewport. */
	workbench_private_data_free(wpd);
	/* NOTE(review): wpd is still accessed here — assumes private_data_free
	 * releases wpd's internals only, not wpd itself; confirm if changing. */
	workbench_volume_smoke_textures_free(wpd);
}