GPU: refactor clipped drawing from DRW into GPU
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2016, Blender Foundation.
17  */
18
19 /** \file workbench_deferred.c
20  *  \ingroup draw_engine
21  */
22
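/* Overview (editor note, inferred from this file): a prepass fills color/material,
 * object id and normal buffers, an optional cavity pass computes screen-space
 * AO/curvature from them, and a fullscreen composite resolves the final shading,
 * with stencil shadow volumes and a separate OIT path for transparency. */
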
23 #include "workbench_private.h"
24
25
26 #include "BLI_alloca.h"
27 #include "BLI_dynstr.h"
28 #include "BLI_utildefines.h"
29 #include "BLI_rand.h"
30 #include "BLI_string_utils.h"
31
32 #include "BKE_modifier.h"
33 #include "BKE_object.h"
34 #include "BKE_particle.h"
35
36 #include "DNA_image_types.h"
37 #include "DNA_mesh_types.h"
38 #include "DNA_modifier_types.h"
39 #include "DNA_node_types.h"
40
41
42 #include "GPU_shader.h"
43 #include "GPU_texture.h"
44 #include "GPU_extensions.h"
45
46 #include "../eevee/eevee_lut.h" /* TODO: find somewhere to share the blue noise table. */
47
48 /* *********** STATIC *********** */
49
50 /* #define DEBUG_SHADOW_VOLUME */
51
52 #ifdef DEBUG_SHADOW_VOLUME
53 #  include "draw_debug.h"
54 #endif
55
56 static struct {
57         struct GPUShader *prepass_sh_cache[MAX_PREPASS_SHADERS];
58         struct GPUShader *composite_sh_cache[MAX_COMPOSITE_SHADERS];
59         struct GPUShader *cavity_sh[MAX_CAVITY_SHADERS];
60         struct GPUShader *background_sh[2];
61         struct GPUShader *ghost_resolve_sh;
62         struct GPUShader *shadow_fail_sh;
63         struct GPUShader *shadow_fail_manifold_sh;
64         struct GPUShader *shadow_pass_sh;
65         struct GPUShader *shadow_pass_manifold_sh;
66         struct GPUShader *shadow_caps_sh;
67         struct GPUShader *shadow_caps_manifold_sh;
68         struct GPUShader *oit_resolve_sh;
69
70         /* TODO(fclem) move everything below to wpd and custom viewlayer data. */
71         struct GPUTexture *oit_accum_tx; /* ref only, not alloced */
72         struct GPUTexture *oit_revealage_tx; /* ref only, not alloced */
73         struct GPUTexture *ghost_depth_tx; /* ref only, not alloced */
74         struct GPUTexture *object_id_tx; /* ref only, not alloced */
75         struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
76         struct GPUTexture *cavity_buffer_tx; /* ref only, not alloced */
77         struct GPUTexture *metallic_buffer_tx; /* ref only, not alloced */
78         struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
79         struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */
80
81         SceneDisplay display; /* world light direction for shadows */
82         int next_object_id;
83
84         struct GPUUniformBuffer *sampling_ubo;
85         struct GPUTexture *jitter_tx;
86         int cached_sample_num;
87 } e_data = {{NULL}};
88
89 /* Shaders */
90 extern char datatoc_common_hair_lib_glsl[];
91 extern char datatoc_gpu_shader_cfg_world_clip_lib_glsl[];
92
93 extern char datatoc_workbench_prepass_vert_glsl[];
94 extern char datatoc_workbench_prepass_frag_glsl[];
95 extern char datatoc_workbench_cavity_frag_glsl[];
96 extern char datatoc_workbench_forward_composite_frag_glsl[];
97 extern char datatoc_workbench_deferred_composite_frag_glsl[];
98 extern char datatoc_workbench_deferred_background_frag_glsl[];
99 extern char datatoc_workbench_ghost_resolve_frag_glsl[];
100
101 extern char datatoc_workbench_shadow_vert_glsl[];
102 extern char datatoc_workbench_shadow_geom_glsl[];
103 extern char datatoc_workbench_shadow_caps_geom_glsl[];
104 extern char datatoc_workbench_shadow_debug_frag_glsl[];
105
106 extern char datatoc_workbench_background_lib_glsl[];
107 extern char datatoc_workbench_cavity_lib_glsl[];
108 extern char datatoc_workbench_common_lib_glsl[];
109 extern char datatoc_workbench_data_lib_glsl[];
110 extern char datatoc_workbench_object_outline_lib_glsl[];
111 extern char datatoc_workbench_curvature_lib_glsl[];
112 extern char datatoc_workbench_world_light_lib_glsl[];
113
114 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
115
116 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
117 {
118         char *str = NULL;
119
120         DynStr *ds = BLI_dynstr_new();
121
122         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
123         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
124         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
125
126         if (!FLAT_ENABLED(wpd)) {
127                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
128         }
129         if (OBJECT_OUTLINE_ENABLED(wpd)) {
130                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
131         }
132         if (CURVATURE_ENABLED(wpd)) {
133                 BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
134         }
135
136         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
137
138         str = BLI_dynstr_get_cstring(ds);
139         BLI_dynstr_free(ds);
140         return str;
141 }
142
143 static char *workbench_build_prepass_frag(void)
144 {
145         char *str = NULL;
146
147         DynStr *ds = BLI_dynstr_new();
148
149         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
150         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
151         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
152
153         str = BLI_dynstr_get_cstring(ds);
154         BLI_dynstr_free(ds);
155         return str;
156 }
157
158 static char *workbench_build_prepass_vert(bool is_hair)
159 {
160         char *str = NULL;
161         if (!is_hair) {
162                 return BLI_string_joinN(
163                         datatoc_gpu_shader_cfg_world_clip_lib_glsl,
164                         datatoc_workbench_prepass_vert_glsl);
165         }
166
167         DynStr *ds = BLI_dynstr_new();
168
169         BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
170         BLI_dynstr_append(ds, datatoc_gpu_shader_cfg_world_clip_lib_glsl);
171         BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
172
173         str = BLI_dynstr_get_cstring(ds);
174         BLI_dynstr_free(ds);
175         return str;
176 }
177
178 static char *workbench_build_cavity_frag(bool cavity, bool curvature, bool high_dpi)
179 {
180         char *str = NULL;
181
182         DynStr *ds = BLI_dynstr_new();
183
184         if (cavity) {
185                 BLI_dynstr_append(ds, "#define USE_CAVITY\n");
186         }
187         if (curvature) {
188                 BLI_dynstr_append(ds, "#define USE_CURVATURE\n");
189         }
190         if (high_dpi) {
191                 BLI_dynstr_append(ds, "#define CURVATURE_OFFSET 2\n");
192         }
193         if (NORMAL_ENCODING_ENABLED()) {
194                 BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
195         }
196         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
197         BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
198         BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
199         BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
200
201         str = BLI_dynstr_get_cstring(ds);
202         BLI_dynstr_free(ds);
203         return str;
204 }
205
206 static GPUShader *workbench_cavity_shader_get(bool cavity, bool curvature)
207 {
208         const bool high_dpi = (U.pixelsize > 1.5f);
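        /* Pack the enabled features into a bit index so each shader variation is
         * compiled at most once and cached in e_data.cavity_sh. */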
209         int index = 0;
210         SET_FLAG_FROM_TEST(index, cavity, 1 << 0);
211         SET_FLAG_FROM_TEST(index, curvature, 1 << 1);
212         SET_FLAG_FROM_TEST(index, high_dpi, 1 << 2);
213
214         GPUShader **sh = &e_data.cavity_sh[index];
215         if (*sh == NULL) {
216                 char *cavity_frag = workbench_build_cavity_frag(cavity, curvature, high_dpi);
217                 *sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
218                 MEM_freeN(cavity_frag);
219         }
220         return *sh;
221 }
222
223 static GPUShader *ensure_deferred_prepass_shader(WORKBENCH_PrivateData *wpd, bool use_textures, bool is_hair)
224 {
225         int index = workbench_material_get_prepass_shader_index(wpd, use_textures, is_hair);
226         if (e_data.prepass_sh_cache[index] == NULL) {
227                 char *defines = workbench_material_build_defines(wpd, use_textures, is_hair);
228                 char *prepass_vert = workbench_build_prepass_vert(is_hair);
229                 char *prepass_frag = workbench_build_prepass_frag();
230                 e_data.prepass_sh_cache[index] = DRW_shader_create(
231                         prepass_vert, NULL,
232                         prepass_frag, defines);
233                 MEM_freeN(prepass_vert);
234                 MEM_freeN(prepass_frag);
235                 MEM_freeN(defines);
236         }
237         return e_data.prepass_sh_cache[index];
238 }
239
240 static GPUShader *ensure_deferred_composite_shader(WORKBENCH_PrivateData *wpd)
241 {
242         int index = workbench_material_get_composite_shader_index(wpd);
243         if (e_data.composite_sh_cache[index] == NULL) {
244                 char *defines = workbench_material_build_defines(wpd, false, false);
245                 char *composite_frag = workbench_build_composite_frag(wpd);
246                 e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
247                 MEM_freeN(composite_frag);
248                 MEM_freeN(defines);
249         }
250         return e_data.composite_sh_cache[index];
251 }
252
253 static GPUShader *ensure_background_shader(WORKBENCH_PrivateData *wpd)
254 {
255         const int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
256         if (e_data.background_sh[index] == NULL) {
257                 const char *defines = (index) ? "#define V3D_SHADING_OBJECT_OUTLINE\n" : NULL;
258                 char *frag = BLI_string_joinN(
259                         datatoc_workbench_data_lib_glsl,
260                         datatoc_workbench_common_lib_glsl,
261                         datatoc_workbench_background_lib_glsl,
262                         datatoc_workbench_object_outline_lib_glsl,
263                         datatoc_workbench_deferred_background_frag_glsl);
264                 e_data.background_sh[index] = DRW_shader_create_fullscreen(frag, defines);
265                 MEM_freeN(frag);
266         }
267         return e_data.background_sh[index];
268 }
269
270 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
271 {
272         wpd->prepass_solid_sh = ensure_deferred_prepass_shader(wpd, false, false);
273         wpd->prepass_solid_hair_sh = ensure_deferred_prepass_shader(wpd, false, true);
274         wpd->prepass_texture_sh = ensure_deferred_prepass_shader(wpd, true, false);
275         wpd->prepass_texture_hair_sh = ensure_deferred_prepass_shader(wpd, true, true);
276         wpd->composite_sh = ensure_deferred_composite_shader(wpd);
277         wpd->background_sh = ensure_background_shader(wpd);
278 }
279
280 /* Using Hammersley distribution */
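/* Each sample stores (cos(phi), sin(phi), r): a direction on the unit disk plus a radius.
 * The radius is used directly (not square-rooted), which biases samples towards the disk
 * center, and each iteration block is offset so successive TAA iterations use a
 * different subset of the distribution. */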
281 static float *create_disk_samples(int num_samples, int num_iterations)
282 {
283         /* vec4 to ensure memory alignment. */
284         const int total_samples = num_samples * num_iterations;
285         float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * total_samples, __func__);
286         const float num_samples_inv = 1.0f / num_samples;
287
288         for (int i = 0; i < total_samples; i++) {
289                 float it_add = (i / num_samples) * 0.499f;
290                 float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
291                 double dphi;
292                 BLI_hammersley_1D(i, &dphi);
293
294                 float phi = (float)dphi * 2.0f * M_PI + it_add;
295                 texels[i][0] = cosf(phi);
296                 texels[i][1] = sinf(phi);
297                 /* This deliberately distributes more samples
298                  * at the center of the disk (and thus the shadow). */
299                 texels[i][2] = r;
300         }
301
302         return (float *)texels;
303 }
304
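/* Build a 64x64 blue-noise texture used to rotate the AO disk samples per pixel and to
 * offset them along their direction, trading the banding of a fixed pattern for noise. */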
305 static struct GPUTexture *create_jitter_texture(int num_samples)
306 {
307         float jitter[64 * 64][4];
308         const float num_samples_inv = 1.0f / num_samples;
309
310         for (int i = 0; i < 64 * 64; i++) {
311                 float phi = blue_noise[i][0] * 2.0f * M_PI;
312                 /* This rotates the samples per pixel. */
313                 jitter[i][0] = cosf(phi);
314                 jitter[i][1] = sinf(phi);
315                 /* This offsets the sample along its direction axis (reduces banding). */
316                 float bn = blue_noise[i][1] - 0.5f;
317                 CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
318                 jitter[i][2] = bn * num_samples_inv;
319                 jitter[i][3] = blue_noise[i][1];
320         }
321
322         UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
323
324         return DRW_texture_create_2D(64, 64, GPU_RGBA16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
325 }
326 /* Functions */
327
328
329 static void workbench_init_object_data(DrawData *dd)
330 {
331         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
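        /* Ids wrap into the 1..256 range; 0 stays free so passes reading the id buffer
         * (e.g. object outlines) can treat it as "no object". */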
332         data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
333         data->shadow_bbox_dirty = true;
334 }
335
336 static void workbench_init_oit_framebuffer(WORKBENCH_FramebufferList *fbl, DefaultTextureList *dtxl)
337 {
338         const float *size = DRW_viewport_size_get();
339         e_data.oit_accum_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);
340         e_data.oit_revealage_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R16F, &draw_engine_workbench_solid);
341
342         GPU_framebuffer_ensure_config(&fbl->transparent_accum_fb, {
343                 GPU_ATTACHMENT_TEXTURE(dtxl->depth),
344                 GPU_ATTACHMENT_TEXTURE(e_data.oit_accum_tx),
345                 GPU_ATTACHMENT_TEXTURE(e_data.oit_revealage_tx),
346         });
347 }
348
349 void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
350 {
351         WORKBENCH_FramebufferList *fbl = vedata->fbl;
352         WORKBENCH_StorageList *stl = vedata->stl;
353         WORKBENCH_PassList *psl = vedata->psl;
354         DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
355         const DRWContextState *draw_ctx = DRW_context_state_get();
356         RegionView3D *rv3d = draw_ctx->rv3d;
357         View3D *v3d = draw_ctx->v3d;
358         Scene *scene = draw_ctx->scene;
359         Object *camera;
360
361         if (v3d && rv3d) {
362                 camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;
363         }
364         else {
365                 camera = scene->camera;
366         }
367
368         if (!stl->g_data) {
369                 /* Alloc transient pointers */
370                 stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
371         }
372         if (!stl->effects) {
373                 stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
374                 workbench_effect_info_init(stl->effects);
375         }
376
377         if (!e_data.next_object_id) {
378                 memset(e_data.prepass_sh_cache,   0, sizeof(e_data.prepass_sh_cache));
379                 memset(e_data.composite_sh_cache, 0, sizeof(e_data.composite_sh_cache));
380                 e_data.next_object_id = 1;
381 #ifdef DEBUG_SHADOW_VOLUME
382                 const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
383 #else
384                 const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
385 #endif
386                 /* TODO only compile on demand */
387                 e_data.shadow_pass_sh = DRW_shader_create(
388                         datatoc_workbench_shadow_vert_glsl,
389                         datatoc_workbench_shadow_geom_glsl,
390                         shadow_frag,
391                         "#define SHADOW_PASS\n"
392                         "#define DOUBLE_MANIFOLD\n");
393                 e_data.shadow_pass_manifold_sh = DRW_shader_create(
394                         datatoc_workbench_shadow_vert_glsl,
395                         datatoc_workbench_shadow_geom_glsl,
396                         shadow_frag,
397                         "#define SHADOW_PASS\n");
398                 e_data.shadow_fail_sh = DRW_shader_create(
399                         datatoc_workbench_shadow_vert_glsl,
400                         datatoc_workbench_shadow_geom_glsl,
401                         shadow_frag,
402                         "#define SHADOW_FAIL\n"
403                         "#define DOUBLE_MANIFOLD\n");
404                 e_data.shadow_fail_manifold_sh = DRW_shader_create(
405                         datatoc_workbench_shadow_vert_glsl,
406                         datatoc_workbench_shadow_geom_glsl,
407                         shadow_frag,
408                         "#define SHADOW_FAIL\n");
409                 e_data.shadow_caps_sh = DRW_shader_create(
410                         datatoc_workbench_shadow_vert_glsl,
411                         datatoc_workbench_shadow_caps_geom_glsl,
412                         shadow_frag,
413                         "#define SHADOW_FAIL\n"
414                         "#define DOUBLE_MANIFOLD\n");
415                 e_data.shadow_caps_manifold_sh = DRW_shader_create(
416                         datatoc_workbench_shadow_vert_glsl,
417                         datatoc_workbench_shadow_caps_geom_glsl,
418                         shadow_frag,
419                         "#define SHADOW_FAIL\n");
420
421                 e_data.ghost_resolve_sh = DRW_shader_create_fullscreen(datatoc_workbench_ghost_resolve_frag_glsl, NULL);
422         }
423         workbench_volume_engine_init();
424         workbench_fxaa_engine_init();
425         workbench_taa_engine_init(vedata);
426
427         WORKBENCH_PrivateData *wpd = stl->g_data;
428         workbench_private_data_init(wpd);
429
430         workbench_dof_engine_init(vedata, camera);
431
432         if (OIT_ENABLED(wpd)) {
433                 if (e_data.oit_resolve_sh == NULL) {
434                         e_data.oit_resolve_sh = DRW_shader_create_fullscreen(
435                                 datatoc_workbench_forward_composite_frag_glsl,
436                                 "#define ALPHA_COMPOSITE\n");
437                 }
438
439                 workbench_forward_choose_shaders(wpd);
440                 workbench_forward_outline_shaders_ensure(wpd);
441         }
442
443         {
444                 const float *viewport_size = DRW_viewport_size_get();
445                 const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
446                 const eGPUTextureFormat nor_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16 : GPU_RGBA32F;
447                 const eGPUTextureFormat comp_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F : GPU_R11F_G11F_B10F;
448                 const eGPUTextureFormat id_tex_format = OBJECT_ID_PASS_ENABLED(wpd) ? GPU_R32UI : GPU_R8;
449
450                 e_data.object_id_tx = NULL;
451                 e_data.color_buffer_tx = NULL;
452                 e_data.composite_buffer_tx = NULL;
453                 e_data.normal_buffer_tx = NULL;
454                 e_data.cavity_buffer_tx = NULL;
455
456                 e_data.composite_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], comp_tex_format, &draw_engine_workbench_solid);
457
458                 if (MATDATA_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
459                         e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
460                 }
461                 if (OBJECT_ID_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
462                         e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], id_tex_format, &draw_engine_workbench_solid);
463                 }
464                 if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
465                         e_data.normal_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], nor_tex_format, &draw_engine_workbench_solid);
466                 }
467                 if (CAVITY_ENABLED(wpd)) {
468                         e_data.cavity_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R16, &draw_engine_workbench_solid);
469                 }
470
471                 GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
472                         GPU_ATTACHMENT_TEXTURE(dtxl->depth),
473                         GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
474                         GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
475                         GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
476                 });
477                 GPU_framebuffer_ensure_config(&fbl->cavity_fb, {
478                         GPU_ATTACHMENT_NONE,
479                         GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
480                 });
481                 GPU_framebuffer_ensure_config(&fbl->composite_fb, {
482                         GPU_ATTACHMENT_TEXTURE(dtxl->depth),
483                         GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
484                 });
485                 GPU_framebuffer_ensure_config(&fbl->color_only_fb, {
486                         GPU_ATTACHMENT_NONE,
487                         GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
488                 });
489
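                /* The color buffer may not have been requested above, but the effect
                 * framebuffer below still needs it as a render target. */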
490                 if (!MATDATA_PASS_ENABLED(wpd) && !GPU_unused_fb_slot_workaround()) {
491                         e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
492                 }
493
494                 GPU_framebuffer_ensure_config(&fbl->effect_fb, {
495                         GPU_ATTACHMENT_NONE,
496                         GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
497                 });
498
499                 if (OBJECT_ID_PASS_ENABLED(wpd)) {
500                         GPU_framebuffer_ensure_config(&fbl->id_clear_fb, {
501                                 GPU_ATTACHMENT_NONE,
502                                 GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
503                         });
504                 }
505         }
506
507         {
508                 /* AO Samples Tex */
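                /* The per-iteration sample count is multiplied by the number of TAA
                 * iterations (capped at 500); the UBO and jitter texture are rebuilt
                 * whenever that total changes. */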
509                 int num_iterations = workbench_taa_calculate_num_iterations(vedata);
510
511                 const int ssao_samples_single_iteration = scene->display.matcap_ssao_samples;
512                 const int ssao_samples = MIN2(num_iterations * ssao_samples_single_iteration, 500);
513
514                 if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
515                         DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
516                         DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
517                 }
518
519                 if (e_data.sampling_ubo == NULL) {
520                         float *samples = create_disk_samples(ssao_samples_single_iteration, num_iterations);
521                         e_data.jitter_tx = create_jitter_texture(ssao_samples);
522                         e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
523                         e_data.cached_sample_num = ssao_samples;
524                         MEM_freeN(samples);
525                 }
526         }
527
528         /* Prepass */
529         {
530                 DRWShadingGroup *grp;
531                 const bool do_cull = CULL_BACKFACE_ENABLED(wpd);
532
533                 int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
534                 psl->prepass_pass = DRW_pass_create("Prepass", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
535                 psl->prepass_hair_pass = DRW_pass_create("Prepass", state);
536
537                 psl->ghost_prepass_pass = DRW_pass_create("Prepass Ghost", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
538                 psl->ghost_prepass_hair_pass = DRW_pass_create("Prepass Ghost", state);
539
540                 psl->ghost_resolve_pass = DRW_pass_create("Resolve Ghost Depth", DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
541                 grp = DRW_shgroup_create(e_data.ghost_resolve_sh, psl->ghost_resolve_pass);
542                 DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &e_data.ghost_depth_tx);
543                 DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
544         }
545
546         {
547                 workbench_aa_create_pass(vedata, &e_data.color_buffer_tx);
548         }
549
550         {
551                 workbench_dof_create_pass(vedata, &e_data.composite_buffer_tx, e_data.jitter_tx);
552         }
553
554         if (CAVITY_ENABLED(wpd)) {
555                 int state = DRW_STATE_WRITE_COLOR;
556                 GPUShader *shader = workbench_cavity_shader_get(SSAO_ENABLED(wpd), CURVATURE_ENABLED(wpd));
557                 psl->cavity_pass = DRW_pass_create("Cavity", state);
558                 DRWShadingGroup *grp = DRW_shgroup_create(shader, psl->cavity_pass);
559                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
560                 DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);
561
562                 if (SSAO_ENABLED(wpd)) {
563                         DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
564                         DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
565                         DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
566                         DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
567                         DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
568                         DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
569                         DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
570                 }
571
572                 if (CURVATURE_ENABLED(wpd)) {
573                         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
574                         DRW_shgroup_uniform_vec2(grp, "curvature_settings", &wpd->world_data.curvature_ridge, 1);
575                 }
576
577                 DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
578         }
579 }
580
581 static void workbench_setup_ghost_framebuffer(WORKBENCH_FramebufferList *fbl)
582 {
583         const float *viewport_size = DRW_viewport_size_get();
584         const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
585
586         e_data.ghost_depth_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_DEPTH_COMPONENT24, &draw_engine_workbench_solid);
587
588         GPU_framebuffer_ensure_config(&fbl->ghost_prepass_fb, {
589                 GPU_ATTACHMENT_TEXTURE(e_data.ghost_depth_tx),
590                 GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
591                 GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
592                 GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
593         });
594 }
595
596 void workbench_deferred_engine_free(void)
597 {
598         for (int index = 0; index < MAX_PREPASS_SHADERS; index++) {
599                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
600         }
601         for (int index = 0; index < MAX_COMPOSITE_SHADERS; index++) {
602                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
603         }
604         for (int index = 0; index < MAX_CAVITY_SHADERS; ++index) {
605                 DRW_SHADER_FREE_SAFE(e_data.cavity_sh[index]);
606         }
607         DRW_SHADER_FREE_SAFE(e_data.ghost_resolve_sh);
608         DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
609         DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
610         DRW_SHADER_FREE_SAFE(e_data.background_sh[0]);
611         DRW_SHADER_FREE_SAFE(e_data.background_sh[1]);
612
613         DRW_SHADER_FREE_SAFE(e_data.oit_resolve_sh);
614
615         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
616         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
617         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
618         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
619         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
620         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
621
622         workbench_volume_engine_free();
623         workbench_fxaa_engine_free();
624         workbench_taa_engine_free();
625         workbench_dof_engine_free();
626 }
627
628 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
629 {
630         DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
631         if (MATDATA_PASS_ENABLED(wpd)) {
632                 DRW_shgroup_uniform_texture_ref(grp, "materialBuffer", &e_data.color_buffer_tx);
633         }
634         else {
635                 DRW_shgroup_uniform_vec3(grp, "materialSingleColor", wpd->shading.single_color, 1);
636         }
637         if (OBJECT_OUTLINE_ENABLED(wpd)) {
638                 DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
639         }
640         if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
641                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
642         }
643         if (CAVITY_ENABLED(wpd)) {
644                 DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
645         }
646         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
647                 DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
648         }
649         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
650                 DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
651         }
652         if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
653                 BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
654                 DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
655         }
656 }
657
658 void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
659 {
660         WORKBENCH_StorageList *stl = vedata->stl;
661         WORKBENCH_PassList *psl = vedata->psl;
662         WORKBENCH_PrivateData *wpd = stl->g_data;
663         DRWShadingGroup *grp;
664         const DRWContextState *draw_ctx = DRW_context_state_get();
665
666         Scene *scene = draw_ctx->scene;
667
668         workbench_volume_cache_init(vedata);
669
670         select_deferred_shaders(wpd);
671
672         /* Background Pass */
673         {
674                 psl->background_pass = DRW_pass_create(
675                         "Background", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
676                 grp = DRW_shgroup_create(wpd->background_sh, psl->background_pass);
677                 DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
678                 DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
679                 if (OBJECT_OUTLINE_ENABLED(wpd)) {
680                         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
681                 }
682                 DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
683
684                 if (draw_ctx->rv3d && (draw_ctx->rv3d->rflag & RV3D_CLIPPING) && draw_ctx->rv3d->clipbb) {
685                         GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_3D_UNIFORM_COLOR_BACKGROUND);
686                         grp = DRW_shgroup_create(shader, psl->background_pass);
687                         wpd->world_clip_planes_batch = DRW_draw_background_clipping_batch_from_rv3d(draw_ctx->rv3d);
688                         DRW_shgroup_call_add(grp, wpd->world_clip_planes_batch, NULL);
689                         DRW_shgroup_uniform_vec4(grp, "color", &wpd->world_clip_planes_color[0], 1);
690                 }
691         }
692
693         /* Deferred Mix Pass */
694         {
695                 workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);
696                 studiolight_update_light(wpd, e_data.display.light_direction);
697
698                 if (SHADOW_ENABLED(wpd)) {
699                         psl->composite_pass = DRW_pass_create(
700                                 "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL | DRW_STATE_DEPTH_GREATER);
701                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
702                         workbench_composite_uniforms(wpd, grp);
703                         DRW_shgroup_stencil_mask(grp, 0x00);
704                         DRW_shgroup_uniform_float_copy(grp, "lightMultiplier", 1.0f);
705                         DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
706                         DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
707                         DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
708                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
709
710                         /* Stencil Shadow passes. */
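                        /* Stencil shadow volumes: depth-pass marks pixels covered by volume faces
                         * that pass the depth test, while the depth-fail variants (with caps) handle
                         * the case where the camera may be inside a volume. The composite then runs
                         * twice: above for lit pixels (stencil == 0) and below for shadowed ones
                         * (stencil != 0). */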
711 #ifdef DEBUG_SHADOW_VOLUME
712                         DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
713                         DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
714 #else
715                         DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
716                         DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
717 #endif
718                         psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
719                         psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
720                         psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
721                         psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
722                         psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
723                         psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);
724
725 #ifndef DEBUG_SHADOW_VOLUME
726                         grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
727                         DRW_shgroup_stencil_mask(grp, 0xFF);
728                         grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
729                         DRW_shgroup_stencil_mask(grp, 0xFF);
730                         grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
731                         DRW_shgroup_stencil_mask(grp, 0xFF);
732                         grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
733                         DRW_shgroup_stencil_mask(grp, 0xFF);
734                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
735                         DRW_shgroup_stencil_mask(grp, 0xFF);
736                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
737                         DRW_shgroup_stencil_mask(grp, 0xFF);
738
739                         psl->composite_shadow_pass = DRW_pass_create(
740                                 "Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL | DRW_STATE_DEPTH_GREATER);
741                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
742                         DRW_shgroup_stencil_mask(grp, 0x00);
743                         workbench_composite_uniforms(wpd, grp);
744                         DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
745                         DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
746                         DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
747                         DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
748                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
749 #endif
750                 }
751                 else {
752                         psl->composite_pass = DRW_pass_create(
753                                 "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_GREATER);
754                         grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
755                         workbench_composite_uniforms(wpd, grp);
756                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
757                 }
758         }
759
760         /**
761          * Order Independent Transparency.
762          * Similar to workbench forward. The code is duplicated to avoid
763          * spaghetti with workbench forward; it would be great to unify
764          * this in a clean way.
765          **/
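        /* This is weighted-blended OIT: transparent surfaces accumulate into a color and
         * a revealage buffer, which the resolve shader composites over the opaque result
         * in a single fullscreen pass. */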
766         if (OIT_ENABLED(wpd)) {
767                 const bool do_cull = CULL_BACKFACE_ENABLED(wpd);
768                 const int cull_state = (do_cull) ? DRW_STATE_CULL_BACK : 0;
769                 /* Transparency Accum */
770                 {
771                         /* Same as forward, but here we use the depth test so
772                          * transparency does not bleed through other solid objects. */
773                         int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_OIT | DRW_STATE_DEPTH_LESS | cull_state;
774                         psl->transparent_accum_pass = DRW_pass_create("Transparent Accum", state);
775                 }
776                 /* Depth */
777                 {
778                         int state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS | cull_state;
779                         psl->object_outline_pass = DRW_pass_create("Transparent Depth", state);
780                 }
781                 /* OIT Composite */
782                 {
783                         int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND;
784                         psl->oit_composite_pass = DRW_pass_create("OIT Composite", state);
785
786                         grp = DRW_shgroup_create(e_data.oit_resolve_sh, psl->oit_composite_pass);
787                         DRW_shgroup_uniform_texture_ref(grp, "transparentAccum", &e_data.oit_accum_tx);
788                         DRW_shgroup_uniform_texture_ref(grp, "transparentRevealage", &e_data.oit_revealage_tx);
789                         DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
790                         DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
791                 }
792         }
793 }
794
795 static WORKBENCH_MaterialData *get_or_create_material_data(
796         WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int color_type, int interp)
797 {
798         WORKBENCH_StorageList *stl = vedata->stl;
799         WORKBENCH_PassList *psl = vedata->psl;
800         WORKBENCH_PrivateData *wpd = stl->g_data;
801         WORKBENCH_MaterialData *material;
802         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
803                 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
804         WORKBENCH_MaterialData material_template;
805         const bool is_ghost = (ob->dtx & OB_DRAWXRAY);
806
807         /* Solid */
808         workbench_material_update_data(wpd, ob, mat, &material_template);
809         material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
810         material_template.color_type = color_type;
811         material_template.ima = ima;
812         material_template.interp = interp;
813         uint hash = workbench_material_get_hash(&material_template, is_ghost);
814
815         material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
816         if (material == NULL) {
817                 material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
818                 material->shgrp = DRW_shgroup_create(
819                         (color_type == V3D_SHADING_TEXTURE_COLOR) ? wpd->prepass_texture_sh : wpd->prepass_solid_sh,
820                         (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
821                 workbench_material_copy(material, &material_template);
822                 DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
823                 DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
824                 workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, true, interp);
825                 if (wpd->world_clip_planes) {
826                         const DRWContextState *draw_ctx = DRW_context_state_get();
827                         RegionView3D *rv3d = draw_ctx->rv3d;
828                         DRW_shgroup_world_clip_planes_from_rv3d(material->shgrp, rv3d);
829                 }
830                 BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
831         }
832         return material;
833 }
834
835 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
836 {
837         WORKBENCH_StorageList *stl = vedata->stl;
838         WORKBENCH_PassList *psl = vedata->psl;
839         WORKBENCH_PrivateData *wpd = stl->g_data;
840
841         for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
842                 if (md->type != eModifierType_ParticleSystem) {
843                         continue;
844                 }
845                 ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
846                 if (!psys_check_enabled(ob, psys, false)) {
847                         continue;
848                 }
849                 if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
850                         continue;
851                 }
852                 ParticleSettings *part = psys->part;
853                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
854
855                 if (draw_as == PART_DRAW_PATH) {
856                         Material *mat;
857                         Image *image;
858                         int interp;
859                         workbench_material_get_image_and_mat(ob, part->omat, &image, &interp, &mat);
860                         int color_type = workbench_material_determine_color_type(wpd, image, ob);
861                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, color_type, interp);
862
863                         struct GPUShader *shader = (color_type != V3D_SHADING_TEXTURE_COLOR) ?
864                                 wpd->prepass_solid_hair_sh :
865                                 wpd->prepass_texture_hair_sh;
866                         DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
867                                 ob, psys, md,
868                                 (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
869                                 shader);
870                         DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
871                         DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
872                         workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, true, interp);
873                 }
874         }
875 }
876
877 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
878 {
879         WORKBENCH_StorageList *stl = vedata->stl;
880         WORKBENCH_PassList *psl = vedata->psl;
881         WORKBENCH_PrivateData *wpd = stl->g_data;
882         const DRWContextState *draw_ctx = DRW_context_state_get();
883         Scene *scene = draw_ctx->scene;
884
885         if (!DRW_object_is_renderable(ob)) {
886                 return;
887         }
888
889         if (ob->type == OB_MESH) {
890                 workbench_cache_populate_particles(vedata, ob);
891         }
892
893         ModifierData *md;
894         if (((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
895             (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
896             (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
897             (((SmokeModifierData *)md)->domain != NULL))
898         {
899                 workbench_volume_cache_populate(vedata, scene, ob, md);
900                 return; /* Do not draw solid in this case. */
901         }
902
903         if (!(DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF)) {
904                 return;
905         }
906         if (ob->dt < OB_SOLID) {
907                 return;
908         }
909
910         WORKBENCH_MaterialData *material;
911         if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
912                 const bool is_active = (ob == draw_ctx->obact);
913                 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
914                 const bool use_hide = is_active && DRW_object_use_hide_faces(ob);
915                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
916                 const Mesh *me = (ob->type == OB_MESH) ? ob->data : NULL;
917                 bool has_transp_mat = false;
918
919                 if (!is_sculpt_mode && me && me->mloopuv && TEXTURE_DRAWING_ENABLED(wpd)) {
920                         /* Draw textured */
921                         struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
922                         for (int i = 0; i < materials_len; i++) {
923                                 if (geom_array != NULL && geom_array[i] != NULL) {
924                                         Material *mat;
925                                         Image *image;
926                                         int interp;
927                                         workbench_material_get_image_and_mat(ob, i + 1, &image, &interp, &mat);
928                                         int color_type = workbench_material_determine_color_type(wpd, image, ob);
929                                         material = get_or_create_material_data(vedata, ob, mat, image, color_type, interp);
930                                         DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
931                                 }
932                         }
933                 }
934                 else if (ELEM(wpd->shading.color_type,
935                               V3D_SHADING_SINGLE_COLOR, V3D_SHADING_OBJECT_COLOR, V3D_SHADING_RANDOM_COLOR))
936                 {
937                         if ((ob->col[3] < 1.0f) &&
938                             (wpd->shading.color_type == V3D_SHADING_OBJECT_COLOR))
939                         {
940                                 /* Hack */
941                                 wpd->shading.xray_alpha = ob->col[3];
942                                 material = workbench_forward_get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type, 0);
943                                 has_transp_mat = true;
944                         }
945                         else {
946                                 /* Draw solid color */
947                                 material = get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type, 0);
948                         }
949                         if (is_sculpt_mode) {
950                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
951                         }
952                         else {
953                                 struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
954                                 if (geom) {
955                                         DRW_shgroup_call_object_add(material->shgrp, geom, ob);
956                                 }
957                         }
958                 }
959                 else {
960                         /* Draw material color */
961                         if (is_sculpt_mode) {
962                                 /* Multiple materials are not supported in sculpt mode yet. */
963                                 Material *mat = give_current_material(ob, 1);
964                                 material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
965                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
966                         }
967                         else {
968                                 struct GPUBatch **geoms;
969                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
970                                 memset(gpumat_array, 0, sizeof(*gpumat_array) * materials_len);
971
972                                 geoms = DRW_cache_object_surface_material_get(ob, gpumat_array, materials_len, NULL, NULL, NULL);
973                                 for (int i = 0; i < materials_len; ++i) {
974                                         if (geoms != NULL && geoms[i] != NULL) {
975                                                 Material *mat = give_current_material(ob, i + 1);
976                                                 if (mat != NULL && mat->a < 1.0f) {
977                                                         /* Hack */
978                                                         wpd->shading.xray_alpha = mat->a;
979                                                         material = workbench_forward_get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
980                                                         has_transp_mat = true;
981                                                 }
982                                                 else {
983                                                         material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
984                                                 }
985                                                 DRW_shgroup_call_object_add(material->shgrp, geoms[i], ob);
986                                         }
987                                 }
988                         }
989                 }
990
991                 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW)) {
992                         bool is_manifold;
993                         struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
994                         if (geom_shadow) {
995                                 if (is_sculpt_mode || use_hide) {
996                                         /* Currently unsupported in sculpt mode. We could revert to the slow
997                                          * method in this case but I'm not sure if it's a good idea given that
998                                          * sculpted meshes are heavy to begin with. */
999                                         // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
1000                                 }
1001                                 else {
1002                                         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
1003                                                 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
1004
1005                                         if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
1006
1007                                                 invert_m4_m4(ob->imat, ob->obmat);
1008                                                 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
1009
1010                                                 DRWShadingGroup *grp;
1011                                                 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
1012
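                                                /* The depth-pass technique avoids drawing volume caps, but is only
                                                 * valid when the camera is outside the object's shadow and the
                                                 * object has no transparent material; otherwise use depth-fail. */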
1013                                                 if (use_shadow_pass_technique && !has_transp_mat) {
1014                                                         if (is_manifold) {
1015                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
1016                                                         }
1017                                                         else {
1018                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
1019                                                         }
1020                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
1021                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
1022                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
1023 #ifdef DEBUG_SHADOW_VOLUME
1024                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
1025 #endif
1026                                                 }
1027                                                 else {
1028                                                         float extrude_distance = studiolight_object_shadow_distance(wpd, ob, engine_object_data);
1029
1030                                                         /* TODO(fclem): only use caps if they are in the view frustum. */
1031                                                         const bool need_caps = true;
1032                                                         if (need_caps) {
1033                                                                 if (is_manifold) {
1034                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
1035                                                                 }
1036                                                                 else {
1037                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
1038                                                                 }
1039                                                                 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
1040                                                                 DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
1041                                                                 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
1042                                                         }
1043
1044                                                         if (is_manifold) {
1045                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
1046                                                         }
1047                                                         else {
1048                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
1049                                                         }
1050                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
1051                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
1052                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
1053 #ifdef DEBUG_SHADOW_VOLUME
1054                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
1055 #endif
1056                                                 }
1057                                         }
1058                                 }
1059                         }
1060                 }
1061         }
1062 }
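/* The three shadow shading-group setups in the populate function above repeat the same
 * uniform layout (light direction plus extrusion distance). A hypothetical helper that
 * factors this out could look like the sketch below; `workbench_shadow_shgroup_create`
 * and its parameter names are illustrative only and are not part of the actual file. */
#if 0  /* Illustrative sketch, not compiled. */
static DRWShadingGroup *workbench_shadow_shgroup_create(
        struct GPUShader *sh, DRWPass *pass, const float *shadow_dir, float light_distance)
{
	DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
	DRW_shgroup_uniform_vec3(grp, "lightDirection", shadow_dir, 1);
	DRW_shgroup_uniform_float_copy(grp, "lightDistance", light_distance);
	return grp;
}
#endif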
1063
1064 void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
1065 {
1066 }
1067
1068 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
1069 {
1070         WORKBENCH_StorageList *stl = vedata->stl;
1071         WORKBENCH_FramebufferList *fbl = vedata->fbl;
1072         WORKBENCH_PrivateData *wpd = stl->g_data;
1073         const float clear_depth = 1.0f;
1074         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
1075         uint clear_stencil = 0x00;
1076
1077         DRW_stats_group_start("Clear Background");
1078
1079         if (OBJECT_ID_PASS_ENABLED(wpd)) {
1080                 /* Of all the color buffers, only the object id buffer needs to be cleared. */
1081                 GPU_framebuffer_bind(fbl->id_clear_fb);
1082                 GPU_framebuffer_clear_color(fbl->id_clear_fb, clear_color);
1083         }
1084
1085         GPU_framebuffer_bind(fbl->prepass_fb);
1086         int clear_bits = GPU_DEPTH_BIT;
1087         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
1088         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
1089         DRW_stats_group_end();
1090 }
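/* For reference, the clear-bit composition above is roughly equivalent to the plain branch
 * below; SET_FLAG_FROM_TEST (from BLI_utildefines.h) sets or clears a flag depending on a
 * boolean test. This is only an explanatory sketch, not compiled. */
#if 0
	int clear_bits = GPU_DEPTH_BIT;
	if (SHADOW_ENABLED(wpd)) {
		clear_bits |= GPU_STENCIL_BIT;
	}
	else {
		clear_bits &= ~GPU_STENCIL_BIT;  /* No-op here, since the bit starts cleared. */
	}
#endif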
1091
1092 void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
1093 {
1094         WORKBENCH_PassList *psl = vedata->psl;
1095         WORKBENCH_StorageList *stl = vedata->stl;
1096         WORKBENCH_FramebufferList *fbl = vedata->fbl;
1097         WORKBENCH_PrivateData *wpd = stl->g_data;
1098         DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
1099
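        /* Overall order: prepass (solid + hair), ghost prepass/resolve, cavity, stencil
         * shadow volumes + composite, ghost stencil for overlays, background, transparent
         * OIT accumulation/composite, volumes, then depth of field and anti-aliasing. */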
1100         if (TAA_ENABLED(wpd)) {
1101                 workbench_taa_draw_scene_start(vedata);
1102         }
1103
1104         /* The prepass framebuffer was already cleared in workbench_deferred_draw_background(). */
1105         GPU_framebuffer_bind(fbl->prepass_fb);
1106         DRW_draw_pass(psl->prepass_pass);
1107         DRW_draw_pass(psl->prepass_hair_pass);
1108
1109         if (GHOST_ENABLED(psl)) {
1110                 /* Late init, so the depth buffer is only requested when ghost objects are actually drawn. */
1111                 workbench_setup_ghost_framebuffer(fbl);
1112
1113                 GPU_framebuffer_bind(fbl->ghost_prepass_fb);
1114                 GPU_framebuffer_clear_depth(fbl->ghost_prepass_fb, 1.0f);
1115                 DRW_draw_pass(psl->ghost_prepass_pass);
1116                 DRW_draw_pass(psl->ghost_prepass_hair_pass);
1117
1118                 GPU_framebuffer_bind(dfbl->depth_only_fb);
1119                 DRW_draw_pass(psl->ghost_resolve_pass);
1120         }
1121
1122         if (CAVITY_ENABLED(wpd)) {
1123                 GPU_framebuffer_bind(fbl->cavity_fb);
1124                 DRW_draw_pass(psl->cavity_pass);
1125         }
1126
1127         if (SHADOW_ENABLED(wpd)) {
1128 #ifdef DEBUG_SHADOW_VOLUME
1129                 GPU_framebuffer_bind(fbl->composite_fb);
1130                 DRW_draw_pass(psl->composite_pass);
1131 #else
1132                 GPU_framebuffer_bind(dfbl->depth_only_fb);
1133 #endif
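                /* Draw the shadow volumes. In the normal (non-debug) path these passes only
                 * write the stencil buffer, building a mask of the pixels in shadow. */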
1134                 DRW_draw_pass(psl->shadow_depth_pass_pass);
1135                 DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
1136                 DRW_draw_pass(psl->shadow_depth_fail_pass);
1137                 DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
1138                 DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
1139                 DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
1140
1141                 if (GHOST_ENABLED(psl)) {
1142                         /* We need to set the stencil buffer to 0 where ghost objects are drawn,
1143                          * otherwise they would receive shadows, and incorrect ones at that. */
1144                         DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1145                         DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1146
1147                         DRW_draw_pass(psl->ghost_prepass_pass);
1148                         DRW_draw_pass(psl->ghost_prepass_hair_pass);
1149                 }
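                /* The composite passes below shade lit and shadowed pixels separately,
                 * using the stencil mask built by the shadow volume passes above. */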
1150 #ifndef DEBUG_SHADOW_VOLUME
1151                 GPU_framebuffer_bind(fbl->composite_fb);
1152                 DRW_draw_pass(psl->composite_pass);
1153                 DRW_draw_pass(psl->composite_shadow_pass);
1154 #endif
1155         }
1156         else {
1157                 GPU_framebuffer_bind(fbl->composite_fb);
1158                 DRW_draw_pass(psl->composite_pass);
1159         }
1160
1161         /* TODO(fclem): only enable this when needed (when there are overlays). */
1162         if (GHOST_ENABLED(psl)) {
1163                 /* To avoid drawing on top of ghost objects, we clear the stencil to 0xFF,
1164                  * write 0x00 where ghost objects are drawn, and only draw overlays where
1165                  * the stencil is not 0. */
1166                 GPU_framebuffer_bind(dfbl->depth_only_fb);
1167                 GPU_framebuffer_clear_stencil(dfbl->depth_only_fb, 0xFF);
1168
1169                 DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1170                 DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1171
1172                 DRW_draw_pass(psl->ghost_prepass_pass);
1173                 DRW_draw_pass(psl->ghost_prepass_hair_pass);
1174         }
1175
1176         GPU_framebuffer_bind(fbl->composite_fb);
1177         DRW_draw_pass(psl->background_pass);
1178
1179         if (OIT_ENABLED(wpd) && !DRW_pass_is_empty(psl->transparent_accum_pass)) {
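                /* Order independent transparency: accumulate transparent surfaces into the
                 * accum/revealage buffers, then merge the result into the composite buffer. */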
1180                 DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
1181                 /* Late init, so the OIT buffers are only requested when they are actually used. */
1182                 workbench_init_oit_framebuffer(fbl, dtxl);
1183
1184                 const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
1185                 GPU_framebuffer_bind(fbl->transparent_accum_fb);
1186                 GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
1187                 DRW_draw_pass(psl->transparent_accum_pass);
1188
1189                 GPU_framebuffer_bind(fbl->composite_fb);
1190                 DRW_draw_pass(psl->oit_composite_pass);
1191         }
1192
1193         if (wpd->volumes_do) {
1194                 GPU_framebuffer_bind(fbl->color_only_fb);
1195                 DRW_draw_pass(psl->volume_pass);
1196         }
1197
1198         workbench_dof_draw_pass(vedata);
1199         workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
1200 }
1201
1202 void workbench_deferred_draw_finish(WORKBENCH_Data *vedata)
1203 {
1204         WORKBENCH_StorageList *stl = vedata->stl;
1205         WORKBENCH_PrivateData *wpd = stl->g_data;
1206
1207         /* XXX TODO(fclem) do not discard UBOS after drawing! Store them per viewport. */
1208         workbench_private_data_free(wpd);
1209         workbench_volume_smoke_textures_free(wpd);
1210 }
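
/* A minimal sketch of how an engine draw callback might invoke the deferred entry points
 * above, assuming the usual order of background clear, scene draw, then per-frame cleanup.
 * The wrapper function name is illustrative only; the actual caller lives in the workbench
 * engine code, not in this file. */
#if 0  /* Illustrative sketch, not compiled. */
static void workbench_solid_draw_scene_sketch(WORKBENCH_Data *vedata)
{
	workbench_deferred_draw_background(vedata);
	workbench_deferred_draw_scene(vedata);
	workbench_deferred_draw_finish(vedata);
}
#endif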