28ac6d0c274372babf500550f52eaf4b5337cbdf
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2016, Blender Foundation.
17  */
18
19 /** \file
20  * \ingroup draw_engine
21  */
22
23 #include "workbench_private.h"
24
25 #include "BLI_alloca.h"
26 #include "BLI_dynstr.h"
27 #include "BLI_utildefines.h"
28 #include "BLI_rand.h"
29 #include "BLI_string_utils.h"
30
31 #include "BKE_modifier.h"
32 #include "BKE_object.h"
33 #include "BKE_paint.h"
34 #include "BKE_particle.h"
35
36 #include "DNA_image_types.h"
37 #include "DNA_mesh_types.h"
38 #include "DNA_modifier_types.h"
39 #include "DNA_node_types.h"
40
41 #include "GPU_shader.h"
42 #include "GPU_texture.h"
43 #include "GPU_extensions.h"
44
45 #include "../eevee/eevee_lut.h" /* TODO find somewhere to share blue noise Table */
46
47 /* *********** STATIC *********** */
48
49 /* #define DEBUG_SHADOW_VOLUME */
50
51 #ifdef DEBUG_SHADOW_VOLUME
52 #  include "draw_debug.h"
53 #endif
54
/* Prepass shader variants for one GPU shader configuration (e.g. with or
 * without clipping support); indexed by
 * workbench_material_get_prepass_shader_index(). */
typedef struct WORKBENCH_DEFERRED_Shaders {
  struct GPUShader *prepass_sh_cache[MAX_PREPASS_SHADERS];
} WORKBENCH_DEFERRED_Shaders;

/* Engine-wide static state: lazily-compiled shader caches, borrowed texture
 * references and AO sampling resources. Freed in
 * workbench_deferred_engine_free(). */
static struct {
  /* One prepass shader cache per shader configuration. */
  WORKBENCH_DEFERRED_Shaders sh_data[GPU_SHADER_CFG_LEN];

  /* Deferred resolve shaders, indexed by
   * workbench_material_get_composite_shader_index(). */
  struct GPUShader *composite_sh_cache[MAX_COMPOSITE_SHADERS];
  struct GPUShader *cavity_sh[MAX_CAVITY_SHADERS];
  /* [0] without, [1] with object outline support. */
  struct GPUShader *background_sh[2];
  struct GPUShader *ghost_resolve_sh;
  /* Stencil shadow-volume shaders: pass/fail method x manifold variant. */
  struct GPUShader *shadow_fail_sh;
  struct GPUShader *shadow_fail_manifold_sh;
  struct GPUShader *shadow_pass_sh;
  struct GPUShader *shadow_pass_manifold_sh;
  struct GPUShader *shadow_caps_sh;
  struct GPUShader *shadow_caps_manifold_sh;
  struct GPUShader *oit_resolve_sh;

  /* TODO(fclem) move everything below to wpd and custom viewlayer data. */
  struct GPUTexture *oit_accum_tx;        /* ref only, not alloced */
  struct GPUTexture *oit_revealage_tx;    /* ref only, not alloced */
  struct GPUTexture *ghost_depth_tx;      /* ref only, not alloced */
  struct GPUTexture *object_id_tx;        /* ref only, not alloced */
  struct GPUTexture *color_buffer_tx;     /* ref only, not alloced */
  struct GPUTexture *cavity_buffer_tx;    /* ref only, not alloced */
  struct GPUTexture *metallic_buffer_tx;  /* ref only, not alloced */
  struct GPUTexture *normal_buffer_tx;    /* ref only, not alloced */
  struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

  SceneDisplay display; /* world light direction for shadows */
  /* Also doubles as a "static data initialized" flag (0 = not yet). */
  int next_object_id;

  /* SSAO disk samples UBO and per-pixel jitter texture; rebuilt when the
   * sample count changes (see cached_sample_num). */
  struct GPUUniformBuffer *sampling_ubo;
  struct GPUTexture *jitter_tx;
  int cached_sample_num;
} e_data = {{{{NULL}}}};
92
93 /* Shaders */
94 extern char datatoc_common_hair_lib_glsl[];
95 extern char datatoc_common_view_lib_glsl[];
96
97 extern char datatoc_workbench_prepass_vert_glsl[];
98 extern char datatoc_workbench_prepass_frag_glsl[];
99 extern char datatoc_workbench_cavity_frag_glsl[];
100 extern char datatoc_workbench_forward_composite_frag_glsl[];
101 extern char datatoc_workbench_deferred_composite_frag_glsl[];
102 extern char datatoc_workbench_deferred_background_frag_glsl[];
103 extern char datatoc_workbench_ghost_resolve_frag_glsl[];
104
105 extern char datatoc_workbench_shadow_vert_glsl[];
106 extern char datatoc_workbench_shadow_geom_glsl[];
107 extern char datatoc_workbench_shadow_caps_geom_glsl[];
108 extern char datatoc_workbench_shadow_debug_frag_glsl[];
109
110 extern char datatoc_workbench_background_lib_glsl[];
111 extern char datatoc_workbench_cavity_lib_glsl[];
112 extern char datatoc_workbench_common_lib_glsl[];
113 extern char datatoc_workbench_data_lib_glsl[];
114 extern char datatoc_workbench_object_outline_lib_glsl[];
115 extern char datatoc_workbench_curvature_lib_glsl[];
116 extern char datatoc_workbench_world_light_lib_glsl[];
117
118 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
119
120 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
121 {
122   DynStr *ds = BLI_dynstr_new();
123
124   BLI_dynstr_append(ds, datatoc_common_view_lib_glsl);
125   BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
126   BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
127   BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
128
129   if (!FLAT_ENABLED(wpd)) {
130     BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
131   }
132   if (OBJECT_OUTLINE_ENABLED(wpd)) {
133     BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
134   }
135   if (CURVATURE_ENABLED(wpd)) {
136     BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
137   }
138
139   BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
140
141   char *str = BLI_dynstr_get_cstring(ds);
142   BLI_dynstr_free(ds);
143   return str;
144 }
145
146 static char *workbench_build_prepass_frag(void)
147 {
148   DynStr *ds = BLI_dynstr_new();
149
150   BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
151   BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
152   BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
153
154   char *str = BLI_dynstr_get_cstring(ds);
155   BLI_dynstr_free(ds);
156   return str;
157 }
158
159 static char *workbench_build_prepass_vert(bool is_hair)
160 {
161   DynStr *ds = BLI_dynstr_new();
162   if (is_hair) {
163     BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
164   }
165   BLI_dynstr_append(ds, datatoc_common_view_lib_glsl);
166   BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
167   char *str = BLI_dynstr_get_cstring(ds);
168   BLI_dynstr_free(ds);
169   return str;
170 }
171
172 static char *workbench_build_cavity_frag(bool cavity, bool curvature, bool high_dpi)
173 {
174   DynStr *ds = BLI_dynstr_new();
175
176   if (cavity) {
177     BLI_dynstr_append(ds, "#define USE_CAVITY\n");
178   }
179   if (curvature) {
180     BLI_dynstr_append(ds, "#define USE_CURVATURE\n");
181   }
182   if (high_dpi) {
183     BLI_dynstr_append(ds, "#define CURVATURE_OFFSET 2\n");
184   }
185   if (NORMAL_ENCODING_ENABLED()) {
186     BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
187   }
188   BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
189   BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
190   BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
191   BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
192
193   char *str = BLI_dynstr_get_cstring(ds);
194   BLI_dynstr_free(ds);
195   return str;
196 }
197
198 static GPUShader *workbench_cavity_shader_get(bool cavity, bool curvature)
199 {
200   const bool high_dpi = (U.pixelsize > 1.5f);
201   int index = 0;
202   SET_FLAG_FROM_TEST(index, cavity, 1 << 0);
203   SET_FLAG_FROM_TEST(index, curvature, 1 << 1);
204   SET_FLAG_FROM_TEST(index, high_dpi, 1 << 2);
205
206   GPUShader **sh = &e_data.cavity_sh[index];
207   if (*sh == NULL) {
208     char *cavity_frag = workbench_build_cavity_frag(cavity, curvature, high_dpi);
209     *sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
210     MEM_freeN(cavity_frag);
211   }
212   return *sh;
213 }
214
215 static GPUShader *ensure_deferred_prepass_shader(WORKBENCH_PrivateData *wpd,
216                                                  bool is_uniform_color,
217                                                  bool is_hair,
218                                                  eGPUShaderConfig sh_cfg)
219 {
220   WORKBENCH_DEFERRED_Shaders *sh_data = &e_data.sh_data[sh_cfg];
221   int index = workbench_material_get_prepass_shader_index(wpd, is_uniform_color, is_hair);
222   if (sh_data->prepass_sh_cache[index] == NULL) {
223     const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
224     char *defines = workbench_material_build_defines(wpd, is_uniform_color, is_hair);
225     char *prepass_vert = workbench_build_prepass_vert(is_hair);
226     char *prepass_frag = workbench_build_prepass_frag();
227     sh_data->prepass_sh_cache[index] = GPU_shader_create_from_arrays({
228         .vert = (const char *[]){sh_cfg_data->lib, prepass_vert, NULL},
229         .frag = (const char *[]){prepass_frag, NULL},
230         .defs = (const char *[]){sh_cfg_data->def, defines, NULL},
231     });
232     MEM_freeN(prepass_vert);
233     MEM_freeN(prepass_frag);
234     MEM_freeN(defines);
235   }
236   return sh_data->prepass_sh_cache[index];
237 }
238
239 static GPUShader *ensure_deferred_composite_shader(WORKBENCH_PrivateData *wpd)
240 {
241   int index = workbench_material_get_composite_shader_index(wpd);
242   if (e_data.composite_sh_cache[index] == NULL) {
243     char *defines = workbench_material_build_defines(wpd, false, false);
244     char *composite_frag = workbench_build_composite_frag(wpd);
245     e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
246     MEM_freeN(composite_frag);
247     MEM_freeN(defines);
248   }
249   return e_data.composite_sh_cache[index];
250 }
251
252 static GPUShader *ensure_background_shader(WORKBENCH_PrivateData *wpd)
253 {
254   const int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
255   if (e_data.background_sh[index] == NULL) {
256     const char *defines = (index) ? "#define V3D_SHADING_OBJECT_OUTLINE\n" : NULL;
257     char *frag = BLI_string_joinN(datatoc_workbench_data_lib_glsl,
258                                   datatoc_workbench_common_lib_glsl,
259                                   datatoc_workbench_background_lib_glsl,
260                                   datatoc_workbench_object_outline_lib_glsl,
261                                   datatoc_workbench_deferred_background_frag_glsl);
262     e_data.background_sh[index] = DRW_shader_create_fullscreen(frag, defines);
263     MEM_freeN(frag);
264   }
265   return e_data.background_sh[index];
266 }
267
/* Resolve (lazily compiling on first use) every shader the deferred pipeline
 * needs for this frame and store them on the private data.
 * Prepass variants are (is_uniform_color, is_hair) combinations. */
static void select_deferred_shaders(WORKBENCH_PrivateData *wpd, eGPUShaderConfig sh_cfg)
{
  wpd->prepass_sh = ensure_deferred_prepass_shader(wpd, false, false, sh_cfg);
  wpd->prepass_hair_sh = ensure_deferred_prepass_shader(wpd, false, true, sh_cfg);
  wpd->prepass_uniform_sh = ensure_deferred_prepass_shader(wpd, true, false, sh_cfg);
  wpd->prepass_uniform_hair_sh = ensure_deferred_prepass_shader(wpd, true, true, sh_cfg);
  wpd->composite_sh = ensure_deferred_composite_shader(wpd);
  wpd->background_sh = ensure_background_shader(wpd);
}
277
278 /* Using Hammersley distribution */
279 static float *create_disk_samples(int num_samples, int num_iterations)
280 {
281   /* vec4 to ensure memory alignment. */
282   const int total_samples = num_samples * num_iterations;
283   float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * total_samples, __func__);
284   const float num_samples_inv = 1.0f / num_samples;
285
286   for (int i = 0; i < total_samples; i++) {
287     float it_add = (i / num_samples) * 0.499f;
288     float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
289     double dphi;
290     BLI_hammersley_1d(i, &dphi);
291
292     float phi = (float)dphi * 2.0f * M_PI + it_add;
293     texels[i][0] = cosf(phi);
294     texels[i][1] = sinf(phi);
295     /* This deliberately distribute more samples
296      * at the center of the disk (and thus the shadow). */
297     texels[i][2] = r;
298   }
299
300   return (float *)texels;
301 }
302
303 static struct GPUTexture *create_jitter_texture(int num_samples)
304 {
305   float jitter[64 * 64][4];
306   const float num_samples_inv = 1.0f / num_samples;
307
308   for (int i = 0; i < 64 * 64; i++) {
309     float phi = blue_noise[i][0] * 2.0f * M_PI;
310     /* This rotate the sample per pixels */
311     jitter[i][0] = cosf(phi);
312     jitter[i][1] = sinf(phi);
313     /* This offset the sample along it's direction axis (reduce banding) */
314     float bn = blue_noise[i][1] - 0.5f;
315     CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
316     jitter[i][2] = bn * num_samples_inv;
317     jitter[i][3] = blue_noise[i][1];
318   }
319
320   UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
321
322   return DRW_texture_create_2d(64, 64, GPU_RGBA16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
323 }
324 /* Functions */
325
326 static void workbench_init_object_data(DrawData *dd)
327 {
328   WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
329   data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
330   data->shadow_bbox_dirty = true;
331 }
332
333 static void workbench_init_oit_framebuffer(WORKBENCH_FramebufferList *fbl,
334                                            DefaultTextureList *dtxl)
335 {
336   const float *size = DRW_viewport_size_get();
337   e_data.oit_accum_tx = DRW_texture_pool_query_2d(
338       size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);
339   e_data.oit_revealage_tx = DRW_texture_pool_query_2d(
340       size[0], size[1], GPU_R16F, &draw_engine_workbench_solid);
341
342   GPU_framebuffer_ensure_config(&fbl->transparent_accum_fb,
343                                 {
344                                     GPU_ATTACHMENT_TEXTURE(dtxl->depth),
345                                     GPU_ATTACHMENT_TEXTURE(e_data.oit_accum_tx),
346                                     GPU_ATTACHMENT_TEXTURE(e_data.oit_revealage_tx),
347                                 });
348 }
349
/* Per-frame engine init: compiles the static shaders on first run, acquires
 * all viewport textures/framebuffers, rebuilds the AO sampling data when the
 * sample count changes, and creates the fixed fullscreen passes. Statement
 * order matters: texture pool queries must precede the framebuffer configs
 * that attach them. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
  WORKBENCH_FramebufferList *fbl = vedata->fbl;
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PassList *psl = vedata->psl;
  DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
  const DRWContextState *draw_ctx = DRW_context_state_get();
  RegionView3D *rv3d = draw_ctx->rv3d;
  View3D *v3d = draw_ctx->v3d;
  Scene *scene = draw_ctx->scene;
  Object *camera;

  /* Viewport draws use the view camera only when looking through it;
   * otherwise (e.g. offscreen render) fall back to the scene camera. */
  if (v3d && rv3d) {
    camera = (rv3d->persp == RV3D_CAMOB) ? v3d->camera : NULL;
  }
  else {
    camera = scene->camera;
  }

  if (!stl->g_data) {
    /* Alloc transient pointers */
    stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
  }
  if (!stl->effects) {
    stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
    workbench_effect_info_init(stl->effects);
  }

  /* next_object_id == 0 doubles as the "static data not initialized yet"
   * flag: compile the always-needed shadow/ghost shaders once. */
  if (!e_data.next_object_id) {
    WORKBENCH_DEFERRED_Shaders *sh_data = &e_data.sh_data[draw_ctx->sh_cfg];
    memset(sh_data->prepass_sh_cache, 0, sizeof(sh_data->prepass_sh_cache));
    memset(e_data.composite_sh_cache, 0, sizeof(e_data.composite_sh_cache));
    e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
    const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
    const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
#endif
    /* TODO only compile on demand */
    /* Six shadow-volume variants: depth-pass vs depth-fail (with caps),
     * each with a manifold and a (double) non-manifold flavor. */
    e_data.shadow_pass_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_PASS\n"
                                 "#define DOUBLE_MANIFOLD\n",
                                 NULL},
    });
    e_data.shadow_pass_manifold_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_PASS\n", NULL},
    });
    e_data.shadow_fail_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_FAIL\n"
                                 "#define DOUBLE_MANIFOLD\n",
                                 NULL},
    });
    e_data.shadow_fail_manifold_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_FAIL\n", NULL},
    });
    e_data.shadow_caps_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_caps_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_FAIL\n"
                                 "#define DOUBLE_MANIFOLD\n",
                                 NULL},
    });
    e_data.shadow_caps_manifold_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){datatoc_common_view_lib_glsl,
                                 datatoc_workbench_shadow_vert_glsl,
                                 NULL},
        .geom = (const char *[]){datatoc_workbench_shadow_caps_geom_glsl, NULL},
        .frag = (const char *[]){shadow_frag, NULL},
        .defs = (const char *[]){"#define SHADOW_FAIL\n", NULL},
    });

    e_data.ghost_resolve_sh = DRW_shader_create_fullscreen(
        datatoc_workbench_ghost_resolve_frag_glsl, NULL);
  }
  /* Sub-engine setup. */
  workbench_volume_engine_init();
  workbench_fxaa_engine_init();
  workbench_taa_engine_init(vedata);

  WORKBENCH_PrivateData *wpd = stl->g_data;
  workbench_private_data_init(wpd);

  workbench_dof_engine_init(vedata, camera);

  if (OIT_ENABLED(wpd)) {
    if (e_data.oit_resolve_sh == NULL) {
      e_data.oit_resolve_sh = DRW_shader_create_fullscreen(
          datatoc_workbench_forward_composite_frag_glsl, "#define ALPHA_COMPOSITE\n");
    }

    workbench_forward_choose_shaders(wpd, draw_ctx->sh_cfg);
    workbench_forward_outline_shaders_ensure(wpd, draw_ctx->sh_cfg);
  }

  /* Viewport-sized textures and framebuffers. */
  {
    const float *viewport_size = DRW_viewport_size_get();
    const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
    const eGPUTextureFormat nor_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16 : GPU_RGBA32F;
    /* Image renders need the extra precision/range of full half-float. */
    const eGPUTextureFormat comp_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F :
                                                                            GPU_R11F_G11F_B10F;
    const eGPUTextureFormat col_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F : GPU_RGBA8;
    const eGPUTextureFormat id_tex_format = OBJECT_ID_PASS_ENABLED(wpd) ? GPU_R32UI : GPU_R8;

    /* Reset refs: only the buffers needed this frame get (re)queried. */
    e_data.object_id_tx = NULL;
    e_data.color_buffer_tx = NULL;
    e_data.composite_buffer_tx = NULL;
    e_data.normal_buffer_tx = NULL;
    e_data.cavity_buffer_tx = NULL;

    e_data.composite_buffer_tx = DRW_texture_pool_query_2d(
        size[0], size[1], comp_tex_format, &draw_engine_workbench_solid);

    if (MATDATA_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
      e_data.color_buffer_tx = DRW_texture_pool_query_2d(
          size[0], size[1], col_tex_format, &draw_engine_workbench_solid);
    }
    if (OBJECT_ID_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
      e_data.object_id_tx = DRW_texture_pool_query_2d(
          size[0], size[1], id_tex_format, &draw_engine_workbench_solid);
    }
    if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
      e_data.normal_buffer_tx = DRW_texture_pool_query_2d(
          size[0], size[1], nor_tex_format, &draw_engine_workbench_solid);
    }
    if (CAVITY_ENABLED(wpd)) {
      e_data.cavity_buffer_tx = DRW_texture_pool_query_2d(
          size[0], size[1], GPU_R16, &draw_engine_workbench_solid);
    }

    GPU_framebuffer_ensure_config(&fbl->prepass_fb,
                                  {
                                      GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                                      GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                                      GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
                                      GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
                                  });
    GPU_framebuffer_ensure_config(&fbl->cavity_fb,
                                  {
                                      GPU_ATTACHMENT_NONE,
                                      GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
                                  });
    GPU_framebuffer_ensure_config(&fbl->composite_fb,
                                  {
                                      GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                                      GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                                  });
    GPU_framebuffer_ensure_config(&fbl->color_only_fb,
                                  {
                                      GPU_ATTACHMENT_NONE,
                                      GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                                  });

    /* Queried only now so prepass_fb above keeps a NULL color attachment in
     * this configuration; effect_fb below still needs the buffer. */
    if (!MATDATA_PASS_ENABLED(wpd) && !GPU_unused_fb_slot_workaround()) {
      e_data.color_buffer_tx = DRW_texture_pool_query_2d(
          size[0], size[1], col_tex_format, &draw_engine_workbench_solid);
    }

    GPU_framebuffer_ensure_config(&fbl->effect_fb,
                                  {
                                      GPU_ATTACHMENT_NONE,
                                      GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                                  });

    if (OBJECT_ID_PASS_ENABLED(wpd)) {
      GPU_framebuffer_ensure_config(&fbl->id_clear_fb,
                                    {
                                        GPU_ATTACHMENT_NONE,
                                        GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
                                    });
    }
  }

  {
    /* AO Samples Tex */
    int num_iterations = workbench_taa_calculate_num_iterations(vedata);

    const int ssao_samples_single_iteration = scene->display.matcap_ssao_samples;
    /* Cap the total; 500 appears to be a hard budget for the UBO size. */
    const int ssao_samples = MIN2(num_iterations * ssao_samples_single_iteration, 500);

    /* Sample count changed: drop the cached UBO/jitter so they rebuild. */
    if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
      DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
      DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
    }

    if (e_data.sampling_ubo == NULL) {
      float *samples = create_disk_samples(ssao_samples_single_iteration, num_iterations);
      e_data.jitter_tx = create_jitter_texture(ssao_samples);
      e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
      e_data.cached_sample_num = ssao_samples;
      MEM_freeN(samples);
    }
  }

  /* Prepass */
  {
    DRWShadingGroup *grp;
    const bool do_cull = CULL_BACKFACE_ENABLED(wpd);

    int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
    psl->prepass_pass = DRW_pass_create("Prepass",
                                        (do_cull) ? state | DRW_STATE_CULL_BACK : state);
    /* Hair never gets backface culling. */
    psl->prepass_hair_pass = DRW_pass_create("Prepass", state);

    psl->ghost_prepass_pass = DRW_pass_create("Prepass Ghost",
                                              (do_cull) ? state | DRW_STATE_CULL_BACK : state);
    psl->ghost_prepass_hair_pass = DRW_pass_create("Prepass Ghost", state);

    /* Fullscreen copy of the ghost depth into the main depth buffer. */
    psl->ghost_resolve_pass = DRW_pass_create("Resolve Ghost Depth",
                                              DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
    grp = DRW_shgroup_create(e_data.ghost_resolve_sh, psl->ghost_resolve_pass);
    DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &e_data.ghost_depth_tx);
    DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
  }

  {
    workbench_aa_create_pass(vedata, &e_data.color_buffer_tx);
  }

  {
    workbench_dof_create_pass(vedata, &e_data.composite_buffer_tx, e_data.jitter_tx);
  }

  if (CAVITY_ENABLED(wpd)) {
    int state = DRW_STATE_WRITE_COLOR;
    GPUShader *shader = workbench_cavity_shader_get(SSAO_ENABLED(wpd), CURVATURE_ENABLED(wpd));
    psl->cavity_pass = DRW_pass_create("Cavity", state);
    DRWShadingGroup *grp = DRW_shgroup_create(shader, psl->cavity_pass);
    DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
    DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);

    if (SSAO_ENABLED(wpd)) {
      DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
      DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
      DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
      DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
      DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
      DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
      DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
    }

    if (CURVATURE_ENABLED(wpd)) {
      DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
      DRW_shgroup_uniform_vec2(grp, "curvature_settings", &wpd->world_data.curvature_ridge, 1);
    }

    DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
  }
}
620
621 static void workbench_setup_ghost_framebuffer(WORKBENCH_FramebufferList *fbl)
622 {
623   const float *viewport_size = DRW_viewport_size_get();
624   const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
625
626   e_data.ghost_depth_tx = DRW_texture_pool_query_2d(
627       size[0], size[1], GPU_DEPTH_COMPONENT24, &draw_engine_workbench_solid);
628
629   GPU_framebuffer_ensure_config(&fbl->ghost_prepass_fb,
630                                 {
631                                     GPU_ATTACHMENT_TEXTURE(e_data.ghost_depth_tx),
632                                     GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
633                                     GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
634                                     GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
635                                 });
636 }
637
638 void workbench_deferred_engine_free(void)
639 {
640   for (int sh_data_index = 0; sh_data_index < ARRAY_SIZE(e_data.sh_data); sh_data_index++) {
641     WORKBENCH_DEFERRED_Shaders *sh_data = &e_data.sh_data[sh_data_index];
642     for (int index = 0; index < MAX_PREPASS_SHADERS; index++) {
643       DRW_SHADER_FREE_SAFE(sh_data->prepass_sh_cache[index]);
644     }
645   }
646   for (int index = 0; index < MAX_COMPOSITE_SHADERS; index++) {
647     DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
648   }
649   for (int index = 0; index < MAX_CAVITY_SHADERS; ++index) {
650     DRW_SHADER_FREE_SAFE(e_data.cavity_sh[index]);
651   }
652   DRW_SHADER_FREE_SAFE(e_data.ghost_resolve_sh);
653   DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
654   DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
655   DRW_SHADER_FREE_SAFE(e_data.background_sh[0]);
656   DRW_SHADER_FREE_SAFE(e_data.background_sh[1]);
657
658   DRW_SHADER_FREE_SAFE(e_data.oit_resolve_sh);
659
660   DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
661   DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
662   DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
663   DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
664   DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
665   DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
666
667   workbench_volume_engine_free();
668   workbench_fxaa_engine_free();
669   workbench_taa_engine_free();
670   workbench_dof_engine_free();
671 }
672
673 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
674 {
675   DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
676   if (MATDATA_PASS_ENABLED(wpd)) {
677     DRW_shgroup_uniform_texture_ref(grp, "materialBuffer", &e_data.color_buffer_tx);
678   }
679   else {
680     DRW_shgroup_uniform_vec3(grp, "materialSingleColor", wpd->shading.single_color, 1);
681   }
682   if (OBJECT_OUTLINE_ENABLED(wpd)) {
683     DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
684   }
685   if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
686     DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
687   }
688   if (CAVITY_ENABLED(wpd)) {
689     DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
690   }
691   if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
692     DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
693   }
694   if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
695     DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
696   }
697   if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
698     BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
699     DRW_shgroup_uniform_texture(
700         grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
701   }
702 }
703
/* Create all passes and shading groups used by the deferred (solid) pipeline
 * for the current redraw: background, composite, stencil shadow-volume passes
 * and (optionally) the order-independent-transparency passes. */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PassList *psl = vedata->psl;
  WORKBENCH_PrivateData *wpd = stl->g_data;
  DRWShadingGroup *grp;
  const DRWContextState *draw_ctx = DRW_context_state_get();

  Scene *scene = draw_ctx->scene;

  workbench_volume_cache_init(vedata);

  /* Pick shader variants matching the current shading settings / clipping config. */
  select_deferred_shaders(wpd, draw_ctx->sh_cfg);

  /* Background Pass */
  {
    /* DEPTH_EQUAL: only touches pixels left at the clear depth (no geometry drawn). */
    psl->background_pass = DRW_pass_create("Background",
                                           DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
    grp = DRW_shgroup_create(wpd->background_sh, psl->background_pass);
    DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
    DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
    if (OBJECT_OUTLINE_ENABLED(wpd)) {
      DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
    }
    DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);

    /* Draw the region-clipping bounding box tint over the background. */
    if (draw_ctx->rv3d && (draw_ctx->rv3d->rflag & RV3D_CLIPPING) && draw_ctx->rv3d->clipbb) {
      GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_3D_UNIFORM_COLOR_BACKGROUND);
      grp = DRW_shgroup_create(shader, psl->background_pass);
      wpd->world_clip_planes_batch = DRW_draw_background_clipping_batch_from_rv3d(draw_ctx->rv3d);
      DRW_shgroup_call(grp, wpd->world_clip_planes_batch, NULL);
      DRW_shgroup_uniform_vec4(grp, "color", &wpd->world_clip_planes_color[0], 1);
    }
  }

  /* Deferred Mix Pass */
  {
    workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);
    studiolight_update_light(wpd, e_data.display.light_direction);

    if (SHADOW_ENABLED(wpd)) {
      /* Lit composite: only runs where the shadow-volume stencil stayed 0 (unshadowed). */
      psl->composite_pass = DRW_pass_create(
          "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL | DRW_STATE_DEPTH_GREATER);
      grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
      workbench_composite_uniforms(wpd, grp);
      DRW_shgroup_stencil_mask(grp, 0x00);
      DRW_shgroup_uniform_float_copy(grp, "lightMultiplier", 1.0f);
      DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
      DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
      DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
      DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);

      /* Stencil Shadow passes. */
#ifdef DEBUG_SHADOW_VOLUME
      /* Debug: render the shadow volumes additively so they are visible. */
      DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR |
                                  DRW_STATE_BLEND_ADD;
      DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR |
                                  DRW_STATE_BLEND_ADD;
#else
      /* Normal: write shadow-volume increments/decrements into the stencil buffer
       * using the depth-pass and depth-fail techniques respectively. */
      DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
      DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
      /* Separate passes for manifold vs non-manifold geometry and for the
       * depth-fail caps (front/back closing geometry of the volumes). */
      psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
      psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
      psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
      psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
      psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
      psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani",
                                                              depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
      grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);
      grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);
      grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);
      grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);
      grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);
      grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh,
                               psl->shadow_depth_fail_caps_mani_pass);
      DRW_shgroup_stencil_mask(grp, 0xFF);

      /* Shadowed composite: runs where stencil != 0 (inside a shadow volume);
       * note lightMultiplier is bound to the shadow multiplier here so shadowed
       * fragments are dimmed accordingly. */
      psl->composite_shadow_pass = DRW_pass_create(
          "Composite Shadow",
          DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL | DRW_STATE_DEPTH_GREATER);
      grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
      DRW_shgroup_stencil_mask(grp, 0x00);
      workbench_composite_uniforms(wpd, grp);
      DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
      DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
      DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
      DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
      DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif
    }
    else {
      /* No shadows: a single full-screen composite without stencil testing. */
      psl->composite_pass = DRW_pass_create("Composite",
                                            DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_GREATER);
      grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
      workbench_composite_uniforms(wpd, grp);
      DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
    }
  }

  /**
   * Order Independent Transparency.
   * Similar to workbench forward. Duplicated code to avoid
   * spaghetti with workbench forward. It would be great if we unify
   * this in a clean way.
   */
  if (OIT_ENABLED(wpd)) {
    const bool do_cull = CULL_BACKFACE_ENABLED(wpd);
    const int cull_state = (do_cull) ? DRW_STATE_CULL_BACK : 0;
    /* Transparency Accum */
    {
      /* Same as forward but here we use depth test to
       * not bleed through other solid objects. */
      int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_OIT | DRW_STATE_DEPTH_LESS | cull_state;
      psl->transparent_accum_pass = DRW_pass_create("Transparent Accum", state);
    }
    /* Depth */
    {
      int state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS | cull_state;
      psl->object_outline_pass = DRW_pass_create("Transparent Depth", state);
    }
    /* OIT Composite */
    {
      /* Resolve the accumulated weighted color/revealage onto the composite buffer. */
      int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ALPHA;
      psl->oit_composite_pass = DRW_pass_create("OIT Composite", state);

      grp = DRW_shgroup_create(e_data.oit_resolve_sh, psl->oit_composite_pass);
      DRW_shgroup_uniform_texture_ref(grp, "transparentAccum", &e_data.oit_accum_tx);
      DRW_shgroup_uniform_texture_ref(grp, "transparentRevealage", &e_data.oit_revealage_tx);
      DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
      DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
    }
  }
}
845
846 static WORKBENCH_MaterialData *get_or_create_material_data(WORKBENCH_Data *vedata,
847                                                            Object *ob,
848                                                            Material *mat,
849                                                            Image *ima,
850                                                            ImageUser *iuser,
851                                                            int color_type,
852                                                            int interp)
853 {
854   WORKBENCH_StorageList *stl = vedata->stl;
855   WORKBENCH_PassList *psl = vedata->psl;
856   WORKBENCH_PrivateData *wpd = stl->g_data;
857   WORKBENCH_MaterialData *material;
858   WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
859       &ob->id,
860       &draw_engine_workbench_solid,
861       sizeof(WORKBENCH_ObjectData),
862       &workbench_init_object_data,
863       NULL);
864   WORKBENCH_MaterialData material_template;
865   const bool is_ghost = (ob->dtx & OB_DRAWXRAY);
866
867   /* Solid */
868   workbench_material_update_data(wpd, ob, mat, &material_template);
869   material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
870   material_template.color_type = color_type;
871   material_template.ima = ima;
872   material_template.iuser = iuser;
873   material_template.interp = interp;
874   uint hash = workbench_material_get_hash(&material_template, is_ghost);
875
876   material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
877   if (material == NULL) {
878     material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
879     material->shgrp = DRW_shgroup_create(
880         (wpd->shading.color_type == color_type) ? wpd->prepass_sh : wpd->prepass_uniform_sh,
881         (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
882     workbench_material_copy(material, &material_template);
883     DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
884     DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
885     workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, true, interp);
886     BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
887   }
888   return material;
889 }
890
891 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
892 {
893   WORKBENCH_StorageList *stl = vedata->stl;
894   WORKBENCH_PassList *psl = vedata->psl;
895   WORKBENCH_PrivateData *wpd = stl->g_data;
896
897   for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
898     if (md->type != eModifierType_ParticleSystem) {
899       continue;
900     }
901     ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
902     if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
903       continue;
904     }
905     ParticleSettings *part = psys->part;
906     const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
907
908     if (draw_as == PART_DRAW_PATH) {
909       Material *mat;
910       Image *image;
911       ImageUser *iuser;
912       int interp;
913       workbench_material_get_image_and_mat(ob, part->omat, &image, &iuser, &interp, &mat);
914       int color_type = workbench_material_determine_color_type(wpd, image, ob, false);
915       WORKBENCH_MaterialData *material = get_or_create_material_data(
916           vedata, ob, mat, image, iuser, color_type, interp);
917
918       struct GPUShader *shader = (wpd->shading.color_type == color_type) ?
919                                      wpd->prepass_hair_sh :
920                                      wpd->prepass_uniform_hair_sh;
921       DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
922           ob,
923           psys,
924           md,
925           (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
926           shader);
927       DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
928       DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
929       workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, true, interp);
930     }
931   }
932 }
933
/* Populate draw calls for one object in the deferred (solid) pipeline.
 * Handles hair particles, smoke domains, textured / single-color / per-material
 * drawing (including the sculpt PBVH path) and shadow-volume extraction. */
void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
{
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PassList *psl = vedata->psl;
  WORKBENCH_PrivateData *wpd = stl->g_data;
  const DRWContextState *draw_ctx = DRW_context_state_get();
  Scene *scene = draw_ctx->scene;

  if (!DRW_object_is_renderable(ob)) {
    return;
  }

  if (ob->type == OB_MESH) {
    workbench_cache_populate_particles(vedata, ob);
  }

  /* Smoke domains are drawn as volumes instead of solid geometry. */
  ModifierData *md;
  if (((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
      (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
      (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
      (((SmokeModifierData *)md)->domain != NULL)) {
    workbench_volume_cache_populate(vedata, scene, ob, md);
    return; /* Do not draw solid in this case. */
  }

  if (!(DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF)) {
    return;
  }
  if ((ob->dt < OB_SOLID) && !DRW_state_is_image_render()) {
    return;
  }

  WORKBENCH_MaterialData *material;
  if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
    const bool is_active = (ob == draw_ctx->obact);
    const bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->v3d);
    const bool use_hide = is_active && DRW_object_use_hide_faces(ob);
    const int materials_len = MAX2(1, ob->totcol);
    const Mesh *me = (ob->type == OB_MESH) ? ob->data : NULL;
    /* Set when any material is transparent: transparent materials are routed to
     * the forward pipeline, and shadow volumes then use the depth-fail path. */
    bool has_transp_mat = false;

    if (!use_sculpt_pbvh && TEXTURE_DRAWING_ENABLED(wpd) && me && me->mloopuv) {
      /* Draw textured */
      struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
      for (int i = 0; i < materials_len; i++) {
        if (geom_array != NULL && geom_array[i] != NULL) {
          Material *mat;
          Image *image;
          ImageUser *iuser;
          int interp;
          /* Material slots are 1-based. */
          workbench_material_get_image_and_mat(ob, i + 1, &image, &iuser, &interp, &mat);
          int color_type = workbench_material_determine_color_type(
              wpd, image, ob, use_sculpt_pbvh);
          if (color_type == V3D_SHADING_MATERIAL_COLOR && mat && mat->a < 1.0) {
            /* Hack */
            wpd->shading.xray_alpha = mat->a;
            material = workbench_forward_get_or_create_material_data(
                vedata, ob, mat, image, iuser, color_type, 0, use_sculpt_pbvh);
            has_transp_mat = true;
          }
          else {
            material = get_or_create_material_data(
                vedata, ob, mat, image, iuser, color_type, interp);
          }
          DRW_shgroup_call(material->shgrp, geom_array[i], ob);
        }
      }
    }
    else if (ELEM(wpd->shading.color_type,
                  V3D_SHADING_SINGLE_COLOR,
                  V3D_SHADING_OBJECT_COLOR,
                  V3D_SHADING_RANDOM_COLOR,
                  V3D_SHADING_VERTEX_COLOR)) {
      /* One color for the whole object: a single material entry suffices. */
      int color_type = workbench_material_determine_color_type(wpd, NULL, ob, use_sculpt_pbvh);

      if ((ob->color[3] < 1.0f) && (color_type == V3D_SHADING_OBJECT_COLOR)) {
        /* Hack */
        wpd->shading.xray_alpha = ob->color[3];
        material = workbench_forward_get_or_create_material_data(
            vedata, ob, NULL, NULL, NULL, color_type, 0, use_sculpt_pbvh);
        has_transp_mat = true;
      }
      else {
        /* Draw solid color */
        material = get_or_create_material_data(vedata, ob, NULL, NULL, NULL, color_type, 0);
      }

      if (use_sculpt_pbvh) {
        bool use_vcol = (color_type == V3D_SHADING_VERTEX_COLOR);
        DRW_shgroup_call_sculpt(material->shgrp, ob, false, false, use_vcol);
      }
      else {
        struct GPUBatch *geom;
        if (color_type == V3D_SHADING_VERTEX_COLOR) {
          geom = DRW_cache_mesh_surface_vertpaint_get(ob);
        }
        else {
          geom = DRW_cache_object_surface_get(ob);
        }

        if (geom) {
          DRW_shgroup_call(material->shgrp, geom, ob);
        }
      }
    }
    else {
      /* Draw material color */
      if (use_sculpt_pbvh) {
        /* One shading group per material slot, drawn through the PBVH. */
        struct DRWShadingGroup **shgrps = BLI_array_alloca(shgrps, materials_len);

        for (int i = 0; i < materials_len; ++i) {
          struct Material *mat = give_current_material(ob, i + 1);
          if (mat != NULL && mat->a < 1.0f) {
            /* Hack */
            wpd->shading.xray_alpha = mat->a;
            material = workbench_forward_get_or_create_material_data(
                vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0, use_sculpt_pbvh);
            has_transp_mat = true;
          }
          else {
            material = get_or_create_material_data(
                vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
          }
          shgrps[i] = material->shgrp;
        }
        DRW_shgroup_call_sculpt_with_materials(shgrps, ob, false);
      }
      else {
        /* One batch per material slot from the regular mesh cache. */
        struct GPUBatch **geoms;
        struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
        memset(gpumat_array, 0, sizeof(*gpumat_array) * materials_len);

        geoms = DRW_cache_object_surface_material_get(
            ob, gpumat_array, materials_len, NULL, NULL, NULL);
        for (int i = 0; i < materials_len; ++i) {
          if (geoms != NULL && geoms[i] != NULL) {
            Material *mat = give_current_material(ob, i + 1);
            if (mat != NULL && mat->a < 1.0f) {
              /* Hack */
              wpd->shading.xray_alpha = mat->a;
              material = workbench_forward_get_or_create_material_data(
                  vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0, use_sculpt_pbvh);
              has_transp_mat = true;
            }
            else {
              material = get_or_create_material_data(
                  vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
            }
            DRW_shgroup_call(material->shgrp, geoms[i], ob);
          }
        }
      }
    }

    /* Shadow volume extraction for the stencil shadow passes. */
    if (SHADOW_ENABLED(wpd) && !(ob->dtx & OB_DRAW_NO_SHADOW_CAST)) {
      bool is_manifold;
      struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
      if (geom_shadow) {
        if (use_sculpt_pbvh || use_hide) {
          /* Currently unsupported in sculpt mode. We could revert to the slow
           * method in this case but I'm not sure if it's a good idea given that
           * sculpted meshes are heavy to begin with. */
          // DRW_shgroup_call_sculpt(wpd->shadow_shgrp, ob, ob->obmat);
        }
        else {
          WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
              &ob->id,
              &draw_engine_workbench_solid,
              sizeof(WORKBENCH_ObjectData),
              &workbench_init_object_data,
              NULL);

          if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {

            /* Transform the light direction into object space for extrusion. */
            mul_v3_mat3_m4v3(
                engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);

            DRWShadingGroup *grp;
            /* Depth-pass only works when the camera is outside the shadow volume. */
            bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(
                wpd, ob, engine_object_data);

            if (use_shadow_pass_technique && !has_transp_mat) {
              if (is_manifold) {
                grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh,
                                         psl->shadow_depth_pass_mani_pass);
              }
              else {
                grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
              }
              DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
              /* Depth-pass can extrude "to infinity"; distance is a large constant. */
              DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
              DRW_shgroup_call_no_cull(grp, geom_shadow, ob);
#ifdef DEBUG_SHADOW_VOLUME
              DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
#endif
            }
            else {
              /* Depth-fail technique: needs a bounded extrusion distance and caps. */
              float extrude_distance = studiolight_object_shadow_distance(
                  wpd, ob, engine_object_data);

              /* TODO(fclem): only use caps if they are in the view frustum. */
              const bool need_caps = true;
              if (need_caps) {
                if (is_manifold) {
                  grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh,
                                           psl->shadow_depth_fail_caps_mani_pass);
                }
                else {
                  grp = DRW_shgroup_create(e_data.shadow_caps_sh,
                                           psl->shadow_depth_fail_caps_pass);
                }
                DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
                DRW_shgroup_call_no_cull(grp, DRW_cache_object_surface_get(ob), ob);
              }

              if (is_manifold) {
                grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh,
                                         psl->shadow_depth_fail_mani_pass);
              }
              else {
                grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
              }
              DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
              DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
              DRW_shgroup_call_no_cull(grp, geom_shadow, ob);
#ifdef DEBUG_SHADOW_VOLUME
              DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
#endif
            }
          }
        }
      }
    }
  }
}
1170
/* Intentionally empty: nothing to finalize after cache population; kept to
 * satisfy the engine callback interface. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
1174
1175 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
1176 {
1177   WORKBENCH_StorageList *stl = vedata->stl;
1178   WORKBENCH_FramebufferList *fbl = vedata->fbl;
1179   WORKBENCH_PrivateData *wpd = stl->g_data;
1180   const float clear_depth = 1.0f;
1181   const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
1182   uint clear_stencil = 0x00;
1183
1184   DRW_stats_group_start("Clear Background");
1185
1186   if (OBJECT_ID_PASS_ENABLED(wpd)) {
1187     /* From all the color buffers, only object id needs to be cleared. */
1188     GPU_framebuffer_bind(fbl->id_clear_fb);
1189     GPU_framebuffer_clear_color(fbl->id_clear_fb, clear_color);
1190   }
1191
1192   GPU_framebuffer_bind(fbl->prepass_fb);
1193   int clear_bits = GPU_DEPTH_BIT;
1194   SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
1195   GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
1196   DRW_stats_group_end();
1197 }
1198
/* Execute the deferred pipeline for one sample: prepass, ghost resolve,
 * cavity, shadow volumes, composite, ghost stencil masking, background,
 * OIT resolve, volumes, depth of field and anti-aliasing. The order of
 * framebuffer binds and pass draws below is significant. */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
  WORKBENCH_PassList *psl = vedata->psl;
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_FramebufferList *fbl = vedata->fbl;
  WORKBENCH_PrivateData *wpd = stl->g_data;
  DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

  if (workbench_is_taa_enabled(wpd)) {
    workbench_taa_draw_scene_start(vedata);
  }

  /* clear in background */
  GPU_framebuffer_bind(fbl->prepass_fb);
  DRW_draw_pass(psl->prepass_pass);
  DRW_draw_pass(psl->prepass_hair_pass);

  if (GHOST_ENABLED(psl)) {
    /* meh, late init to not request a depth buffer we won't use. */
    workbench_setup_ghost_framebuffer(fbl);

    /* Ghost objects are drawn into their own depth buffer, then resolved
     * onto the main depth buffer. */
    GPU_framebuffer_bind(fbl->ghost_prepass_fb);
    GPU_framebuffer_clear_depth(fbl->ghost_prepass_fb, 1.0f);
    DRW_draw_pass(psl->ghost_prepass_pass);
    DRW_draw_pass(psl->ghost_prepass_hair_pass);

    GPU_framebuffer_bind(dfbl->depth_only_fb);
    DRW_draw_pass(psl->ghost_resolve_pass);
  }

  if (CAVITY_ENABLED(wpd)) {
    GPU_framebuffer_bind(fbl->cavity_fb);
    DRW_draw_pass(psl->cavity_pass);
  }

  if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
    /* Debug: draw the volumes directly over the composite. */
    GPU_framebuffer_bind(fbl->composite_fb);
    DRW_draw_pass(psl->composite_pass);
#else
    /* Render shadow volumes into the stencil buffer only. */
    GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
    DRW_draw_pass(psl->shadow_depth_pass_pass);
    DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
    DRW_draw_pass(psl->shadow_depth_fail_pass);
    DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
    DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
    DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);

    if (GHOST_ENABLED(psl)) {
      /* We need to set the stencil buffer to 0 where Ghost objects
       * else they will get shadow and even badly shadowed. */
      DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
      DRW_pass_state_set(psl->ghost_prepass_hair_pass,
                         DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

      DRW_draw_pass(psl->ghost_prepass_pass);
      DRW_draw_pass(psl->ghost_prepass_hair_pass);
    }
#ifndef DEBUG_SHADOW_VOLUME
    /* Composite lit (stencil == 0) and shadowed (stencil != 0) regions. */
    GPU_framebuffer_bind(fbl->composite_fb);
    DRW_draw_pass(psl->composite_pass);
    DRW_draw_pass(psl->composite_shadow_pass);
#endif
  }
  else {
    GPU_framebuffer_bind(fbl->composite_fb);
    DRW_draw_pass(psl->composite_pass);
  }

  /* TODO(fclem): only enable when needed (when there is overlays). */
  if (GHOST_ENABLED(psl)) {
    /* In order to not draw on top of ghost objects, we clear the stencil
     * to 0xFF and the ghost object to 0x00 and only draw overlays on top if
     * stencil is not 0. */
    GPU_framebuffer_bind(dfbl->depth_only_fb);
    GPU_framebuffer_clear_stencil(dfbl->depth_only_fb, 0xFF);

    DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
    DRW_pass_state_set(psl->ghost_prepass_hair_pass,
                       DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);

    DRW_draw_pass(psl->ghost_prepass_pass);
    DRW_draw_pass(psl->ghost_prepass_hair_pass);
  }

  GPU_framebuffer_bind(fbl->composite_fb);
  DRW_draw_pass(psl->background_pass);

  if (OIT_ENABLED(wpd) && !DRW_pass_is_empty(psl->transparent_accum_pass)) {
    DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
    /* meh, late init to not request buffers we won't use. */
    workbench_init_oit_framebuffer(fbl, dtxl);

    /* Accumulate weighted transparency, then resolve onto the composite. */
    const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
    GPU_framebuffer_bind(fbl->transparent_accum_fb);
    GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
    DRW_draw_pass(psl->transparent_accum_pass);

    GPU_framebuffer_bind(fbl->composite_fb);
    DRW_draw_pass(psl->oit_composite_pass);
  }

  if (wpd->volumes_do) {
    GPU_framebuffer_bind(fbl->color_only_fb);
    DRW_draw_pass(psl->volume_pass);
  }

  /* Post effects: depth of field, then anti-aliasing resolve. */
  workbench_dof_draw_pass(vedata);
  workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
}
1310
1311 void workbench_deferred_draw_finish(WORKBENCH_Data *vedata)
1312 {
1313   WORKBENCH_StorageList *stl = vedata->stl;
1314   WORKBENCH_PrivateData *wpd = stl->g_data;
1315
1316   /* XXX TODO(fclem) do not discard UBOS after drawing! Store them per viewport. */
1317   workbench_private_data_free(wpd);
1318   workbench_volume_smoke_textures_free(wpd);
1319 }