Merge remote-tracking branch 'origin/master' into blender2.8
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file workbench_deferred.c
23  *  \ingroup draw_engine
24  */
25
26 #include "workbench_private.h"
27
28 #include "BIF_gl.h"
29
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
33
34 #include "BKE_node.h"
35 #include "BKE_particle.h"
36
37 #include "DNA_image_types.h"
38 #include "DNA_mesh_types.h"
39 #include "DNA_modifier_types.h"
40 #include "DNA_node_types.h"
41
42 #include "ED_uvedit.h"
43
44 #include "GPU_shader.h"
45 #include "GPU_texture.h"
46
47
48 /* *********** STATIC *********** */
49
50 // #define DEBUG_SHADOW_VOLUME
51
52 #ifdef DEBUG_SHADOW_VOLUME
53 #  include "draw_debug.h"
54 #endif
55
/* Engine-wide static data shared by all viewports using the deferred
 * workbench engine: compiled shader caches (keyed by material/shading
 * configuration index) and per-frame framebuffer texture references. */
static struct {
	struct GPUShader *prepass_sh_cache[MAX_SHADERS];   /* per-config prepass shaders, lazily compiled */
	struct GPUShader *composite_sh_cache[MAX_SHADERS]; /* per-config deferred composite shaders (OB_SOLID only) */
	struct GPUShader *shadow_fail_sh;
	struct GPUShader *shadow_fail_manifold_sh;
	struct GPUShader *shadow_pass_sh;
	struct GPUShader *shadow_pass_manifold_sh;
	struct GPUShader *shadow_caps_sh;
	struct GPUShader *shadow_caps_manifold_sh;

	struct GPUTexture *object_id_tx; /* ref only, not alloced */
	struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *specular_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

	SceneDisplay display; /* world light direction for shadows */
	float light_direction_vs[3];
	int next_object_id; /* 0 means "not yet initialized"; ids start at 1 */
	float normal_world_matrix[3][3];
} e_data = {{NULL}};
77
78 /* Shaders */
79 extern char datatoc_workbench_prepass_vert_glsl[];
80 extern char datatoc_workbench_prepass_frag_glsl[];
81 extern char datatoc_workbench_deferred_composite_frag_glsl[];
82
83 extern char datatoc_workbench_shadow_vert_glsl[];
84 extern char datatoc_workbench_shadow_geom_glsl[];
85 extern char datatoc_workbench_shadow_caps_geom_glsl[];
86 extern char datatoc_workbench_shadow_debug_frag_glsl[];
87
88 extern char datatoc_workbench_background_lib_glsl[];
89 extern char datatoc_workbench_common_lib_glsl[];
90 extern char datatoc_workbench_data_lib_glsl[];
91 extern char datatoc_workbench_object_outline_lib_glsl[];
92 extern char datatoc_workbench_world_light_lib_glsl[];
93
94 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
95 {
96         char *str = NULL;
97
98         DynStr *ds = BLI_dynstr_new();
99
100         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
101         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
102         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
103
104         if ((wpd->shading.light & V3D_LIGHTING_STUDIO) || (wpd->shading.flag & V3D_SHADING_SPECULAR_HIGHLIGHT)) {
105                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
106         }
107         if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
108                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
109         }
110
111         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
112
113         str = BLI_dynstr_get_cstring(ds);
114         BLI_dynstr_free(ds);
115         return str;
116 }
117
118 static char *workbench_build_prepass_frag(void)
119 {
120         char *str = NULL;
121
122         DynStr *ds = BLI_dynstr_new();
123
124         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
125         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
126         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
127
128         str = BLI_dynstr_get_cstring(ds);
129         BLI_dynstr_free(ds);
130         return str;
131 }
132
133 static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, int drawtype)
134 {
135         if (e_data.prepass_sh_cache[index] == NULL) {
136                 char *defines = workbench_material_build_defines(wpd, drawtype);
137                 char *composite_frag = workbench_build_composite_frag(wpd);
138                 char *prepass_frag = workbench_build_prepass_frag();
139                 e_data.prepass_sh_cache[index] = DRW_shader_create(
140                         datatoc_workbench_prepass_vert_glsl, NULL, prepass_frag, defines);
141                 if (drawtype == OB_SOLID) {
142                         e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
143                 }
144                 MEM_freeN(prepass_frag);
145                 MEM_freeN(composite_frag);
146                 MEM_freeN(defines);
147         }
148 }
149
150 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
151 {
152         int index_solid = workbench_material_get_shader_index(wpd, OB_SOLID);
153         int index_texture = workbench_material_get_shader_index(wpd, OB_TEXTURE);
154
155         ensure_deferred_shaders(wpd, index_solid, OB_SOLID);
156         ensure_deferred_shaders(wpd, index_texture, OB_TEXTURE);
157
158         wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
159         wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
160         wpd->composite_sh = e_data.composite_sh_cache[index_solid];
161 }
162
163 /* Functions */
164
165
166 static void workbench_init_object_data(ObjectEngineData *engine_data)
167 {
168         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)engine_data;
169         data->object_id = e_data.next_object_id++;
170         data->shadow_bbox_dirty = true;
171 }
172
/* Per-frame engine init: on first run compiles the shared shadow-volume
 * shaders, then (every frame) queries the pooled G-buffer textures at the
 * viewport size, (re)builds the prepass/composite framebuffers and creates
 * the prepass draw pass. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

	/* next_object_id == 0 doubles as the "static data not yet initialized" flag. */
	if (!e_data.next_object_id) {
		memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
		const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
		/* No fragment stage needed: shadow volumes only write the stencil buffer. */
		const char *shadow_frag = NULL;
#endif
		/* Six shadow-volume shader variants: {pass, fail, fail-caps} x
		 * {non-manifold (DOUBLE_MANIFOLD), manifold}. */
		e_data.shadow_pass_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_pass_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n");
		e_data.shadow_fail_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_fail_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
		e_data.shadow_caps_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_caps_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
	}

	if (!stl->g_data) {
		/* Alloc transient pointers */
		stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
	}

	workbench_private_data_init(stl->g_data);

	{
		const float *viewport_size = DRW_viewport_size_get();
		const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
		/* Pooled textures: shared/recycled across engines, valid this frame only. */
		e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
		e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		e_data.specular_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
		        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

		/* Compact two-channel encoding when supported, full floats otherwise. */
		if (NORMAL_ENCODING_ENABLED()) {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
		}
		else {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
		}

		GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.specular_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->composite_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
		});
	}

	/* Prepass */
	{
		int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
		psl->prepass_pass = DRW_pass_create("Prepass", state);
	}
}
268
269 void workbench_deferred_engine_free()
270 {
271         for (int index = 0; index < MAX_SHADERS; index++) {
272                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
273                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
274         }
275         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
276         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
277         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
278         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
279         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
280         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
281 }
282
/* Bind the G-buffer textures and shared uniforms common to every deferred
 * composite shading group (used by both the plain and the shadowed
 * composite passes). */
static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
{
	DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
	DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
	if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
		DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
	}
	if (SPECULAR_HIGHLIGHT_ENABLED(wpd)) {
		DRW_shgroup_uniform_texture_ref(grp, "specularBuffer", &e_data.specular_buffer_tx);

/* Disabled experiment: reconstruct view-space screen vectors from the
 * inverse window matrix (presumably for position reconstruction in the
 * composite shader). Kept for reference. */
#if 0
		float invwinmat[4][4];
		DRW_viewport_matrix_get(invwinmat, DRW_MAT_WININV);

		copy_v4_fl4(e_data.screenvecs[0],  1.0f, -1.0f, 0.0f, 1.0f);
		copy_v4_fl4(e_data.screenvecs[1], -1.0f,  1.0f, 0.0f, 1.0f);
		copy_v4_fl4(e_data.screenvecs[2], -1.0f, -1.0f, 0.0f, 1.0f);
		for (int i = 0; i < 3; i++) {
			mul_m4_v4(invwinmat, e_data.screenvecs[i]);
			e_data.screenvecs[i][0] /= e_data.screenvecs[i][3]; /* perspective divide */
			e_data.screenvecs[i][1] /= e_data.screenvecs[i][3]; /* perspective divide */
			e_data.screenvecs[i][2] /= e_data.screenvecs[i][3]; /* perspective divide */
			e_data.screenvecs[i][3] = 1.0f;
		}
		sub_v3_v3(e_data.screenvecs[0], e_data.screenvecs[2]);
		sub_v3_v3(e_data.screenvecs[1], e_data.screenvecs[2]);
		DRW_shgroup_uniform_vec4(grp, "screenvecs[0]", e_data.screenvecs[0], 3);
#endif
	}
	DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
	DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);

	workbench_material_set_normal_world_matrix(grp, wpd, e_data.normal_world_matrix);
}
317
/* Cache init: select the shaders for the current shading settings and build
 * the composite pass. With shadows enabled this also sets up the stencil
 * shadow-volume passes (depth-pass and depth-fail techniques, manifold and
 * non-manifold variants, plus caps) and a second composite pass for the
 * shadowed (stencil != 0) region. */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DRWShadingGroup *grp;
	const DRWContextState *draw_ctx = DRW_context_state_get();
	/* static: DRW stores a pointer to this uniform value, so it must outlive
	 * this function. */
	static float light_multiplier = 1.0f;


	Scene *scene = draw_ctx->scene;

	select_deferred_shaders(wpd);
	/* Deferred Mix Pass */
	{
		workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);

		e_data.display.shadow_shift = scene->display.shadow_shift;
		copy_v3_v3(e_data.light_direction_vs, wpd->world_data.lights[0].light_direction_vs);

		if (SHADOW_ENABLED(wpd)) {
			/* Lit composite: only where the stencil is still 0 (unshadowed). */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_stencil_mask(grp, 0x00);
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &light_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

			/* Stencil Shadow passes. */
#ifdef DEBUG_SHADOW_VOLUME
			/* Debug: draw the volumes as additive color instead of stencil. */
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
			DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
#else
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
			DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
			psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
			psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
			psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
			psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
			psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
			psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);

			/* Shadowed composite: where the stencil is non-zero, with the
			 * light dimmed by the shadow multiplier. */
			psl->composite_shadow_pass = DRW_pass_create("Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
			DRW_shgroup_stencil_mask(grp, 0x00);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif

			studiolight_update_light(wpd, e_data.display.light_direction);
		}
		else {
			/* No shadows: single full-screen composite, no stencil test. */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
		}
	}
}
401
/* Return the cached WORKBENCH_MaterialData (shading group + material UBO)
 * for this object/material/image/drawtype combination, creating and
 * registering it in wpd->material_hash on first use. The returned pointer
 * is owned by the hash (freed with the private data elsewhere). */
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int drawtype)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	WORKBENCH_MaterialData *material;
	WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
	        ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
	WORKBENCH_MaterialData material_template;

	/* Solid */
	/* Fill a template only to compute the cache key; the real entry is
	 * allocated below on a cache miss. */
	workbench_material_update_data(wpd, ob, mat, &material_template);
	material_template.object_id = engine_object_data->object_id;
	material_template.drawtype = drawtype;
	material_template.ima = ima;
	uint hash = workbench_material_get_hash(&material_template);

	material = BLI_ghash_lookup(wpd->material_hash, SET_UINT_IN_POINTER(hash));
	if (material == NULL) {
		material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
		material->shgrp = DRW_shgroup_create(
		        drawtype == OB_SOLID ? wpd->prepass_solid_sh : wpd->prepass_texture_sh, psl->prepass_pass);
		/* Prepass marks all drawn pixels so shadow stenciling can test against them. */
		DRW_shgroup_stencil_mask(material->shgrp, 0xFF);
		material->object_id = engine_object_data->object_id;
		copy_v4_v4(material->material_data.diffuse_color, material_template.material_data.diffuse_color);
		copy_v4_v4(material->material_data.specular_color, material_template.material_data.specular_color);
		material->material_data.roughness = material_template.material_data.roughness;
		switch (drawtype) {
			case OB_SOLID:
				break;

			case OB_TEXTURE:
			{
				GPUTexture *tex = GPU_texture_from_blender(ima, NULL, GL_TEXTURE_2D, false, false, false);
				DRW_shgroup_uniform_texture(material->shgrp, "image", tex);
				break;
			}
		}
		DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
		material->material_ubo = DRW_uniformbuffer_create(sizeof(WORKBENCH_UBO_Material), &material->material_data);
		DRW_shgroup_uniform_block(material->shgrp, "material_block", material->material_ubo);

		BLI_ghash_insert(wpd->material_hash, SET_UINT_IN_POINTER(hash), material);
	}
	return material;
}
449
450 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
451 {
452         const DRWContextState *draw_ctx = DRW_context_state_get();
453         if (ob == draw_ctx->object_edit) {
454                 return;
455         }
456         for (ParticleSystem *psys = ob->particlesystem.first; psys != NULL; psys = psys->next) {
457                 if (!psys_check_enabled(ob, psys, false)) {
458                         continue;
459                 }
460                 if (!DRW_check_psys_visible_within_active_context(ob, psys)) {
461                         return;
462                 }
463                 ParticleSettings *part = psys->part;
464                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
465
466                 static float mat[4][4];
467                 unit_m4(mat);
468
469                 if (draw_as == PART_DRAW_PATH) {
470                         struct Gwn_Batch *geom = DRW_cache_particles_get_hair(ob, psys, NULL);
471                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
472                         DRW_shgroup_call_add(material->shgrp, geom, mat);
473                 }
474         }
475 }
476
477 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
478 {
479         WORKBENCH_StorageList *stl = vedata->stl;
480         WORKBENCH_PassList *psl = vedata->psl;
481         WORKBENCH_PrivateData *wpd = stl->g_data;
482
483         if (!DRW_object_is_renderable(ob))
484                 return;
485
486         if (ob->type == OB_MESH) {
487                 workbench_cache_populate_particles(vedata, ob);
488         }
489
490         WORKBENCH_MaterialData *material;
491         if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT)) {
492                 const DRWContextState *draw_ctx = DRW_context_state_get();
493                 const bool is_active = (ob == draw_ctx->obact);
494                 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
495                 bool is_drawn = false;
496                 if (!is_sculpt_mode && wpd->drawtype == OB_TEXTURE && ob->type == OB_MESH) {
497                         const Mesh *me = ob->data;
498                         if (me->mloopuv) {
499                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
500                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
501                                 struct Gwn_Batch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
502                                 if (materials_len > 0 && geom_array) {
503                                         for (int i = 0; i < materials_len; i++) {
504                                                 Material *mat = give_current_material(ob, i + 1);
505                                                 Image *image;
506                                                 ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
507                                                 /* use OB_SOLID when no texture could be determined */
508                                                 int mat_drawtype = OB_SOLID;
509                                                 if (image) {
510                                                         mat_drawtype = OB_TEXTURE;
511                                                 }
512                                                 material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);
513                                                 DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
514                                         }
515                                         is_drawn = true;
516                                 }
517                         }
518                 }
519
520                 /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
521                 if (!is_drawn) {
522                         if ((wpd->shading.color_type != V3D_SHADING_MATERIAL_COLOR) || is_sculpt_mode) {
523                                 /* No material split needed */
524                                 struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
525                                 if (geom) {
526                                         material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
527                                         if (is_sculpt_mode) {
528                                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
529                                         }
530                                         else {
531                                                 DRW_shgroup_call_object_add(material->shgrp, geom, ob);
532                                         }
533                                 }
534                         }
535                         else { /* MATERIAL colors */
536                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
537                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
538                                 for (int i = 0; i < materials_len; i++) {
539                                         gpumat_array[i] = NULL;
540                                 }
541
542                                 struct Gwn_Batch **mat_geom = DRW_cache_object_surface_material_get(
543                                         ob, gpumat_array, materials_len, NULL, NULL, NULL);
544                                 if (mat_geom) {
545                                         for (int i = 0; i < materials_len; ++i) {
546                                                 Material *mat = give_current_material(ob, i + 1);
547                                                 material = get_or_create_material_data(vedata, ob, mat, NULL, OB_SOLID);
548                                                 DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
549                                         }
550                                 }
551                         }
552                 }
553
554                 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
555                         bool is_manifold;
556                         struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
557                         if (geom_shadow) {
558                                 if (is_sculpt_mode) {
559                                         /* Currently unsupported in sculpt mode. We could revert to the slow
560                                          * method in this case but i'm not sure if it's a good idea given that
561                                          * sculped meshes are heavy to begin with. */
562                                         // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
563                                 }
564                                 else {
565                                         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
566                                                 ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
567
568                                         if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
569
570                                                 invert_m4_m4(ob->imat, ob->obmat);
571                                                 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
572
573                                                 DRWShadingGroup *grp;
574                                                 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
575
576                                                 /* Unless we expose a parameter to the user, it's better to use the depth pass technique if the object is
577                                                  * non manifold. Exposing a switch to the user to force depth fail in this case can be beneficial for
578                                                  * planes and non-closed terrains. */
579                                                 if (!is_manifold) {
580                                                         use_shadow_pass_technique = true;
581                                                 }
582
583                                                 if (use_shadow_pass_technique) {
584                                                         if (is_manifold) {
585                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
586                                                         }
587                                                         else {
588                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
589                                                         }
590                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
591                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
592 #ifdef DEBUG_SHADOW_VOLUME
593                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
594 #endif
595                                                 }
596                                                 else {
597                                                         /* TODO(fclem): only use caps if they are in the view frustum. */
598                                                         const bool need_caps = true;
599                                                         if (need_caps) {
600                                                                 if (is_manifold) {
601                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
602                                                                 }
603                                                                 else {
604                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
605                                                                 }
606                                                                 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
607                                                                 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
608                                                         }
609
610                                                         if (is_manifold) {
611                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
612                                                         }
613                                                         else {
614                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
615                                                         }
616                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
617                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
618 #ifdef DEBUG_SHADOW_VOLUME
619                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
620 #endif
621                                                 }
622                                         }
623                                 }
624                         }
625                 }
626         }
627 }
628
/* Intentionally a no-op: the deferred workbench pipeline needs no per-frame
 * finalization after cache population. NOTE(review): presumably kept so the
 * engine exposes the full cache_init/populate/finish callback set — confirm
 * against the draw-engine type registration. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
632
633 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
634 {
635         WORKBENCH_StorageList *stl = vedata->stl;
636         WORKBENCH_FramebufferList *fbl = vedata->fbl;
637         WORKBENCH_PrivateData *wpd = stl->g_data;
638         const float clear_depth = 1.0f;
639         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
640         uint clear_stencil = 0xFF;
641
642         DRW_stats_group_start("Clear Background");
643         GPU_framebuffer_bind(fbl->prepass_fb);
644         int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
645         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
646         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
647         DRW_stats_group_end();
648 }
649
650 void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
651 {
652         WORKBENCH_PassList *psl = vedata->psl;
653         WORKBENCH_StorageList *stl = vedata->stl;
654         WORKBENCH_FramebufferList *fbl = vedata->fbl;
655         WORKBENCH_PrivateData *wpd = stl->g_data;
656         DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
657
658         /* clear in background */
659         GPU_framebuffer_bind(fbl->prepass_fb);
660         DRW_draw_pass(psl->prepass_pass);
661         if (SHADOW_ENABLED(wpd)) {
662 #ifdef DEBUG_SHADOW_VOLUME
663                 GPU_framebuffer_bind(fbl->composite_fb);
664                 DRW_draw_pass(psl->composite_pass);
665 #else
666                 GPU_framebuffer_bind(dfbl->depth_only_fb);
667 #endif
668                 DRW_draw_pass(psl->shadow_depth_pass_pass);
669                 DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
670                 DRW_draw_pass(psl->shadow_depth_fail_pass);
671                 DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
672                 DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
673                 DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
674 #ifndef DEBUG_SHADOW_VOLUME
675                 GPU_framebuffer_bind(fbl->composite_fb);
676                 DRW_draw_pass(psl->composite_pass);
677                 DRW_draw_pass(psl->composite_shadow_pass);
678 #endif
679         }
680         else {
681                 GPU_framebuffer_bind(fbl->composite_fb);
682                 DRW_draw_pass(psl->composite_pass);
683         }
684
685         GPU_framebuffer_bind(dfbl->color_only_fb);
686         DRW_transform_to_display(e_data.composite_buffer_tx);
687
688         workbench_private_data_free(wpd);
689 }