Workbench: Shadow: Use depth fail method for manifold objects.
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file workbench_deferred.c
23  *  \ingroup draw_engine
24  */
25
26 #include "workbench_private.h"
27
28 #include "BIF_gl.h"
29
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
33
34 #include "BKE_node.h"
35 #include "BKE_particle.h"
36
37 #include "DNA_image_types.h"
38 #include "DNA_mesh_types.h"
39 #include "DNA_modifier_types.h"
40 #include "DNA_node_types.h"
41
42 #include "ED_uvedit.h"
43
44 #include "GPU_shader.h"
45 #include "GPU_texture.h"
46
47
48 /* *********** STATIC *********** */
49
50 // #define DEBUG_SHADOW_VOLUME
51
/* File-static shared state for the deferred workbench engine.
 * Shaders are compiled lazily and cached across redraws; textures are
 * viewport-pool references that are re-queried every init. */
static struct {
        struct GPUShader *prepass_sh_cache[MAX_SHADERS];   /* lazily compiled per material config */
        struct GPUShader *composite_sh_cache[MAX_SHADERS]; /* only filled for OB_SOLID indices */
        struct GPUShader *shadow_fail_sh; /* depth-fail shadow volume sides */
        struct GPUShader *shadow_pass_sh; /* depth-pass shadow volumes */
        struct GPUShader *shadow_caps_sh; /* depth-fail caps (manifold objects) */

        struct GPUTexture *object_id_tx; /* ref only, not alloced */
        struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

        SceneDisplay display; /* world light direction for shadows */
        float light_direction_vs[3]; /* light direction transformed into view space */
        int next_object_id; /* 0 also doubles as the "not yet initialized" flag */
        float normal_world_matrix[3][3];
} e_data = {NULL};
69
70 /* Shaders */
71 extern char datatoc_workbench_prepass_vert_glsl[];
72 extern char datatoc_workbench_prepass_frag_glsl[];
73 extern char datatoc_workbench_deferred_composite_frag_glsl[];
74
75 extern char datatoc_workbench_shadow_vert_glsl[];
76 extern char datatoc_workbench_shadow_geom_glsl[];
77 extern char datatoc_workbench_shadow_caps_geom_glsl[];
78 extern char datatoc_workbench_shadow_debug_frag_glsl[];
79
80 extern char datatoc_workbench_background_lib_glsl[];
81 extern char datatoc_workbench_common_lib_glsl[];
82 extern char datatoc_workbench_data_lib_glsl[];
83 extern char datatoc_workbench_object_outline_lib_glsl[];
84 extern char datatoc_workbench_world_light_lib_glsl[];
85
86
87
88 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
89 {
90         char *str = NULL;
91
92         DynStr *ds = BLI_dynstr_new();
93
94         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
95         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
96         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
97
98         if (wpd->shading.light & V3D_LIGHTING_STUDIO) {
99                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
100         }
101         if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
102                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
103         }
104
105         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
106
107         str = BLI_dynstr_get_cstring(ds);
108         BLI_dynstr_free(ds);
109         return str;
110 }
111
112 static char *workbench_build_prepass_frag(void)
113 {
114         char *str = NULL;
115
116         DynStr *ds = BLI_dynstr_new();
117
118         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
119         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
120
121         str = BLI_dynstr_get_cstring(ds);
122         BLI_dynstr_free(ds);
123         return str;
124 }
125
126
127
128 static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, int drawtype)
129 {
130         if (e_data.prepass_sh_cache[index] == NULL) {
131                 char *defines = workbench_material_build_defines(wpd, drawtype);
132                 char *composite_frag = workbench_build_composite_frag(wpd);
133                 char *prepass_frag = workbench_build_prepass_frag();
134                 e_data.prepass_sh_cache[index] = DRW_shader_create(
135                         datatoc_workbench_prepass_vert_glsl, NULL, prepass_frag, defines);
136                 if (drawtype == OB_SOLID) {
137                         e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
138                 }
139                 MEM_freeN(prepass_frag);
140                 MEM_freeN(composite_frag);
141                 MEM_freeN(defines);
142         }
143 }
144
145 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
146 {
147         int index_solid = workbench_material_get_shader_index(wpd, OB_SOLID);
148         int index_texture = workbench_material_get_shader_index(wpd, OB_TEXTURE);
149
150         ensure_deferred_shaders(wpd, index_solid, OB_SOLID);
151         ensure_deferred_shaders(wpd, index_texture, OB_TEXTURE);
152
153         wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
154         wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
155         wpd->composite_sh = e_data.composite_sh_cache[index_solid];
156 }
157
158 /* Functions */
159
160
161 static void workbench_init_object_data(ObjectEngineData *engine_data)
162 {
163         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)engine_data;
164         data->object_id = e_data.next_object_id++;
165 }
166
/* Per-redraw engine init: lazily compile the shared shadow shaders (first run
 * only), (re)acquire viewport-sized pool textures, rebuild the prepass and
 * composite framebuffers, and create the prepass. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

        /* next_object_id == 0 doubles as a "first run" flag. */
        if (!e_data.next_object_id) {
                memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
                const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
                /* No fragment shader needed: shadow volumes only touch depth/stencil. */
                const char *shadow_frag = NULL;
#endif
                e_data.shadow_pass_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_PASS\n");
                e_data.shadow_fail_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n");
                /* Caps use their own geometry shader; no SHADOW_* define needed. */
                e_data.shadow_caps_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_caps_geom_glsl,
                        shadow_frag,
                        NULL);
        }

        if (!stl->g_data) {
                /* Alloc transient pointers */
                stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
        }

        workbench_private_data_init(stl->g_data);

        {
                const float *viewport_size = DRW_viewport_size_get();
                const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
                e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
                e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
                e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
                        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

                /* Encoded normals fit in two channels; otherwise store full vectors. */
                if (NORMAL_ENCODING_ENABLED()) {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
                }
                else {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
                }

                /* Prepass writes the G-buffer; both FBs share the default depth. */
                GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->composite_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                });
        }

        /* Prepass */
        {
                int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
                psl->prepass_pass = DRW_pass_create("Prepass", state);
        }
}
242
243 void workbench_deferred_engine_free()
244 {
245         for (int index = 0; index < MAX_SHADERS; index++) {
246                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
247                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
248         }
249         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
250         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
251         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
252 }
253
/* Bind the uniforms shared by every composite shading group: the prepass
 * G-buffer textures, the world UBO, and the inverted viewport size. */
static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
{
        /* Texture refs: the pool textures are re-queried each init, so bind by reference. */
        DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
        DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
        if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
                DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
        }
        DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
        DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);

        workbench_material_set_normal_world_matrix(grp, wpd, e_data.normal_world_matrix);
}
266
/* Build the composite and shadow-volume passes for this redraw.
 * The scene light direction is negated and transformed into view space for
 * the composite shaders; shadow passes write only to the stencil buffer
 * (except in DEBUG_SHADOW_VOLUME builds, which draw them additively). */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DRWShadingGroup *grp;
        const DRWContextState *draw_ctx = DRW_context_state_get();
        /* Static: the shgroup stores a pointer to this uniform value, so it must
         * outlive this function (uniform is bound by address below). */
        static float light_multiplier = 1.0f;


        Scene *scene = draw_ctx->scene;

        select_deferred_shaders(wpd);
        /* Deferred Mix Pass */
        {
                /* Negated: the shaders expect the direction towards the light. */
                copy_v3_v3(e_data.display.light_direction, scene->display.light_direction);
                negate_v3(e_data.display.light_direction);
#if 0
        if (STUDIOLIGHT_ORIENTATION_WORLD_ENABLED(wpd)) {
                BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_LIGHT_DIRECTION_CALCULATED);
                float rot_matrix[3][3];
                // float dir[3] = {0.57, 0.57, -0.57};
                axis_angle_to_mat3_single(rot_matrix, 'Z', wpd->shading.studiolight_rot_z);
                mul_v3_m3v3(e_data.display.light_direction, rot_matrix, wpd->studio_light->light_direction);
        }
#endif
                float view_matrix[4][4];
                DRW_viewport_matrix_get(view_matrix, DRW_MAT_VIEW);
                mul_v3_mat3_m4v3(e_data.light_direction_vs, view_matrix, e_data.display.light_direction);

                e_data.display.shadow_shift = scene->display.shadow_shift;

                if (SHADOW_ENABLED(wpd)) {
                        /* Lit areas: composite only where stencil equals the 0x00 reference. */
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        DRW_shgroup_uniform_vec3(grp, "lightDirection", e_data.light_direction_vs, 1);
                        DRW_shgroup_uniform_float(grp, "lightMultiplier", &light_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

#ifdef DEBUG_SHADOW_VOLUME
                        /* Debug builds draw the volumes as additive color instead of stencil. */
                        psl->shadow_depth_pass_pass = DRW_pass_create(
                                "Shadow Debug Pass", DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE);
                        grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                        psl->shadow_depth_fail_pass = DRW_pass_create(
                                "Shadow Debug Fail", DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE);
                        grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                        psl->shadow_depth_fail_caps_pass = DRW_pass_create(
                                "Shadow Depth Fail Caps", DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE);
                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
#else
                        /* Stencil-only shadow volume passes: depth-pass method, and
                         * depth-fail method (sides + caps) for manifold objects. */
                        psl->shadow_depth_pass_pass = DRW_pass_create(
                                "Shadow Depth Pass", DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS);
                        grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        psl->shadow_depth_fail_pass = DRW_pass_create(
                                "Shadow Depth Fail", DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL);
                        grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        psl->shadow_depth_fail_caps_pass = DRW_pass_create(
                                "Shadow Depth Fail Caps", DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL);
                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);

                        /* Shadowed areas: composite where stencil differs from the reference. */
                        psl->composite_shadow_pass = DRW_pass_create(
                                "Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_uniform_vec3(grp, "lightDirection", e_data.light_direction_vs, 1);
                        /* NOTE(review): here "lightMultiplier" is bound to shadow_multiplier,
                         * unlike the lit pass above (1.0) — presumably to darken shadowed
                         * areas, but worth confirming against the composite shader. */
                        DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif
                }
                else {
                        /* No shadows: a single unconditional full-screen composite. */
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
                }
        }
}
/* Look up (or lazily create) the per-material draw data for `ob`.
 * Materials are deduplicated through a hash over color, object id, drawtype
 * and image; a matching entry reuses its existing shading group.
 * `mat`/`ima` may be NULL (solid color without a texture). */
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int drawtype)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        WORKBENCH_MaterialData *material;
        /* Ensures the object has an id (assigned by workbench_init_object_data). */
        WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
                ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
        WORKBENCH_MaterialData material_template;

        /* Solid */
        workbench_material_get_solid_color(wpd, ob, mat, material_template.color);
        material_template.object_id = engine_object_data->object_id;
        material_template.drawtype = drawtype;
        material_template.ima = ima;
        uint hash = workbench_material_get_hash(&material_template);

        material = BLI_ghash_lookup(wpd->material_hash, SET_UINT_IN_POINTER(hash));
        if (material == NULL) {
                /* First occurrence of this combination: build the shading group. */
                material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
                material->shgrp = DRW_shgroup_create(
                        drawtype == OB_SOLID ? wpd->prepass_solid_sh : wpd->prepass_texture_sh, psl->prepass_pass);
                /* Geometry writes 0xFF into the stencil for the later composite tests. */
                DRW_shgroup_stencil_mask(material->shgrp, 0xFF);
                material->object_id = engine_object_data->object_id;
                copy_v4_v4(material->color, material_template.color);
                switch (drawtype) {
                        case OB_SOLID:
                                DRW_shgroup_uniform_vec3(material->shgrp, "object_color", material->color, 1);
                                break;

                        case OB_TEXTURE:
                        {
                                GPUTexture *tex = GPU_texture_from_blender(ima, NULL, GL_TEXTURE_2D, false, false, false);
                                DRW_shgroup_uniform_texture(material->shgrp, "image", tex);
                                break;
                        }
                }
                /* Bound by address: material outlives the draw (owned by the hash). */
                DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
                BLI_ghash_insert(wpd->material_hash, SET_UINT_IN_POINTER(hash), material);
        }
        return material;
}
399
400 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
401 {
402         const DRWContextState *draw_ctx = DRW_context_state_get();
403         if (ob == draw_ctx->object_edit) {
404                 return;
405         }
406         for (ParticleSystem *psys = ob->particlesystem.first; psys != NULL; psys = psys->next) {
407                 if (!psys_check_enabled(ob, psys, false)) {
408                         continue;
409                 }
410                 if (!DRW_check_psys_visible_within_active_context(ob, psys)) {
411                         return;
412                 }
413                 ParticleSettings *part = psys->part;
414                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
415
416                 static float mat[4][4];
417                 unit_m4(mat);
418
419                 if (draw_as == PART_DRAW_PATH) {
420                         struct Gwn_Batch *geom = DRW_cache_particles_get_hair(ob, psys, NULL);
421                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
422                         DRW_shgroup_call_add(material->shgrp, geom, mat);
423                 }
424         }
425 }
426
/* Add `ob`'s geometry to the prepass (and shadow passes when enabled).
 * Draw strategy, in order of preference:
 *   1. textured draw per material slot (OB_TEXTURE mode, mesh with UVs),
 *   2. single solid-color draw (non-material color type, or sculpt mode),
 *   3. per-material solid draw (V3D_SHADING_MATERIAL_COLOR).
 * Shadow volumes pick depth-pass for non-manifold and depth-fail (sides +
 * caps) for manifold geometry. */
void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;

        if (!DRW_object_is_renderable(ob))
                return;

        if (ob->type == OB_MESH) {
                workbench_cache_populate_particles(vedata, ob);
        }

        WORKBENCH_MaterialData *material;
        if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT)) {
                const DRWContextState *draw_ctx = DRW_context_state_get();
                const bool is_active = (ob == draw_ctx->obact);
                const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
                bool is_drawn = false;
                /* Texture mode: one shading group per material slot, falling back
                 * to solid per-slot when no image can be resolved. */
                if (!is_sculpt_mode && wpd->drawtype == OB_TEXTURE && ob->type == OB_MESH) {
                        const Mesh *me = ob->data;
                        if (me->mloopuv) {
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
                                struct Gwn_Batch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
                                if (materials_len > 0 && geom_array) {
                                        for (int i = 0; i < materials_len; i++) {
                                                Material *mat = give_current_material(ob, i + 1);
                                                Image *image;
                                                ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
                                                /* use OB_SOLID when no texture could be determined */
                                                int mat_drawtype = OB_SOLID;
                                                if (image) {
                                                        mat_drawtype = OB_TEXTURE;
                                                }
                                                material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);
                                                DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
                                        }
                                        is_drawn = true;
                                }
                        }
                }

                /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
                if (!is_drawn) {
                        if ((wpd->shading.color_type != V3D_SHADING_MATERIAL_COLOR) || is_sculpt_mode) {
                                /* No material split needed */
                                struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
                                if (geom) {
                                        material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
                                        if (is_sculpt_mode) {
                                                DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
                                        }
                                        else {
                                                DRW_shgroup_call_object_add(material->shgrp, geom, ob);
                                        }
                                }
                        }
                        else { /* MATERIAL colors */
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
                                for (int i = 0; i < materials_len; i++) {
                                        gpumat_array[i] = NULL;
                                }

                                struct Gwn_Batch **mat_geom = DRW_cache_object_surface_material_get(
                                        ob, gpumat_array, materials_len, NULL, NULL, NULL);
                                if (mat_geom) {
                                        for (int i = 0; i < materials_len; ++i) {
                                                Material *mat = give_current_material(ob, i + 1);
                                                material = get_or_create_material_data(vedata, ob, mat, NULL, OB_SOLID);
                                                DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
                                        }
                                }
                        }
                }

                if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
                        bool is_manifold;
                        struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
                        if (geom_shadow) {
                                if (is_sculpt_mode) {
                                        /* Currently unsupported in sculpt mode. We could revert to the slow
                                         * method in this case but i'm not sure if it's a good idea given that
                                         * sculped meshes are heavy to begin with. */
                                        // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
                                }
                                else {
                                        WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
                                                ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);

                                        /* Light direction in object space for the extrusion shader. */
                                        invert_m4_m4(ob->imat, ob->obmat);
                                        mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);

                                        DRWShadingGroup *grp;
                                        if (!is_manifold) {
                                                /* Non-manifold: only the depth-pass method is usable. */
                                                grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                                                DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                DRW_shgroup_call_object_add(grp, geom_shadow, ob);
                                        }
                                        else {
                                                /* Manifold: depth-fail method — caps (the surface itself)... */
                                                struct Gwn_Batch *geom_caps = DRW_cache_object_surface_get(ob);
                                                if (geom_caps) {
                                                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                                                        DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                        DRW_shgroup_call_object_add(grp, geom_caps, ob);
                                                }

                                                /* ...plus the extruded silhouette sides. */
                                                grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                                                DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                DRW_shgroup_call_object_add(grp, geom_shadow, ob);
                                        }
                                }
                        }
                }
        }
}
544
/* Nothing to finalize for the deferred engine; kept for draw-engine API symmetry. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
548
549 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
550 {
551         WORKBENCH_StorageList *stl = vedata->stl;
552         WORKBENCH_FramebufferList *fbl = vedata->fbl;
553         WORKBENCH_PrivateData *wpd = stl->g_data;
554         const float clear_depth = 1.0f;
555         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
556         uint clear_stencil = 0xFF;
557
558         DRW_stats_group_start("Clear Background");
559         GPU_framebuffer_bind(fbl->prepass_fb);
560         int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
561         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
562         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
563         DRW_stats_group_end();
564 }
565
/* Execute the cached passes in dependency order: prepass (G-buffer), shadow
 * volumes (stencil), composite pass(es), then blit the composite result to
 * the viewport and free the transient private data. */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

        /* clear in background */
        GPU_framebuffer_bind(fbl->prepass_fb);
        DRW_draw_pass(psl->prepass_pass);
        if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
                /* Debug: composite first, then draw the volumes additively as color. */
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
                DRW_draw_pass(psl->shadow_depth_pass_pass);
                DRW_draw_pass(psl->shadow_depth_fail_pass);
                DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
#else
                /* Write the shadow volumes into the stencil (depth-only FB), then
                 * composite lit and shadowed regions with separate stencil tests. */
                GPU_framebuffer_bind(dfbl->depth_only_fb);
                DRW_draw_pass(psl->shadow_depth_pass_pass);
                DRW_draw_pass(psl->shadow_depth_fail_pass);
                DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
                DRW_draw_pass(psl->composite_shadow_pass);
#endif
        }
        else {
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
        }

        /* Present: color-correct/blit the composite buffer to the viewport. */
        GPU_framebuffer_bind(dfbl->color_only_fb);
        DRW_transform_to_display(e_data.composite_buffer_tx);

        workbench_private_data_free(wpd);
}