Merge branch 'master' into blender2.8
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file workbench_deferred.c
23  *  \ingroup draw_engine
24  */
25
26 #include "workbench_private.h"
27
28 #include "BIF_gl.h"
29
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
33
34 #include "BKE_node.h"
35 #include "BKE_particle.h"
36
37 #include "DNA_image_types.h"
38 #include "DNA_mesh_types.h"
39 #include "DNA_modifier_types.h"
40 #include "DNA_node_types.h"
41
42 #include "ED_uvedit.h"
43
44 #include "GPU_shader.h"
45 #include "GPU_texture.h"
46
47
48 /* *********** STATIC *********** */
49
50 // #define DEBUG_SHADOW_VOLUME
51
52 #ifdef DEBUG_SHADOW_VOLUME
53 #  include "draw_debug.h"
54 #endif
55
/* Engine private data, shared by all viewports.
 * Lazily initialized on first engine_init (next_object_id == 0 is the
 * "not yet initialized" sentinel). */
static struct {
	struct GPUShader *prepass_sh_cache[MAX_SHADERS];
	struct GPUShader *composite_sh_cache[MAX_SHADERS];
	/* Stencil shadow-volume shaders: depth-pass vs depth-fail technique.
	 * NOTE(review): the un-suffixed variants are compiled with
	 * DOUBLE_MANIFOLD (see engine_init), i.e. they appear to be the
	 * non-manifold versions — confirm against the geometry shader. */
	struct GPUShader *shadow_fail_sh;
	struct GPUShader *shadow_fail_manifold_sh;
	struct GPUShader *shadow_pass_sh;
	struct GPUShader *shadow_pass_manifold_sh;
	struct GPUShader *shadow_caps_sh;
	struct GPUShader *shadow_caps_manifold_sh;

	struct GPUTexture *object_id_tx; /* ref only, not alloced */
	struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

	SceneDisplay display; /* world light direction for shadows */
	float light_direction_vs[3]; /* display.light_direction in view space */
	int next_object_id; /* monotonically increasing per-object id; 0 = uninitialized */
	float normal_world_matrix[3][3];
} e_data = {NULL};
76
77 /* Shaders */
78 extern char datatoc_workbench_prepass_vert_glsl[];
79 extern char datatoc_workbench_prepass_frag_glsl[];
80 extern char datatoc_workbench_deferred_composite_frag_glsl[];
81
82 extern char datatoc_workbench_shadow_vert_glsl[];
83 extern char datatoc_workbench_shadow_geom_glsl[];
84 extern char datatoc_workbench_shadow_caps_geom_glsl[];
85 extern char datatoc_workbench_shadow_debug_frag_glsl[];
86
87 extern char datatoc_workbench_background_lib_glsl[];
88 extern char datatoc_workbench_common_lib_glsl[];
89 extern char datatoc_workbench_data_lib_glsl[];
90 extern char datatoc_workbench_object_outline_lib_glsl[];
91 extern char datatoc_workbench_world_light_lib_glsl[];
92
93 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
94 {
95         char *str = NULL;
96
97         DynStr *ds = BLI_dynstr_new();
98
99         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
100         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
101         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
102
103         if (wpd->shading.light & V3D_LIGHTING_STUDIO) {
104                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
105         }
106         if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
107                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
108         }
109
110         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
111
112         str = BLI_dynstr_get_cstring(ds);
113         BLI_dynstr_free(ds);
114         return str;
115 }
116
117 static char *workbench_build_prepass_frag(void)
118 {
119         char *str = NULL;
120
121         DynStr *ds = BLI_dynstr_new();
122
123         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
124         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
125
126         str = BLI_dynstr_get_cstring(ds);
127         BLI_dynstr_free(ds);
128         return str;
129 }
130
131 static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, int drawtype)
132 {
133         if (e_data.prepass_sh_cache[index] == NULL) {
134                 char *defines = workbench_material_build_defines(wpd, drawtype);
135                 char *composite_frag = workbench_build_composite_frag(wpd);
136                 char *prepass_frag = workbench_build_prepass_frag();
137                 e_data.prepass_sh_cache[index] = DRW_shader_create(
138                         datatoc_workbench_prepass_vert_glsl, NULL, prepass_frag, defines);
139                 if (drawtype == OB_SOLID) {
140                         e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
141                 }
142                 MEM_freeN(prepass_frag);
143                 MEM_freeN(composite_frag);
144                 MEM_freeN(defines);
145         }
146 }
147
148 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
149 {
150         int index_solid = workbench_material_get_shader_index(wpd, OB_SOLID);
151         int index_texture = workbench_material_get_shader_index(wpd, OB_TEXTURE);
152
153         ensure_deferred_shaders(wpd, index_solid, OB_SOLID);
154         ensure_deferred_shaders(wpd, index_texture, OB_TEXTURE);
155
156         wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
157         wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
158         wpd->composite_sh = e_data.composite_sh_cache[index_solid];
159 }
160
161 /* Functions */
162
163
164 static void workbench_init_object_data(ObjectEngineData *engine_data)
165 {
166         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)engine_data;
167         data->object_id = e_data.next_object_id++;
168         data->shadow_bbox_dirty = true;
169 }
170
/* Engine init: runs every redraw. Performs one-time compilation of the
 * stencil-shadow shaders, allocates per-viewport transient storage,
 * queries the pooled render targets and configures the prepass and
 * composite framebuffers. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

	/* next_object_id == 0 doubles as a "first run" flag for one-time setup. */
	if (!e_data.next_object_id) {
		memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
		const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
		/* Stencil-only shadow passes need no fragment shader. */
		const char *shadow_frag = NULL;
#endif
		/* NOTE(review): the un-suffixed shaders get DOUBLE_MANIFOLD while the
		 * "_manifold_" ones do not — looks intentional (non-manifold geometry
		 * needs double extrusion) but confirm against the geometry shader. */
		e_data.shadow_pass_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_pass_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n");
		e_data.shadow_fail_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_fail_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
		/* Caps close the shadow volume (uses the caps geometry shader). */
		e_data.shadow_caps_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_caps_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
	}

	if (!stl->g_data) {
		/* Alloc transient pointers */
		stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
	}

	workbench_private_data_init(stl->g_data);

	{
		/* Query viewport-sized render targets from the shared texture pool. */
		const float *viewport_size = DRW_viewport_size_get();
		const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
		e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
		e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
		        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

		/* Normals: two-channel encoded when supported, full float otherwise. */
		if (NORMAL_ENCODING_ENABLED()) {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
		}
		else {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
		}

		GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->composite_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
		});
	}

	/* Prepass */
	{
		int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
		psl->prepass_pass = DRW_pass_create("Prepass", state);
	}
}
264
265 void workbench_deferred_engine_free()
266 {
267         for (int index = 0; index < MAX_SHADERS; index++) {
268                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
269                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
270         }
271         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
272         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
273         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
274         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
275         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
276         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
277 }
278
279 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
280 {
281         DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
282         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
283         if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
284                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
285         }
286         DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
287         DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
288
289         workbench_material_set_normal_world_matrix(grp, wpd, e_data.normal_world_matrix);
290 }
291
/* Build the composite pass(es) and, when shadows are enabled, the stencil
 * shadow-volume passes. Called once per redraw before cache population. */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DRWShadingGroup *grp;
	const DRWContextState *draw_ctx = DRW_context_state_get();
	/* Static: the draw manager keeps a pointer to this uniform's storage. */
	static float light_multiplier = 1.0f;


	Scene *scene = draw_ctx->scene;

	select_deferred_shaders(wpd);
	/* Deferred Mix Pass */
	{
		/* Shader wants the direction *towards* the light, hence negate. */
		copy_v3_v3(e_data.display.light_direction, scene->display.light_direction);
		negate_v3(e_data.display.light_direction);
#if 0
	if (STUDIOLIGHT_ORIENTATION_WORLD_ENABLED(wpd)) {
		BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_LIGHT_DIRECTION_CALCULATED);
		float rot_matrix[3][3];
		// float dir[3] = {0.57, 0.57, -0.57};
		axis_angle_to_mat3_single(rot_matrix, 'Z', wpd->shading.studiolight_rot_z);
		mul_v3_m3v3(e_data.display.light_direction, rot_matrix, wpd->studio_light->light_direction);
	}
#endif
		/* Transform the light direction into view space for the composite. */
		float view_matrix[4][4];
		DRW_viewport_matrix_get(view_matrix, DRW_MAT_VIEW);
		mul_v3_mat3_m4v3(e_data.light_direction_vs, view_matrix, e_data.display.light_direction);

		e_data.display.shadow_shift = scene->display.shadow_shift;

		if (SHADOW_ENABLED(wpd)) {
			/* Lit composite: only where stencil == 0 (not inside a shadow
			 * volume). lightMultiplier is a constant 1.0 here. */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_stencil_mask(grp, 0x00);
			DRW_shgroup_uniform_vec3(grp, "lightDirection", e_data.light_direction_vs, 1);
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &light_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

			/* Stencil Shadow passes. */
#ifdef DEBUG_SHADOW_VOLUME
			/* Debug mode draws the volumes additively instead of marking stencil. */
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
			DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
#else
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
			DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
			psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
			psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
			psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
			psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
			psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
			psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
			/* Default shading groups per pass; per-object groups are created
			 * during cache populate with the actual geometry. */
			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);

			/* Shadowed composite: where stencil != 0, light is dimmed by
			 * shadow_multiplier (bound as lightMultiplier here). */
			psl->composite_shadow_pass = DRW_pass_create("Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
			DRW_shgroup_stencil_mask(grp, 0x00);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_uniform_vec3(grp, "lightDirection", e_data.light_direction_vs, 1);
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif

			studiolight_update_light(wpd, e_data.display.light_direction);
		}
		else {
			/* No shadows: a single unconditioned composite pass. */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
		}
	}
}
389
/* Look up — or lazily create — the per-material draw data, keyed by a hash
 * of (solid color, object id, drawtype, image). Entries are owned by
 * wpd->material_hash and reused across objects with identical keys. */
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int drawtype)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	WORKBENCH_MaterialData *material;
	WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
	        ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
	WORKBENCH_MaterialData material_template;

	/* Solid */
	workbench_material_get_solid_color(wpd, ob, mat, material_template.color);
	material_template.object_id = engine_object_data->object_id;
	material_template.drawtype = drawtype;
	material_template.ima = ima;
	uint hash = workbench_material_get_hash(&material_template);

	material = BLI_ghash_lookup(wpd->material_hash, SET_UINT_IN_POINTER(hash));
	if (material == NULL) {
		/* NOTE(review): only object_id and color are copied from the
		 * template; drawtype/ima on the cached entry stay unset — confirm
		 * they are only needed for hash computation. */
		material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
		material->shgrp = DRW_shgroup_create(
		        drawtype == OB_SOLID ? wpd->prepass_solid_sh : wpd->prepass_texture_sh, psl->prepass_pass);
		/* Mark drawn pixels so the shadow composite can stencil-test them. */
		DRW_shgroup_stencil_mask(material->shgrp, 0xFF);
		material->object_id = engine_object_data->object_id;
		copy_v4_v4(material->color, material_template.color);
		switch (drawtype) {
			case OB_SOLID:
				DRW_shgroup_uniform_vec3(material->shgrp, "object_color", material->color, 1);
				break;

			case OB_TEXTURE:
			{
				GPUTexture *tex = GPU_texture_from_blender(ima, NULL, GL_TEXTURE_2D, false, false, false);
				DRW_shgroup_uniform_texture(material->shgrp, "image", tex);
				break;
			}
		}
		DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
		BLI_ghash_insert(wpd->material_hash, SET_UINT_IN_POINTER(hash), material);
	}
	return material;
}
433
434 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
435 {
436         const DRWContextState *draw_ctx = DRW_context_state_get();
437         if (ob == draw_ctx->object_edit) {
438                 return;
439         }
440         for (ParticleSystem *psys = ob->particlesystem.first; psys != NULL; psys = psys->next) {
441                 if (!psys_check_enabled(ob, psys, false)) {
442                         continue;
443                 }
444                 if (!DRW_check_psys_visible_within_active_context(ob, psys)) {
445                         return;
446                 }
447                 ParticleSettings *part = psys->part;
448                 const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
449
450                 static float mat[4][4];
451                 unit_m4(mat);
452
453                 if (draw_as == PART_DRAW_PATH) {
454                         struct Gwn_Batch *geom = DRW_cache_particles_get_hair(ob, psys, NULL);
455                         WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
456                         DRW_shgroup_call_add(material->shgrp, geom, mat);
457                 }
458         }
459 }
460
461 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
462 {
463         WORKBENCH_StorageList *stl = vedata->stl;
464         WORKBENCH_PassList *psl = vedata->psl;
465         WORKBENCH_PrivateData *wpd = stl->g_data;
466
467         if (!DRW_object_is_renderable(ob))
468                 return;
469
470         if (ob->type == OB_MESH) {
471                 workbench_cache_populate_particles(vedata, ob);
472         }
473
474         WORKBENCH_MaterialData *material;
475         if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT)) {
476                 const DRWContextState *draw_ctx = DRW_context_state_get();
477                 const bool is_active = (ob == draw_ctx->obact);
478                 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
479                 bool is_drawn = false;
480                 if (!is_sculpt_mode && wpd->drawtype == OB_TEXTURE && ob->type == OB_MESH) {
481                         const Mesh *me = ob->data;
482                         if (me->mloopuv) {
483                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
484                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
485                                 struct Gwn_Batch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
486                                 if (materials_len > 0 && geom_array) {
487                                         for (int i = 0; i < materials_len; i++) {
488                                                 Material *mat = give_current_material(ob, i + 1);
489                                                 Image *image;
490                                                 ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
491                                                 /* use OB_SOLID when no texture could be determined */
492                                                 int mat_drawtype = OB_SOLID;
493                                                 if (image) {
494                                                         mat_drawtype = OB_TEXTURE;
495                                                 }
496                                                 material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);
497                                                 DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
498                                         }
499                                         is_drawn = true;
500                                 }
501                         }
502                 }
503
504                 /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
505                 if (!is_drawn) {
506                         if ((wpd->shading.color_type != V3D_SHADING_MATERIAL_COLOR) || is_sculpt_mode) {
507                                 /* No material split needed */
508                                 struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
509                                 if (geom) {
510                                         material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
511                                         if (is_sculpt_mode) {
512                                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
513                                         }
514                                         else {
515                                                 DRW_shgroup_call_object_add(material->shgrp, geom, ob);
516                                         }
517                                 }
518                         }
519                         else { /* MATERIAL colors */
520                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
521                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
522                                 for (int i = 0; i < materials_len; i++) {
523                                         gpumat_array[i] = NULL;
524                                 }
525
526                                 struct Gwn_Batch **mat_geom = DRW_cache_object_surface_material_get(
527                                         ob, gpumat_array, materials_len, NULL, NULL, NULL);
528                                 if (mat_geom) {
529                                         for (int i = 0; i < materials_len; ++i) {
530                                                 Material *mat = give_current_material(ob, i + 1);
531                                                 material = get_or_create_material_data(vedata, ob, mat, NULL, OB_SOLID);
532                                                 DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
533                                         }
534                                 }
535                         }
536                 }
537
538                 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
539                         bool is_manifold;
540                         struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
541                         if (geom_shadow) {
542                                 if (is_sculpt_mode) {
543                                         /* Currently unsupported in sculpt mode. We could revert to the slow
544                                          * method in this case but i'm not sure if it's a good idea given that
545                                          * sculped meshes are heavy to begin with. */
546                                         // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
547                                 }
548                                 else {
549                                         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
550                                                 ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
551
552                                         if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
553
554                                                 invert_m4_m4(ob->imat, ob->obmat);
555                                                 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
556
557                                                 DRWShadingGroup *grp;
558                                                 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
559
560                                                 /* Unless we expose a parameter to the user, it's better to use the depth pass technique if the object is
561                                                  * non manifold. Exposing a switch to the user to force depth fail in this case can be beneficial for
562                                                  * planes and non-closed terrains. */
563                                                 if (!is_manifold) {
564                                                         use_shadow_pass_technique = true;
565                                                 }
566
567                                                 if (use_shadow_pass_technique) {
568                                                         if (is_manifold) {
569                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
570                                                         }
571                                                         else {
572                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
573                                                         }
574                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
575                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
576 #ifdef DEBUG_SHADOW_VOLUME
577                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
578 #endif
579                                                 }
580                                                 else {
581                                                         /* TODO(fclem): only use caps if they are in the view frustum. */
582                                                         const bool need_caps = true;
583                                                         if (need_caps) {
584                                                                 if (is_manifold) {
585                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
586                                                                 }
587                                                                 else {
588                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
589                                                                 }
590                                                                 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
591                                                                 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
592                                                         }
593
594                                                         if (is_manifold) {
595                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
596                                                         }
597                                                         else {
598                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
599                                                         }
600                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
601                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
602 #ifdef DEBUG_SHADOW_VOLUME
603                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
604 #endif
605                                                 }
606                                         }
607                                 }
608                         }
609                 }
610         }
611 }
612
/* Cache-finish callback for the deferred engine. No per-frame finalization is
 * needed here (all shading groups are fully built during cache population),
 * but the hook must exist to satisfy the draw-engine interface. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
616
617 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
618 {
619         WORKBENCH_StorageList *stl = vedata->stl;
620         WORKBENCH_FramebufferList *fbl = vedata->fbl;
621         WORKBENCH_PrivateData *wpd = stl->g_data;
622         const float clear_depth = 1.0f;
623         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
624         uint clear_stencil = 0xFF;
625
626         DRW_stats_group_start("Clear Background");
627         GPU_framebuffer_bind(fbl->prepass_fb);
628         int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
629         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
630         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
631         DRW_stats_group_end();
632 }
633
/* Execute all deferred-workbench passes for the current frame.
 *
 * Order matters here: the prepass fills the G-buffers, the shadow volume
 * passes update the (depth/stencil) state, and the composite passes resolve
 * the final shaded image before it is transformed to display space.
 * NOTE(review): the framebuffer bound when each pass runs is part of the
 * contract — do not reorder the binds relative to the DRW_draw_pass calls. */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

	/* clear in background */
	GPU_framebuffer_bind(fbl->prepass_fb);
	DRW_draw_pass(psl->prepass_pass);
	if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
		/* Debug build: composite first so the shadow volumes drawn below
		 * remain visible on top of the shaded result. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
#else
		/* Shadow volumes only touch depth/stencil, so a depth-only
		 * framebuffer is sufficient (and cheaper) here. */
		GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
		/* Both shadow techniques (depth-pass and depth-fail, each with a
		 * manifold variant, plus the caps passes for depth-fail) — objects
		 * were routed to exactly one set of these during cache population. */
		DRW_draw_pass(psl->shadow_depth_pass_pass);
		DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_pass);
		DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
#ifndef DEBUG_SHADOW_VOLUME
		/* Normal build: composite after the stencil has been built, with a
		 * separate pass for the shadowed (stencil-marked) regions. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
		DRW_draw_pass(psl->composite_shadow_pass);
#endif
	}
	else {
		/* No shadows: a single composite pass resolves the frame. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
	}

	/* Color-manage the composited result into the viewport's display buffer. */
	GPU_framebuffer_bind(dfbl->color_only_fb);
	DRW_transform_to_display(e_data.composite_buffer_tx);

	/* Per-frame private data is rebuilt each redraw; release it now. */
	workbench_private_data_free(wpd);
}