/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file workbench_deferred.c
 *  \ingroup draw_engine
 */

#include "workbench_private.h"

#include "BIF_gl.h"

#include "BLI_alloca.h"
#include "BLI_dynstr.h"
#include "BLI_utildefines.h"

#include "BKE_node.h"
#include "BKE_particle.h"

#include "DNA_image_types.h"
#include "DNA_mesh_types.h"
#include "DNA_modifier_types.h"
#include "DNA_node_types.h"

#include "ED_uvedit.h"

#include "GPU_shader.h"
#include "GPU_texture.h"


/* *********** STATIC *********** */

// #define DEBUG_SHADOW_VOLUME

#ifdef DEBUG_SHADOW_VOLUME
#  include "draw_debug.h"
#endif

static struct {
        struct GPUShader *prepass_sh_cache[MAX_SHADERS];
        struct GPUShader *composite_sh_cache[MAX_SHADERS];
        struct GPUShader *shadow_fail_sh;
        struct GPUShader *shadow_fail_manifold_sh;
        struct GPUShader *shadow_pass_sh;
        struct GPUShader *shadow_pass_manifold_sh;
        struct GPUShader *shadow_caps_sh;
        struct GPUShader *shadow_caps_manifold_sh;

        struct GPUTexture *object_id_tx; /* ref only, not alloced */
        struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *specular_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
        struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

        SceneDisplay display; /* world light direction for shadows */
        float light_direction_vs[3];
        int next_object_id;
        float normal_world_matrix[3][3];
} e_data = {{NULL}};

/* Shaders */
extern char datatoc_common_hair_lib_glsl[];

extern char datatoc_workbench_prepass_vert_glsl[];
extern char datatoc_workbench_prepass_frag_glsl[];
extern char datatoc_workbench_deferred_composite_frag_glsl[];

extern char datatoc_workbench_shadow_vert_glsl[];
extern char datatoc_workbench_shadow_geom_glsl[];
extern char datatoc_workbench_shadow_caps_geom_glsl[];
extern char datatoc_workbench_shadow_debug_frag_glsl[];

extern char datatoc_workbench_background_lib_glsl[];
extern char datatoc_workbench_common_lib_glsl[];
extern char datatoc_workbench_data_lib_glsl[];
extern char datatoc_workbench_object_outline_lib_glsl[];
extern char datatoc_workbench_world_light_lib_glsl[];

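/* Shader sources are assembled at runtime: each helper below concatenates the
 * datatoc_* GLSL library strings that the current shading settings require and
 * returns a newly allocated source string that the caller must free. */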
static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
{
        char *str = NULL;

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);

        if ((wpd->shading.light & V3D_LIGHTING_STUDIO) || (wpd->shading.flag & V3D_SHADING_SPECULAR_HIGHLIGHT)) {
                BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
        }
        if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
                BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
        }

        BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

static char *workbench_build_prepass_frag(void)
{
        char *str = NULL;

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

static char *workbench_build_prepass_vert(void)
{
        char *str = NULL;

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

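/* Compile the prepass shader (and, for solid non-hair variants, the composite
 * shader) for cache slot 'index' if it has not been created yet. The index
 * encodes the drawtype/hair/shading options, see workbench_material_get_shader_index(). */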
static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, int drawtype, bool is_hair)
{
        if (e_data.prepass_sh_cache[index] == NULL) {
                char *defines = workbench_material_build_defines(wpd, drawtype, is_hair);
                char *composite_frag = workbench_build_composite_frag(wpd);
                char *prepass_vert = workbench_build_prepass_vert();
                char *prepass_frag = workbench_build_prepass_frag();
                e_data.prepass_sh_cache[index] = DRW_shader_create(
                        prepass_vert, NULL,
                        prepass_frag, defines);
                if (drawtype == OB_SOLID && !is_hair) {
                        e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
                }
                MEM_freeN(prepass_vert);
                MEM_freeN(prepass_frag);
                MEM_freeN(composite_frag);
                MEM_freeN(defines);
        }
}

static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
{
        int index_solid = workbench_material_get_shader_index(wpd, OB_SOLID, false);
        int index_solid_hair = workbench_material_get_shader_index(wpd, OB_SOLID, true);
        int index_texture = workbench_material_get_shader_index(wpd, OB_TEXTURE, false);
        int index_texture_hair = workbench_material_get_shader_index(wpd, OB_TEXTURE, true);

        ensure_deferred_shaders(wpd, index_solid, OB_SOLID, false);
        ensure_deferred_shaders(wpd, index_solid_hair, OB_SOLID, true);
        ensure_deferred_shaders(wpd, index_texture, OB_TEXTURE, false);
        ensure_deferred_shaders(wpd, index_texture_hair, OB_TEXTURE, true);

        wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
        wpd->prepass_solid_hair_sh = e_data.prepass_sh_cache[index_solid_hair];
        wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
        wpd->prepass_texture_hair_sh = e_data.prepass_sh_cache[index_texture_hair];
        wpd->composite_sh = e_data.composite_sh_cache[index_solid];
}

/* Functions */


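/* DRW callback, run the first time an object is seen by this engine: assign a
 * unique id (used for the object outline pass) and flag the shadow bounding box
 * for recomputation. */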
static void workbench_init_object_data(ObjectEngineData *engine_data)
{
        WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)engine_data;
        data->object_id = e_data.next_object_id++;
        data->shadow_bbox_dirty = true;
}

void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

        if (!e_data.next_object_id) {
                memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
                e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
                const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
                const char *shadow_frag = NULL;
#endif
                e_data.shadow_pass_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_PASS\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_pass_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_PASS\n");
                e_data.shadow_fail_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_fail_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n");
                e_data.shadow_caps_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_caps_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n"
                        "#define DOUBLE_MANIFOLD\n");
                e_data.shadow_caps_manifold_sh = DRW_shader_create(
                        datatoc_workbench_shadow_vert_glsl,
                        datatoc_workbench_shadow_caps_geom_glsl,
                        shadow_frag,
                        "#define SHADOW_FAIL\n");
        }

        if (!stl->g_data) {
                /* Alloc transient pointers */
                stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
        }

        workbench_private_data_init(stl->g_data);

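        /* G-buffer setup: the prepass renders object id, color, specular and normal
         * into separate render targets that the composite shader resolves later.
         * Normals are packed into a two-channel RG16 target when normal encoding is
         * enabled, otherwise a full RGBA32F target is used. */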
        {
                const float *viewport_size = DRW_viewport_size_get();
                const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
                e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
                e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
                e_data.specular_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
                e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
                        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

                if (NORMAL_ENCODING_ENABLED()) {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
                }
                else {
                        e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
                                size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
                }

                GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.specular_buffer_tx),
                        GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
                });
                GPU_framebuffer_ensure_config(&fbl->composite_fb, {
                        GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                        GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
                });
        }

        /* Prepass */
        {
                int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
                psl->prepass_pass = DRW_pass_create("Prepass", state);
                psl->prepass_hair_pass = DRW_pass_create("Prepass", state);
        }
}

void workbench_deferred_engine_free(void)
{
        for (int index = 0; index < MAX_SHADERS; index++) {
                DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
                DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
        }
        DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
        DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
        DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
        DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
        DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
        DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
}

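/* Bind the G-buffer textures and per-world settings that every deferred
 * composite shading group needs. */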
static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
{
        DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
        DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
        if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
                DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
        }
        if (SPECULAR_HIGHLIGHT_ENABLED(wpd)) {
                DRW_shgroup_uniform_texture_ref(grp, "specularBuffer", &e_data.specular_buffer_tx);

#if 0
                float invwinmat[4][4];
                DRW_viewport_matrix_get(invwinmat, DRW_MAT_WININV);

                copy_v4_fl4(e_data.screenvecs[0],  1.0f, -1.0f, 0.0f, 1.0f);
                copy_v4_fl4(e_data.screenvecs[1], -1.0f,  1.0f, 0.0f, 1.0f);
                copy_v4_fl4(e_data.screenvecs[2], -1.0f, -1.0f, 0.0f, 1.0f);
                for (int i = 0; i < 3; i++) {
                        mul_m4_v4(invwinmat, e_data.screenvecs[i]);
                        e_data.screenvecs[i][0] /= e_data.screenvecs[i][3]; /* perspective divide */
                        e_data.screenvecs[i][1] /= e_data.screenvecs[i][3]; /* perspective divide */
                        e_data.screenvecs[i][2] /= e_data.screenvecs[i][3]; /* perspective divide */
                        e_data.screenvecs[i][3] = 1.0f;
                }
                sub_v3_v3(e_data.screenvecs[0], e_data.screenvecs[2]);
                sub_v3_v3(e_data.screenvecs[1], e_data.screenvecs[2]);
                DRW_shgroup_uniform_vec4(grp, "screenvecs[0]", e_data.screenvecs[0], 3);
#endif
        }
        DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
        DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);

        workbench_material_set_normal_world_matrix(grp, wpd, e_data.normal_world_matrix);
}

void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DRWShadingGroup *grp;
        const DRWContextState *draw_ctx = DRW_context_state_get();
        static float light_multiplier = 1.0f;


        Scene *scene = draw_ctx->scene;

        select_deferred_shaders(wpd);
        /* Deferred Mix Pass */
        {
                workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);

                e_data.display.shadow_shift = scene->display.shadow_shift;
                copy_v3_v3(e_data.light_direction_vs, wpd->world_data.lights[0].light_direction_vs);

                if (SHADOW_ENABLED(wpd)) {
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        DRW_shgroup_uniform_float(grp, "lightMultiplier", &light_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

                        /* Stencil Shadow passes. */
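                        /* Shadowing uses stencil shadow volumes: the depth-pass (Z-pass) and
                         * depth-fail (Z-fail) variants each get their own pass below, and
                         * manifold geometry gets dedicated shaders compiled without the
                         * DOUBLE_MANIFOLD define. The caps passes close the volumes that the
                         * depth-fail technique requires. */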
#ifdef DEBUG_SHADOW_VOLUME
                        DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
                        DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
#else
                        DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
                        DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
                        psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
                        psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
                        psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
                        psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
                        psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
                        psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);
                        grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
                        DRW_shgroup_stencil_mask(grp, 0xFF);

                        psl->composite_shadow_pass = DRW_pass_create("Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
                        DRW_shgroup_stencil_mask(grp, 0x00);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
                        DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif

                        studiolight_update_light(wpd, e_data.display.light_direction);
                }
                else {
                        psl->composite_pass = DRW_pass_create(
                                "Composite", DRW_STATE_WRITE_COLOR);
                        grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                        workbench_composite_uniforms(wpd, grp);
                        DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
                }
        }
}

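/* Return the cached WORKBENCH_MaterialData for this (object, material, image,
 * drawtype) combination, creating the prepass shading group, material UBO and
 * hash entry on first use. */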
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int drawtype)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        WORKBENCH_MaterialData *material;
        WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
                ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
        WORKBENCH_MaterialData material_template;

        /* Solid */
        workbench_material_update_data(wpd, ob, mat, &material_template);
        material_template.object_id = engine_object_data->object_id;
        material_template.drawtype = drawtype;
        material_template.ima = ima;
        uint hash = workbench_material_get_hash(&material_template);

        material = BLI_ghash_lookup(wpd->material_hash, SET_UINT_IN_POINTER(hash));
        if (material == NULL) {
                material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
                material->shgrp = DRW_shgroup_create(
                        drawtype == OB_SOLID ? wpd->prepass_solid_sh : wpd->prepass_texture_sh, psl->prepass_pass);
                DRW_shgroup_stencil_mask(material->shgrp, 0xFF);
                material->object_id = engine_object_data->object_id;
                copy_v4_v4(material->material_data.diffuse_color, material_template.material_data.diffuse_color);
                copy_v4_v4(material->material_data.specular_color, material_template.material_data.specular_color);
                material->material_data.roughness = material_template.material_data.roughness;
                switch (drawtype) {
                        case OB_SOLID:
                                break;

                        case OB_TEXTURE:
                        {
                                GPUTexture *tex = GPU_texture_from_blender(ima, NULL, GL_TEXTURE_2D, false, false, false);
                                DRW_shgroup_uniform_texture(material->shgrp, "image", tex);
                                break;
                        }
                }
                DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
                material->material_ubo = DRW_uniformbuffer_create(sizeof(WORKBENCH_UBO_Material), &material->material_data);
                DRW_shgroup_uniform_block(material->shgrp, "material_block", material->material_ubo);

                BLI_ghash_insert(wpd->material_hash, SET_UINT_IN_POINTER(hash), material);
        }
        return material;
}

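/* Add hair shading groups for every enabled particle system on this object that
 * is drawn as paths. Textured drawing falls back to solid when no image can be
 * resolved for the particle material slot. */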
static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        const DRWContextState *draw_ctx = DRW_context_state_get();
        if (ob == draw_ctx->object_edit) {
                return;
        }
        for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
                if (md->type != eModifierType_ParticleSystem) {
                        continue;
                }
                ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
                if (!psys_check_enabled(ob, psys, false)) {
                        continue;
                }
                if (!DRW_check_psys_visible_within_active_context(ob, psys)) {
                        continue;
                }
                ParticleSettings *part = psys->part;
                const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;

                if (draw_as == PART_DRAW_PATH) {
                        Image *image = NULL;
                        Material *mat = give_current_material(ob, part->omat);
                        int mat_drawtype = OB_SOLID;

                        if (wpd->drawtype == OB_TEXTURE) {
                                ED_object_get_active_image(ob, part->omat, &image, NULL, NULL, NULL);
                                /* use OB_SOLID when no texture could be determined */
                                if (image) {
                                        mat_drawtype = OB_TEXTURE;
                                }
                        }

                        WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);

                        struct GPUShader *shader = (mat_drawtype == OB_SOLID)
                                                   ? wpd->prepass_solid_hair_sh
                                                   : wpd->prepass_texture_hair_sh;
                        DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
                                                        ob, psys, md,
                                                        psl->prepass_hair_pass,
                                                        shader);
                        DRW_shgroup_stencil_mask(shgrp, 0xFF);
                        DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
                        DRW_shgroup_uniform_block(shgrp, "material_block", material->material_ubo);
                        if (image) {
                                GPUTexture *tex = GPU_texture_from_blender(image, NULL, GL_TEXTURE_2D, false, false, false);
                                DRW_shgroup_uniform_texture(shgrp, "image", tex);
                        }
                }
        }
}

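/* Main per-object cache population: register surface geometry with the prepass
 * (splitting per material slot for textured or material-color shading) and, when
 * shadows are enabled, add the object's shadow volume to the stencil passes. */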
void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;

        if (!DRW_object_is_renderable(ob))
                return;

        if (ob->type == OB_MESH) {
                workbench_cache_populate_particles(vedata, ob);
        }

        WORKBENCH_MaterialData *material;
        if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT)) {
                const DRWContextState *draw_ctx = DRW_context_state_get();
                const bool is_active = (ob == draw_ctx->obact);
                const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
                bool is_drawn = false;
                if (!is_sculpt_mode && wpd->drawtype == OB_TEXTURE && ob->type == OB_MESH) {
                        const Mesh *me = ob->data;
                        if (me->mloopuv) {
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
                                struct Gwn_Batch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
                                if (materials_len > 0 && geom_array) {
                                        for (int i = 0; i < materials_len; i++) {
                                                Material *mat = give_current_material(ob, i + 1);
                                                Image *image;
                                                ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
                                                /* use OB_SOLID when no texture could be determined */
                                                int mat_drawtype = OB_SOLID;
                                                if (image) {
                                                        mat_drawtype = OB_TEXTURE;
                                                }
                                                material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);
                                                DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
                                        }
                                        is_drawn = true;
                                }
                        }
                }

                /* Fallback when textured drawing was not used, or for plain OB_SOLID mode. */
                if (!is_drawn) {
                        if ((wpd->shading.color_type != V3D_SHADING_MATERIAL_COLOR) || is_sculpt_mode) {
                                /* No material split needed */
                                struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
                                if (geom) {
                                        material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
                                        if (is_sculpt_mode) {
                                                DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
                                        }
                                        else {
                                                DRW_shgroup_call_object_add(material->shgrp, geom, ob);
                                        }
                                }
                        }
                        else { /* MATERIAL colors */
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
                                for (int i = 0; i < materials_len; i++) {
                                        gpumat_array[i] = NULL;
                                }

                                struct Gwn_Batch **mat_geom = DRW_cache_object_surface_material_get(
                                        ob, gpumat_array, materials_len, NULL, NULL, NULL);
                                if (mat_geom) {
                                        for (int i = 0; i < materials_len; ++i) {
                                                Material *mat = give_current_material(ob, i + 1);
                                                material = get_or_create_material_data(vedata, ob, mat, NULL, OB_SOLID);
                                                DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
                                        }
                                }
                        }
                }

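                /* Shadow volumes: extrude the object's edge-detection geometry along the
                 * light direction and register it with either the depth-pass or the
                 * depth-fail stencil passes, depending on whether the camera sits inside
                 * the object's shadow volume. */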
                if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
                        bool is_manifold;
                        struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
                        if (geom_shadow) {
                                if (is_sculpt_mode) {
                                        /* Currently unsupported in sculpt mode. We could revert to the slow
                                         * method in this case but I'm not sure if it's a good idea given that
                                         * sculpted meshes are heavy to begin with. */
                                        // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
                                }
                                else {
                                        WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
                                                ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);

                                        if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {

                                                invert_m4_m4(ob->imat, ob->obmat);
                                                mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);

                                                DRWShadingGroup *grp;
                                                bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);

                                                /* Unless we expose a parameter to the user, it's better to use the
                                                 * depth-pass technique if the object is non-manifold. Exposing a
                                                 * switch to force depth-fail in this case could be beneficial for
                                                 * planes and non-closed terrains. */
                                                if (!is_manifold) {
                                                        use_shadow_pass_technique = true;
                                                }

                                                if (use_shadow_pass_technique) {
                                                        if (is_manifold) {
                                                                grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
                                                        }
                                                        else {
                                                                grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
                                                        }
                                                        DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                        DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
#ifdef DEBUG_SHADOW_VOLUME
                                                        DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
#endif
                                                }
                                                else {
                                                        /* TODO(fclem): only use caps if they are in the view frustum. */
                                                        const bool need_caps = true;
                                                        if (need_caps) {
                                                                if (is_manifold) {
                                                                        grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
                                                                }
                                                                else {
                                                                        grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
                                                                }
                                                                DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                                DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
                                                        }

                                                        if (is_manifold) {
                                                                grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
                                                        }
                                                        else {
                                                                grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
                                                        }
                                                        DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
                                                        DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
#ifdef DEBUG_SHADOW_VOLUME
                                                        DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
#endif
                                                }
                                        }
                                }
                        }
                }
        }
}

void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}

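/* Clear the prepass render targets (including the stencil buffer when shadows
 * are enabled) before any geometry is drawn. */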
void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        const float clear_depth = 1.0f;
        const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
        uint clear_stencil = 0xFF;

        DRW_stats_group_start("Clear Background");
        GPU_framebuffer_bind(fbl->prepass_fb);
        int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
        SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
        GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
        DRW_stats_group_end();
}

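/* Execute the deferred pipeline: fill the G-buffer, rasterize the shadow volumes
 * into the stencil buffer, then run the composite shader twice (lit pixels where
 * the stencil is zero, shadowed pixels where it is not) before converting the
 * result to display space. */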
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

        /* The prepass targets were already cleared in workbench_deferred_draw_background(). */
        GPU_framebuffer_bind(fbl->prepass_fb);
        DRW_draw_pass(psl->prepass_pass);
        DRW_draw_pass(psl->prepass_hair_pass);
        if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
#else
                GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
                DRW_draw_pass(psl->shadow_depth_pass_pass);
                DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
                DRW_draw_pass(psl->shadow_depth_fail_pass);
                DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
                DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
                DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
#ifndef DEBUG_SHADOW_VOLUME
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
                DRW_draw_pass(psl->composite_shadow_pass);
#endif
        }
        else {
                GPU_framebuffer_bind(fbl->composite_fb);
                DRW_draw_pass(psl->composite_pass);
        }

        GPU_framebuffer_bind(dfbl->color_only_fb);
        DRW_transform_to_display(e_data.composite_buffer_tx);

        workbench_private_data_free(wpd);
}