Workbench: XFlip Matcap per 3D View
[blender.git] / source / blender / draw / engines / workbench / workbench_deferred.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file workbench_deferred.c
23  *  \ingroup draw_engine
24  */
25
26 #include "workbench_private.h"
27
28 #include "BIF_gl.h"
29
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
33 #include "BLI_rand.h"
34
35 #include "BKE_node.h"
36 #include "BKE_particle.h"
37
38 #include "DNA_image_types.h"
39 #include "DNA_mesh_types.h"
40 #include "DNA_modifier_types.h"
41 #include "DNA_node_types.h"
42
43 #include "ED_uvedit.h"
44
45 #include "GPU_shader.h"
46 #include "GPU_texture.h"
47
48 #include "../eevee/eevee_lut.h" /* TODO find somewhere to share blue noise Table */
49
50 /* *********** STATIC *********** */
51
52 // #define DEBUG_SHADOW_VOLUME
53
54 #ifdef DEBUG_SHADOW_VOLUME
55 #  include "draw_debug.h"
56 #endif
57
/* Engine-wide static data: shader caches, per-viewport render-target
 * references and AO sampling resources. Shared by all viewports; freed in
 * workbench_deferred_engine_free(). */
static struct {
	/* Prepass/composite shaders, indexed by workbench_material_get_shader_index(). */
	struct GPUShader *prepass_sh_cache[MAX_SHADERS];
	struct GPUShader *composite_sh_cache[MAX_SHADERS];
	struct GPUShader *cavity_sh;
	/* Stencil shadow-volume shaders: depth-pass vs depth-fail variants,
	 * each with a manifold (single-sided) and non-manifold flavor. */
	struct GPUShader *shadow_fail_sh;
	struct GPUShader *shadow_fail_manifold_sh;
	struct GPUShader *shadow_pass_sh;
	struct GPUShader *shadow_pass_manifold_sh;
	struct GPUShader *shadow_caps_sh;
	struct GPUShader *shadow_caps_manifold_sh;

	struct GPUTexture *object_id_tx; /* ref only, not alloced */
	struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *cavity_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *specular_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
	struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

	SceneDisplay display; /* world light direction for shadows */
	float light_direction_vs[3];
	int next_object_id; /* 0 means "engine not initialized yet" */
	float normal_world_matrix[3][3];

	/* SSAO sampling: disk-sample UBO + per-pixel jitter texture, rebuilt
	 * when the scene sample count changes. */
	struct GPUUniformBuffer *sampling_ubo;
	struct GPUTexture *jitter_tx;
	int cached_sample_num;
} e_data = {{NULL}};
85
86 /* Shaders */
87 extern char datatoc_common_hair_lib_glsl[];
88
89 extern char datatoc_workbench_prepass_vert_glsl[];
90 extern char datatoc_workbench_prepass_frag_glsl[];
91 extern char datatoc_workbench_cavity_frag_glsl[];
92 extern char datatoc_workbench_deferred_composite_frag_glsl[];
93
94 extern char datatoc_workbench_shadow_vert_glsl[];
95 extern char datatoc_workbench_shadow_geom_glsl[];
96 extern char datatoc_workbench_shadow_caps_geom_glsl[];
97 extern char datatoc_workbench_shadow_debug_frag_glsl[];
98
99 extern char datatoc_workbench_background_lib_glsl[];
100 extern char datatoc_workbench_cavity_lib_glsl[];
101 extern char datatoc_workbench_common_lib_glsl[];
102 extern char datatoc_workbench_data_lib_glsl[];
103 extern char datatoc_workbench_object_outline_lib_glsl[];
104 extern char datatoc_workbench_world_light_lib_glsl[];
105
106 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
107 {
108         char *str = NULL;
109
110         DynStr *ds = BLI_dynstr_new();
111
112         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
113         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
114         BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
115
116         if ((wpd->shading.light & V3D_LIGHTING_STUDIO) || (wpd->shading.flag & V3D_SHADING_SPECULAR_HIGHLIGHT)) {
117                 BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
118         }
119         if (wpd->shading.flag & V3D_SHADING_OBJECT_OUTLINE) {
120                 BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
121         }
122
123         BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
124
125         str = BLI_dynstr_get_cstring(ds);
126         BLI_dynstr_free(ds);
127         return str;
128 }
129
130 static char *workbench_build_prepass_frag(void)
131 {
132         char *str = NULL;
133
134         DynStr *ds = BLI_dynstr_new();
135
136         BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
137         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
138         BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
139
140         str = BLI_dynstr_get_cstring(ds);
141         BLI_dynstr_free(ds);
142         return str;
143 }
144
145 static char *workbench_build_prepass_vert(void)
146 {
147         char *str = NULL;
148
149         DynStr *ds = BLI_dynstr_new();
150
151         BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
152         BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
153
154         str = BLI_dynstr_get_cstring(ds);
155         BLI_dynstr_free(ds);
156         return str;
157 }
158
159 static char *workbench_build_cavity_frag(void)
160 {
161         char *str = NULL;
162
163         DynStr *ds = BLI_dynstr_new();
164
165         BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
166         BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
167         BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
168
169         str = BLI_dynstr_get_cstring(ds);
170         BLI_dynstr_free(ds);
171         return str;
172 }
173
174 static void ensure_deferred_shaders(WORKBENCH_PrivateData *wpd, int index, int drawtype, bool is_hair)
175 {
176         if (e_data.prepass_sh_cache[index] == NULL) {
177                 char *defines = workbench_material_build_defines(wpd, drawtype, is_hair);
178                 char *composite_frag = workbench_build_composite_frag(wpd);
179                 char *prepass_vert = workbench_build_prepass_vert();
180                 char *prepass_frag = workbench_build_prepass_frag();
181                 e_data.prepass_sh_cache[index] = DRW_shader_create(
182                         prepass_vert, NULL,
183                         prepass_frag, defines);
184                 if (drawtype == OB_SOLID && !is_hair) {
185                         e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
186                 }
187                 MEM_freeN(prepass_vert);
188                 MEM_freeN(prepass_frag);
189                 MEM_freeN(composite_frag);
190                 MEM_freeN(defines);
191         }
192 }
193
194 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
195 {
196         int index_solid = workbench_material_get_shader_index(wpd, OB_SOLID, false);
197         int index_solid_hair = workbench_material_get_shader_index(wpd, OB_SOLID, true);
198         int index_texture = workbench_material_get_shader_index(wpd, OB_TEXTURE, false);
199         int index_texture_hair = workbench_material_get_shader_index(wpd, OB_TEXTURE, true);
200
201         ensure_deferred_shaders(wpd, index_solid, OB_SOLID, false);
202         ensure_deferred_shaders(wpd, index_solid_hair, OB_SOLID, true);
203         ensure_deferred_shaders(wpd, index_texture, OB_TEXTURE, false);
204         ensure_deferred_shaders(wpd, index_texture_hair, OB_TEXTURE, true);
205
206         wpd->prepass_solid_sh = e_data.prepass_sh_cache[index_solid];
207         wpd->prepass_solid_hair_sh = e_data.prepass_sh_cache[index_solid_hair];
208         wpd->prepass_texture_sh = e_data.prepass_sh_cache[index_texture];
209         wpd->prepass_texture_hair_sh = e_data.prepass_sh_cache[index_texture_hair];
210         wpd->composite_sh = e_data.composite_sh_cache[index_solid];
211 }
212
213
214 /* Using Hammersley distribution */
215 static float *create_disk_samples(int num_samples)
216 {
217         /* vec4 to ensure memory alignment. */
218         float (*texels)[4] = MEM_mallocN(sizeof(float[4]) * num_samples, "concentric_tex");
219         const float num_samples_inv = 1.0f / num_samples;
220
221         for (int i = 0; i < num_samples; i++) {
222                 float r = (i + 0.5f) * num_samples_inv;
223                 double dphi;
224                 BLI_hammersley_1D(i, &dphi);
225
226                 float phi = (float)dphi * 2.0f * M_PI;
227                 texels[i][0] = cosf(phi);
228                 texels[i][1] = sinf(phi);
229                 /* This deliberatly distribute more samples
230                  * at the center of the disk (and thus the shadow). */
231                 texels[i][2] = r;
232         }
233
234         return (float *)texels;
235 }
236
237 static struct GPUTexture *create_jitter_texture(int num_samples)
238 {
239         float jitter[64 * 64][3];
240         const float num_samples_inv = 1.0f / num_samples;
241
242         for (int i = 0; i < 64 * 64; i++) {
243                 float phi = blue_noise[i][0] * 2.0f * M_PI;
244                 /* This rotate the sample per pixels */
245                 jitter[i][0] = cosf(phi);
246                 jitter[i][1] = sinf(phi);
247                 /* This offset the sample along it's direction axis (reduce banding) */
248                 float bn = blue_noise[i][1] - 0.5f;
249                 CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
250                 jitter[i][2] = bn * num_samples_inv;
251         }
252
253         UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
254
255         return DRW_texture_create_2D(64, 64, GPU_RGB16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
256 }
257 /* Functions */
258
259
260 static void workbench_init_object_data(ObjectEngineData *engine_data)
261 {
262         WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)engine_data;
263         data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
264         data->shadow_bbox_dirty = true;
265 }
266
/* Initialize the deferred engine for a redraw: compile the shared static
 * shaders on first run, (re)acquire the per-viewport render targets and
 * framebuffers, rebuild the AO sampling UBO/jitter texture when the sample
 * count changed, and create the prepass and cavity passes. */
void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	DefaultTextureList *dtxl = DRW_viewport_texture_list_get();

	/* next_object_id == 0 means first run: build all static shaders once. */
	if (!e_data.next_object_id) {
		memset(e_data.prepass_sh_cache,   0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		memset(e_data.composite_sh_cache, 0x00, sizeof(struct GPUShader *) * MAX_SHADERS);
		e_data.next_object_id = 1;
#ifdef DEBUG_SHADOW_VOLUME
		const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
#else
		/* NULL fragment stage: the shadow volumes only write stencil. */
		const char *shadow_frag = NULL;
#endif
		/* Depth-pass / depth-fail shadow volume shaders; the plain variants
		 * use DOUBLE_MANIFOLD for non-manifold geometry, the *_manifold_sh
		 * variants are the single-sided versions. */
		e_data.shadow_pass_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_pass_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_PASS\n");
		e_data.shadow_fail_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_fail_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");
		/* Caps close the shadow volumes for the depth-fail method. */
		e_data.shadow_caps_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n"
		        "#define DOUBLE_MANIFOLD\n");
		e_data.shadow_caps_manifold_sh = DRW_shader_create(
		        datatoc_workbench_shadow_vert_glsl,
		        datatoc_workbench_shadow_caps_geom_glsl,
		        shadow_frag,
		        "#define SHADOW_FAIL\n");

		char *cavity_frag = workbench_build_cavity_frag();
		e_data.cavity_sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
		MEM_freeN(cavity_frag);
	}

	if (!stl->g_data) {
		/* Alloc transient pointers */
		stl->g_data = MEM_mallocN(sizeof(*stl->g_data), __func__);
	}

	WORKBENCH_PrivateData *wpd = stl->g_data;
	workbench_private_data_init(wpd);

	/* Per-viewport render targets + framebuffers (pooled, re-queried every
	 * redraw so viewport resizes are handled automatically). */
	{
		const float *viewport_size = DRW_viewport_size_get();
		const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
		e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R32UI, &draw_engine_workbench_solid);
		e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		e_data.cavity_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
		e_data.specular_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
		e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
		        size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_solid);

		/* Normals can be packed into two channels when encoding is enabled;
		 * otherwise fall back to a full-float RGBA target. */
		if (NORMAL_ENCODING_ENABLED()) {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RG16, &draw_engine_workbench_solid);
		}
		else {
			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(
			        size[0], size[1], GPU_RGBA32F, &draw_engine_workbench_solid);
		}

		GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.specular_buffer_tx),
			GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->cavity_fb, {
			GPU_ATTACHMENT_NONE,
			GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
		});
		GPU_framebuffer_ensure_config(&fbl->composite_fb, {
			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
		});
	}

	{
		const DRWContextState *draw_ctx = DRW_context_state_get();
		Scene *scene = draw_ctx->scene;
		/* AO Samples Tex */
		const int ssao_samples = scene->display.matcap_ssao_samples;
		/* Sample count changed: drop the cached resources so they are rebuilt
		 * below with the new count. */
		if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
			DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
			DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
		}

		if (e_data.sampling_ubo == NULL) {
			float *samples = create_disk_samples(ssao_samples);
			e_data.jitter_tx = create_jitter_texture(ssao_samples);
			/* sizeof(float[4]) matches the vec4 layout of create_disk_samples(). */
			e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
			e_data.cached_sample_num = ssao_samples;
			MEM_freeN(samples);
		}
	}

	/* Prepass */
	{
		int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
		psl->prepass_pass = DRW_pass_create("Prepass", state);
		psl->prepass_hair_pass = DRW_pass_create("Prepass", state);
	}

	/* Cavity (SSAO) fullscreen pass reading the prepass targets. */
	{
		int state = DRW_STATE_WRITE_COLOR;
		psl->cavity_pass = DRW_pass_create("Cavity", state);
		DRWShadingGroup *grp = DRW_shgroup_create(e_data.cavity_sh, psl->cavity_pass);
		DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
		DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
		DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);

		DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
		DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
		DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
		DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
		DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
		DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
		DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);
		DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
	}
}
410
411 void workbench_deferred_engine_free()
412 {
413         for (int index = 0; index < MAX_SHADERS; index++) {
414                 DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
415                 DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
416         }
417         DRW_SHADER_FREE_SAFE(e_data.cavity_sh);
418         DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
419         DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
420
421         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
422         DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
423         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
424         DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
425         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
426         DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
427
428 }
429
430 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
431 {
432         DRW_shgroup_uniform_texture_ref(grp, "colorBuffer", &e_data.color_buffer_tx);
433         DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
434         if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
435                 DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
436         }
437         if (CAVITY_ENABLED(wpd)) {
438                 DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
439         }
440         if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || MATCAP_ENABLED(wpd)) {
441                 DRW_shgroup_uniform_texture_ref(grp, "specularBuffer", &e_data.specular_buffer_tx);
442
443 #if 0
444                 float invwinmat[4][4];
445                 DRW_viewport_matrix_get(invwinmat, DRW_MAT_WININV);
446
447                 copy_v4_fl4(e_data.screenvecs[0],  1.0f, -1.0f, 0.0f, 1.0f);
448                 copy_v4_fl4(e_data.screenvecs[1], -1.0f,  1.0f, 0.0f, 1.0f);
449                 copy_v4_fl4(e_data.screenvecs[2], -1.0f, -1.0f, 0.0f, 1.0f);
450                 for (int i = 0; i < 3; i++) {
451                         mul_m4_v4(invwinmat, e_data.screenvecs[i]);
452                         e_data.screenvecs[i][0] /= e_data.screenvecs[i][3]; /* perspective divide */
453                         e_data.screenvecs[i][1] /= e_data.screenvecs[i][3]; /* perspective divide */
454                         e_data.screenvecs[i][2] /= e_data.screenvecs[i][3]; /* perspective divide */
455                         e_data.screenvecs[i][3] = 1.0f;
456                 }
457                 sub_v3_v3(e_data.screenvecs[0], e_data.screenvecs[2]);
458                 sub_v3_v3(e_data.screenvecs[1], e_data.screenvecs[2]);
459                 DRW_shgroup_uniform_vec4(grp, "screenvecs[0]", e_data.screenvecs[0], 3);
460 #endif
461         }
462         DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
463         DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
464
465         if (STUDIOLIGHT_ORIENTATION_VIEWNORMAL_ENABLED(wpd)) {
466                 BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECTANGULAR_RADIANCE_GPUTEXTURE);
467                 DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirectangular_radiance_gputexture);                DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirectangular_radiance_gputexture);
468         }
469
470         workbench_material_set_normal_world_matrix(grp, wpd, e_data.normal_world_matrix);
471 }
472
/* Create the composite pass(es) for this redraw and, when shadows are
 * enabled, the six stencil shadow-volume passes plus the shadowed-area
 * composite pass. Runs once per redraw before object population. */
void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DRWShadingGroup *grp;
	const DRWContextState *draw_ctx = DRW_context_state_get();
	/* Static so the pointer passed to DRW_shgroup_uniform_float() stays
	 * valid after this function returns. */
	static float light_multiplier = 1.0f;


	Scene *scene = draw_ctx->scene;

	select_deferred_shaders(wpd);
	/* Deferred Mix Pass */
	{
		workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);

		e_data.display.shadow_shift = scene->display.shadow_shift;
		copy_v3_v3(e_data.light_direction_vs, wpd->world_data.lights[0].light_direction_vs);

		if (SHADOW_ENABLED(wpd)) {
			/* Lit areas: composite only where the shadow stencil equals 0. */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_stencil_mask(grp, 0x00);
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &light_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);

			/* Stencil Shadow passes. */
#ifdef DEBUG_SHADOW_VOLUME
			/* Debug: draw the volumes additively as color instead of stencil. */
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
			DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
#else
			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
			DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
#endif
			psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
			psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
			psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
			psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
			psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
			psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);

#ifndef DEBUG_SHADOW_VOLUME
			grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);
			grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
			DRW_shgroup_stencil_mask(grp, 0xFF);

			/* Shadowed areas: composite where the stencil is non-zero. */
			psl->composite_shadow_pass = DRW_pass_create("Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
			DRW_shgroup_stencil_mask(grp, 0x00);
			workbench_composite_uniforms(wpd, grp);
			/* NOTE(review): lightMultiplier is bound to shadow_multiplier here
			 * (unlike the lit pass above) — presumably intentional dimming of
			 * shadowed areas; confirm against the composite shader. */
			DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
			DRW_shgroup_uniform_float(grp, "shadowShift", &scene->display.shadow_shift, 1);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
#endif

			studiolight_update_light(wpd, e_data.display.light_direction);
		}
		else {
			/* No shadows: a single unconditional composite pass. */
			psl->composite_pass = DRW_pass_create(
			        "Composite", DRW_STATE_WRITE_COLOR);
			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
			workbench_composite_uniforms(wpd, grp);
			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
		}
	}
}
554
/* Look up — or lazily create — the shading group + material UBO for this
 * (object, material, image, drawtype) combination. Entries are cached in
 * wpd->material_hash keyed by a hash of the filled-in material template. */
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int drawtype)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	WORKBENCH_MaterialData *material;
	WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
	        ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
	WORKBENCH_MaterialData material_template;

	/* Solid */
	workbench_material_update_data(wpd, ob, mat, &material_template);
	/* Object id 1 is the shared fallback when the object-id pass is off. */
	material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
	material_template.drawtype = drawtype;
	material_template.ima = ima;
	uint hash = workbench_material_get_hash(&material_template);

	material = BLI_ghash_lookup(wpd->material_hash, SET_UINT_IN_POINTER(hash));
	if (material == NULL) {
		material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
		material->shgrp = DRW_shgroup_create(
		        drawtype == OB_SOLID ? wpd->prepass_solid_sh : wpd->prepass_texture_sh, psl->prepass_pass);
		/* Mark drawn geometry in the stencil for the shadow composite. */
		DRW_shgroup_stencil_mask(material->shgrp, 0xFF);
		material->object_id = material_template.object_id;
		copy_v4_v4(material->material_data.diffuse_color, material_template.material_data.diffuse_color);
		copy_v4_v4(material->material_data.specular_color, material_template.material_data.specular_color);
		material->material_data.roughness = material_template.material_data.roughness;
		switch (drawtype) {
			case OB_SOLID:
				break;

			case OB_TEXTURE:
			{
				GPUTexture *tex = GPU_texture_from_blender(ima, NULL, GL_TEXTURE_2D, false, false, false);
				DRW_shgroup_uniform_texture(material->shgrp, "image", tex);
				break;
			}
		}
		DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
		material->material_ubo = DRW_uniformbuffer_create(sizeof(WORKBENCH_UBO_Material), &material->material_data);
		DRW_shgroup_uniform_block(material->shgrp, "material_block", material->material_ubo);

		BLI_ghash_insert(wpd->material_hash, SET_UINT_IN_POINTER(hash), material);
	}
	return material;
}
602
/* Add hair drawing for every enabled, visible particle system on `ob`.
 * Only path-drawn particles are handled; the edit-mode object is skipped
 * (its particles are drawn by the edit overlay instead). */
static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
{
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	const DRWContextState *draw_ctx = DRW_context_state_get();
	if (ob == draw_ctx->object_edit) {
		return;
	}
	for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
		if (md->type != eModifierType_ParticleSystem) {
			continue;
		}
		ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
		if (!psys_check_enabled(ob, psys, false)) {
			continue;
		}
		if (!DRW_check_psys_visible_within_active_context(ob, psys)) {
			continue;
		}
		ParticleSettings *part = psys->part;
		/* PART_DRAW_REND means "use the render visualization setting". */
		const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;

		if (draw_as == PART_DRAW_PATH) {
			Image *image = NULL;
			Material *mat = give_current_material(ob, part->omat);
			int mat_drawtype = OB_SOLID;

			if (wpd->drawtype == OB_TEXTURE) {
				ED_object_get_active_image(ob, part->omat, &image, NULL, NULL, NULL);
				/* use OB_SOLID when no texture could be determined */
				if (image) {
					mat_drawtype = OB_TEXTURE;
				}
			}

			WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);

			/* Hair uses the dedicated hair prepass shader variants. */
			struct GPUShader *shader = (mat_drawtype == OB_SOLID)
			                           ? wpd->prepass_solid_hair_sh
			                           : wpd->prepass_texture_hair_sh;
			DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
			                                ob, psys, md,
			                                psl->prepass_hair_pass,
			                                shader);
			/* Same stencil marking as solid geometry (shadow composite). */
			DRW_shgroup_stencil_mask(shgrp, 0xFF);
			DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
			DRW_shgroup_uniform_block(shgrp, "material_block", material->material_ubo);
			if (image) {
				GPUTexture *tex = GPU_texture_from_blender(image, NULL, GL_TEXTURE_2D, false, false, false);
				DRW_shgroup_uniform_texture(shgrp, "image", tex);
			}
		}
	}
}
658
659 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
660 {
661         WORKBENCH_StorageList *stl = vedata->stl;
662         WORKBENCH_PassList *psl = vedata->psl;
663         WORKBENCH_PrivateData *wpd = stl->g_data;
664         if (!DRW_object_is_renderable(ob))
665                 return;
666
667         if (ob->type == OB_MESH) {
668                 workbench_cache_populate_particles(vedata, ob);
669         }
670
671         if (!DRW_check_object_visible_within_active_context(ob)) {
672                 return;
673         }
674
675         WORKBENCH_MaterialData *material;
676         if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT)) {
677                 const DRWContextState *draw_ctx = DRW_context_state_get();
678                 const bool is_active = (ob == draw_ctx->obact);
679                 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
680                 bool is_drawn = false;
681                 if (!is_sculpt_mode && wpd->drawtype == OB_TEXTURE && ob->type == OB_MESH) {
682                         const Mesh *me = ob->data;
683                         if (me->mloopuv) {
684                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
685                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
686                                 struct Gwn_Batch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob) : NULL;
687                                 if (materials_len > 0 && geom_array) {
688                                         for (int i = 0; i < materials_len; i++) {
689                                                 Material *mat = give_current_material(ob, i + 1);
690                                                 Image *image;
691                                                 ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
692                                                 /* use OB_SOLID when no texture could be determined */
693                                                 int mat_drawtype = OB_SOLID;
694                                                 if (image) {
695                                                         mat_drawtype = OB_TEXTURE;
696                                                 }
697                                                 material = get_or_create_material_data(vedata, ob, mat, image, mat_drawtype);
698                                                 DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
699                                         }
700                                         is_drawn = true;
701                                 }
702                         }
703                 }
704
705                 /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
706                 if (!is_drawn) {
707                         if ((wpd->shading.color_type != V3D_SHADING_MATERIAL_COLOR) || is_sculpt_mode) {
708                                 /* No material split needed */
709                                 struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
710                                 if (geom) {
711                                         material = get_or_create_material_data(vedata, ob, NULL, NULL, OB_SOLID);
712                                         if (is_sculpt_mode) {
713                                                 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
714                                         }
715                                         else {
716                                                 DRW_shgroup_call_object_add(material->shgrp, geom, ob);
717                                         }
718                                 }
719                         }
720                         else { /* MATERIAL colors */
721                                 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
722                                 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
723                                 for (int i = 0; i < materials_len; i++) {
724                                         gpumat_array[i] = NULL;
725                                 }
726
727                                 struct Gwn_Batch **mat_geom = DRW_cache_object_surface_material_get(
728                                         ob, gpumat_array, materials_len, NULL, NULL, NULL);
729                                 if (mat_geom) {
730                                         for (int i = 0; i < materials_len; ++i) {
731                                                 Material *mat = give_current_material(ob, i + 1);
732                                                 material = get_or_create_material_data(vedata, ob, mat, NULL, OB_SOLID);
733                                                 DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
734                                         }
735                                 }
736                         }
737                 }
738
739                 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW) > 0) {
740                         bool is_manifold;
741                         struct Gwn_Batch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
742                         if (geom_shadow) {
743                                 if (is_sculpt_mode) {
744                                         /* Currently unsupported in sculpt mode. We could revert to the slow
745                                          * method in this case but i'm not sure if it's a good idea given that
746                                          * sculped meshes are heavy to begin with. */
747                                         // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
748                                 }
749                                 else {
750                                         WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_object_engine_data_ensure(
751                                                 ob, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
752
753                                         if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
754
755                                                 invert_m4_m4(ob->imat, ob->obmat);
756                                                 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
757
758                                                 DRWShadingGroup *grp;
759                                                 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
760
761                                                 if (use_shadow_pass_technique) {
762                                                         if (is_manifold) {
763                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
764                                                         }
765                                                         else {
766                                                                 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
767                                                         }
768                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
769                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
770                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
771 #ifdef DEBUG_SHADOW_VOLUME
772                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
773 #endif
774                                                 }
775                                                 else {
776                                                         float extrude_distance = studiolight_object_shadow_distance(wpd, ob, engine_object_data);
777
778                                                         /* TODO(fclem): only use caps if they are in the view frustum. */
779                                                         const bool need_caps = true;
780                                                         if (need_caps) {
781                                                                 if (is_manifold) {
782                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
783                                                                 }
784                                                                 else {
785                                                                         grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
786                                                                 }
787                                                                 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
788                                                                 DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
789                                                                 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
790                                                         }
791
792                                                         if (is_manifold) {
793                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
794                                                         }
795                                                         else {
796                                                                 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
797                                                         }
798                                                         DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
799                                                         DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
800                                                         DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
801 #ifdef DEBUG_SHADOW_VOLUME
802                                                         DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
803 #endif
804                                                 }
805                                         }
806                                 }
807                         }
808                 }
809         }
810 }
811
/* Cache-finish hook required by the draw engine API; the deferred
 * pipeline has no per-frame finalization work, so this is a no-op. */
void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
815
816 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
817 {
818         WORKBENCH_StorageList *stl = vedata->stl;
819         WORKBENCH_FramebufferList *fbl = vedata->fbl;
820         WORKBENCH_PrivateData *wpd = stl->g_data;
821         const float clear_depth = 1.0f;
822         const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
823         uint clear_stencil = 0xFF;
824
825         DRW_stats_group_start("Clear Background");
826         GPU_framebuffer_bind(fbl->prepass_fb);
827         int clear_bits = GPU_DEPTH_BIT | GPU_COLOR_BIT;
828         SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
829         GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
830         DRW_stats_group_end();
831 }
832
/* Execute the deferred pipeline for one frame: geometry prepass, optional
 * cavity pass, optional stencil-shadow passes, deferred composite, then
 * color-managed transform to the display framebuffer.
 * NOTE: the pass order here is load-bearing — shadow stencil passes must
 * run after the prepass and before the composite reads the stencil. */
void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
{
	WORKBENCH_PassList *psl = vedata->psl;
	WORKBENCH_StorageList *stl = vedata->stl;
	WORKBENCH_FramebufferList *fbl = vedata->fbl;
	WORKBENCH_PrivateData *wpd = stl->g_data;
	DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

	/* clear in background */
	GPU_framebuffer_bind(fbl->prepass_fb);
	DRW_draw_pass(psl->prepass_pass);
	DRW_draw_pass(psl->prepass_hair_pass);

	if (CAVITY_ENABLED(wpd)) {
		GPU_framebuffer_bind(fbl->cavity_fb);
		DRW_draw_pass(psl->cavity_pass);
	}

	if (SHADOW_ENABLED(wpd)) {
#ifdef DEBUG_SHADOW_VOLUME
		/* Debug: composite first so the shadow volumes drawn below remain
		 * visible on top of the result. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
#else
		/* Shadow volumes only touch the stencil; depth-only fb avoids
		 * writing color. */
		GPU_framebuffer_bind(dfbl->depth_only_fb);
#endif
		DRW_draw_pass(psl->shadow_depth_pass_pass);
		DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_pass);
		DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
		DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
#ifndef DEBUG_SHADOW_VOLUME
		/* Composite lit and shadowed regions using the stencil produced
		 * by the passes above. */
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
		DRW_draw_pass(psl->composite_shadow_pass);
#endif
	}
	else {
		GPU_framebuffer_bind(fbl->composite_fb);
		DRW_draw_pass(psl->composite_pass);
	}

	/* Color management: transform the composite result to display space. */
	GPU_framebuffer_bind(dfbl->color_only_fb);
	DRW_transform_to_display(e_data.composite_buffer_tx);

	workbench_private_data_free(wpd);
}