2 * Copyright 2016, Blender Foundation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * Contributor(s): Blender Institute
22 /** \file workbench_deferred.c
23 * \ingroup draw_engine
26 #include "workbench_private.h"
30 #include "BLI_alloca.h"
31 #include "BLI_dynstr.h"
32 #include "BLI_utildefines.h"
34 #include "BLI_string_utils.h"
37 #include "BKE_modifier.h"
38 #include "BKE_particle.h"
40 #include "DNA_image_types.h"
41 #include "DNA_mesh_types.h"
42 #include "DNA_modifier_types.h"
43 #include "DNA_node_types.h"
45 #include "ED_uvedit.h"
47 #include "GPU_shader.h"
48 #include "GPU_texture.h"
49 #include "GPU_extensions.h"
51 #include "../eevee/eevee_lut.h" /* TODO find somewhere to share blue noise Table */
53 /* *********** STATIC *********** */
55 /* #define DEBUG_SHADOW_VOLUME */
57 #ifdef DEBUG_SHADOW_VOLUME
58 # include "draw_debug.h"
62 struct GPUShader *prepass_sh_cache[MAX_PREPASS_SHADERS];
63 struct GPUShader *composite_sh_cache[MAX_COMPOSITE_SHADERS];
64 struct GPUShader *cavity_sh[MAX_CAVITY_SHADERS];
65 struct GPUShader *background_sh[2];
66 struct GPUShader *ghost_resolve_sh;
67 struct GPUShader *shadow_fail_sh;
68 struct GPUShader *shadow_fail_manifold_sh;
69 struct GPUShader *shadow_pass_sh;
70 struct GPUShader *shadow_pass_manifold_sh;
71 struct GPUShader *shadow_caps_sh;
72 struct GPUShader *shadow_caps_manifold_sh;
74 struct GPUTexture *ghost_depth_tx; /* ref only, not alloced */
75 struct GPUTexture *object_id_tx; /* ref only, not alloced */
76 struct GPUTexture *color_buffer_tx; /* ref only, not alloced */
77 struct GPUTexture *cavity_buffer_tx; /* ref only, not alloced */
78 struct GPUTexture *metallic_buffer_tx; /* ref only, not alloced */
79 struct GPUTexture *normal_buffer_tx; /* ref only, not alloced */
80 struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */
82 SceneDisplay display; /* world light direction for shadows */
85 struct GPUUniformBuffer *sampling_ubo;
86 struct GPUTexture *jitter_tx;
87 int cached_sample_num;
91 extern char datatoc_common_hair_lib_glsl[];
93 extern char datatoc_workbench_prepass_vert_glsl[];
94 extern char datatoc_workbench_prepass_frag_glsl[];
95 extern char datatoc_workbench_cavity_frag_glsl[];
96 extern char datatoc_workbench_deferred_composite_frag_glsl[];
97 extern char datatoc_workbench_deferred_background_frag_glsl[];
98 extern char datatoc_workbench_ghost_resolve_frag_glsl[];
100 extern char datatoc_workbench_shadow_vert_glsl[];
101 extern char datatoc_workbench_shadow_geom_glsl[];
102 extern char datatoc_workbench_shadow_caps_geom_glsl[];
103 extern char datatoc_workbench_shadow_debug_frag_glsl[];
105 extern char datatoc_workbench_background_lib_glsl[];
106 extern char datatoc_workbench_cavity_lib_glsl[];
107 extern char datatoc_workbench_common_lib_glsl[];
108 extern char datatoc_workbench_data_lib_glsl[];
109 extern char datatoc_workbench_object_outline_lib_glsl[];
110 extern char datatoc_workbench_curvature_lib_glsl[];
111 extern char datatoc_workbench_world_light_lib_glsl[];
113 extern char datatoc_gpu_shader_depth_only_frag_glsl[];
/* Build the GLSL source of the deferred composite fragment shader by
 * concatenating only the library snippets that the current shading
 * options in `wpd` require. Returns a heap string owned by the caller.
 * NOTE(review): interior lines of this chunk appear missing (declaration
 * of `str`, closing braces, `BLI_dynstr_free`) — code kept byte-identical. */
115 static char *workbench_build_composite_frag(WORKBENCH_PrivateData *wpd)
119 	DynStr *ds = BLI_dynstr_new();
/* Always-needed libs: uniform data, shared helpers, background. */
121 	BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
122 	BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
123 	BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
/* Lighting lib is only needed when shading is not flat. */
125 	if (!FLAT_ENABLED(wpd)) {
126 		BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
128 	if (OBJECT_OUTLINE_ENABLED(wpd)) {
129 		BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
131 	if (CURVATURE_ENABLED(wpd)) {
132 		BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
/* Main composite shader body goes last so it can use the libs above. */
135 	BLI_dynstr_append(ds, datatoc_workbench_deferred_composite_frag_glsl);
137 	str = BLI_dynstr_get_cstring(ds);
/* Build the GLSL source of the prepass fragment shader (fixed set of
 * libs + prepass body). Returns a heap string owned by the caller. */
142 static char *workbench_build_prepass_frag(void)
146 	DynStr *ds = BLI_dynstr_new();
148 	BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
149 	BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
150 	BLI_dynstr_append(ds, datatoc_workbench_prepass_frag_glsl);
152 	str = BLI_dynstr_get_cstring(ds);
/* Build the GLSL source of the prepass vertex shader. For hair the
 * common hair library is prepended; otherwise the plain vertex source
 * is duplicated as-is.
 * NOTE(review): the `is_hair` branch line separating the two paths is
 * missing from this chunk — presumably `if (!is_hair)` guards the
 * early return. Verify against the full file. */
157 static char *workbench_build_prepass_vert(bool is_hair)
161 		return BLI_strdup(datatoc_workbench_prepass_vert_glsl);
164 	DynStr *ds = BLI_dynstr_new();
166 	BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
167 	BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
169 	str = BLI_dynstr_get_cstring(ds);
/* Build the GLSL source of the cavity (SSAO/curvature) fragment shader.
 * The three bools toggle #defines so only the requested effects are
 * compiled in; `high_dpi` doubles the curvature sampling offset.
 * Returns a heap string owned by the caller. */
174 static char *workbench_build_cavity_frag(bool cavity, bool curvature, bool high_dpi)
178 	DynStr *ds = BLI_dynstr_new();
181 		BLI_dynstr_append(ds, "#define USE_CAVITY\n");
184 		BLI_dynstr_append(ds, "#define USE_CURVATURE\n");
187 		BLI_dynstr_append(ds, "#define CURVATURE_OFFSET 2\n");
/* Match the normal-buffer encoding used by the prepass. */
189 	if (NORMAL_ENCODING_ENABLED()) {
190 		BLI_dynstr_append(ds, "#define WORKBENCH_ENCODE_NORMALS\n");
192 	BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
193 	BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
194 	BLI_dynstr_append(ds, datatoc_workbench_cavity_frag_glsl);
195 	BLI_dynstr_append(ds, datatoc_workbench_cavity_lib_glsl);
197 	str = BLI_dynstr_get_cstring(ds);
/* Return (lazily compiling and caching) the cavity shader variant for
 * the given cavity/curvature combination. The cache index packs the
 * three option bits (cavity, curvature, hidpi) into `e_data.cavity_sh`. */
202 static GPUShader *workbench_cavity_shader_get(bool cavity, bool curvature)
/* Pixel size > 1.5 is treated as a hi-dpi display. */
204 	const bool high_dpi = (U.pixelsize > 1.5f);
206 	SET_FLAG_FROM_TEST(index, cavity, 1 << 0);
207 	SET_FLAG_FROM_TEST(index, curvature, 1 << 1);
208 	SET_FLAG_FROM_TEST(index, high_dpi, 1 << 2);
210 	GPUShader **sh = &e_data.cavity_sh[index];
/* NOTE(review): the `if (*sh == NULL)` guard line appears missing from
 * this chunk — the compile below is presumably cache-miss-only. */
212 		char *cavity_frag = workbench_build_cavity_frag(cavity, curvature, high_dpi);
213 		*sh = DRW_shader_create_fullscreen(cavity_frag, NULL);
214 		MEM_freeN(cavity_frag);
/* Get-or-compile the prepass shader for the (textured, hair) variant
 * implied by `wpd`. Compiled shaders are cached in
 * `e_data.prepass_sh_cache` keyed by the material shader index. */
219 static GPUShader *ensure_deferred_prepass_shader(WORKBENCH_PrivateData *wpd, bool use_textures, bool is_hair)
221 	int index = workbench_material_get_prepass_shader_index(wpd, use_textures, is_hair);
222 	if (e_data.prepass_sh_cache[index] == NULL) {
223 		char *defines = workbench_material_build_defines(wpd, use_textures, is_hair);
224 		char *prepass_vert = workbench_build_prepass_vert(is_hair);
225 		char *prepass_frag = workbench_build_prepass_frag();
226 		e_data.prepass_sh_cache[index] = DRW_shader_create(
228 		        prepass_frag, defines);
/* Sources are owned locally; free after shader creation.
 * NOTE(review): `MEM_freeN(defines)` is not visible in this chunk —
 * confirm it is freed in the missing lines. */
229 		MEM_freeN(prepass_vert);
230 		MEM_freeN(prepass_frag);
233 	return e_data.prepass_sh_cache[index];
/* Get-or-compile the composite shader variant for `wpd`, cached in
 * `e_data.composite_sh_cache` keyed by the composite shader index. */
236 static GPUShader *ensure_deferred_composite_shader(WORKBENCH_PrivateData *wpd)
238 	int index = workbench_material_get_composite_shader_index(wpd);
239 	if (e_data.composite_sh_cache[index] == NULL) {
240 		char *defines = workbench_material_build_defines(wpd, false, false);
241 		char *composite_frag = workbench_build_composite_frag(wpd);
242 		e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
243 		MEM_freeN(composite_frag);
246 	return e_data.composite_sh_cache[index];
/* Get-or-compile the background shader. Two cached variants exist:
 * index 1 with object-outline support, index 0 without. */
249 static GPUShader *ensure_background_shader(WORKBENCH_PrivateData *wpd)
251 	const int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
252 	if (e_data.background_sh[index] == NULL) {
253 		const char *defines = (index) ? "#define V3D_SHADING_OBJECT_OUTLINE\n" : NULL;
254 		char *frag = BLI_string_joinN(
255 		        datatoc_workbench_data_lib_glsl,
256 		        datatoc_workbench_common_lib_glsl,
257 		        datatoc_workbench_background_lib_glsl,
258 		        datatoc_workbench_object_outline_lib_glsl,
259 		        datatoc_workbench_deferred_background_frag_glsl);
260 		e_data.background_sh[index] = DRW_shader_create_fullscreen(frag, defines);
/* NOTE(review): `MEM_freeN(frag)` is not visible in this chunk —
 * confirm the joined source is freed in the missing lines. */
263 	return e_data.background_sh[index];
/* Resolve all shader variants needed this frame for `wpd`, compiling
 * any that are not yet cached. */
266 static void select_deferred_shaders(WORKBENCH_PrivateData *wpd)
268 	wpd->prepass_solid_sh = ensure_deferred_prepass_shader(wpd, false, false);
269 	wpd->prepass_solid_hair_sh = ensure_deferred_prepass_shader(wpd, false, true);
270 	wpd->prepass_texture_sh = ensure_deferred_prepass_shader(wpd, true, false);
271 	wpd->prepass_texture_hair_sh = ensure_deferred_prepass_shader(wpd, true, true);
272 	wpd->composite_sh = ensure_deferred_composite_shader(wpd);
273 	wpd->background_sh = ensure_background_shader(wpd);
276 /* Using Hammersley distribution */
/* Generate `num_samples * num_iterations` 2D disk sample points for the
 * SSAO kernel, stored as vec4 texels (xy = direction on unit circle).
 * The Hammersley low-discrepancy sequence drives the angle; `it_add`
 * (integer division is intentional — per-iteration offset) decorrelates
 * successive TAA iterations. Caller owns the returned MEM_mallocN array. */
277 static float *create_disk_samples(int num_samples, int num_iterations)
279 	/* vec4 to ensure memory alignment. */
280 	const int total_samples = num_samples * num_iterations;
281 	float(*texels)[4] = MEM_mallocN(sizeof(float[4]) * total_samples, __func__);
282 	const float num_samples_inv = 1.0f / num_samples;
284 	for (int i = 0; i < total_samples; i++) {
285 		float it_add = (i / num_samples) * 0.499f;
286 		float r = fmodf((i + 0.5f + it_add) * num_samples_inv, 1.0f);
288 		BLI_hammersley_1D(i, &dphi);
290 		float phi = (float)dphi * 2.0f * M_PI + it_add;
291 		texels[i][0] = cosf(phi);
292 		texels[i][1] = sinf(phi);
293 		/* This deliberately distribute more samples
294 		 * at the center of the disk (and thus the shadow). */
/* NOTE(review): the line storing the radius term (presumably using `r`)
 * is missing from this chunk — `r` is otherwise unused here. */
298 	return (float *)texels;
/* Create a 64x64 RGB16F jitter texture from the shared blue-noise table:
 * xy = per-pixel rotation of the SSAO disk, z = along-axis offset used
 * to reduce banding. Returned texture is owned by the caller. */
301 static struct GPUTexture *create_jitter_texture(int num_samples)
303 	float jitter[64 * 64][3];
304 	const float num_samples_inv = 1.0f / num_samples;
306 	for (int i = 0; i < 64 * 64; i++) {
307 		float phi = blue_noise[i][0] * 2.0f * M_PI;
308 		/* This rotate the sample per pixels */
309 		jitter[i][0] = cosf(phi);
310 		jitter[i][1] = sinf(phi);
311 		/* This offset the sample along it's direction axis (reduce banding) */
312 		float bn = blue_noise[i][1] - 0.5f;
313 		CLAMP(bn, -0.499f, 0.499f); /* fix fireflies */
314 		jitter[i][2] = bn * num_samples_inv;
/* Silence unused-symbol warnings for the other tables pulled in from
 * the shared eevee LUT header. */
317 	UNUSED_VARS(bsdf_split_sum_ggx, btdf_split_sum_ggx, ltc_mag_ggx, ltc_mat_ggx, ltc_disk_integral);
319 	return DRW_texture_create_2D(64, 64, GPU_RGB16F, DRW_TEX_FILTER | DRW_TEX_WRAP, &jitter[0][0]);
/* DrawData init callback: assign a non-zero 8-bit object id (wraps via
 * `& 0xff`, then +1 so 0 stays reserved) and mark the cached shadow
 * bounding box as needing recomputation. */
324 static void workbench_init_object_data(DrawData *dd)
326 	WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
327 	data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
328 	data->shadow_bbox_dirty = true;
/* Per-redraw engine init for the deferred workbench pipeline:
 * allocates transient storage, compiles one-time shaders (guarded by
 * `next_object_id == 0`), requests the pooled G-buffer textures and
 * framebuffers sized to the viewport, (re)builds the SSAO sample UBO
 * and jitter texture when the sample count changes, and creates the
 * prepass / ghost / cavity passes.
 * NOTE(review): many closing braces, `#else`/`#endif` and framebuffer
 * config terminators are missing from this chunk; code kept as-is. */
331 void workbench_deferred_engine_init(WORKBENCH_Data *vedata)
333 	WORKBENCH_FramebufferList *fbl = vedata->fbl;
334 	WORKBENCH_StorageList *stl = vedata->stl;
335 	WORKBENCH_PassList *psl = vedata->psl;
336 	DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
337 	const DRWContextState *draw_ctx = DRW_context_state_get();
340 	/* Alloc transient pointers */
341 	stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
344 	stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
345 	workbench_effect_info_init(stl->effects);
/* First-run block: next_object_id == 0 means shaders were never built. */
348 	if (!e_data.next_object_id) {
349 		memset(e_data.prepass_sh_cache, 0, sizeof(e_data.prepass_sh_cache));
350 		memset(e_data.composite_sh_cache, 0, sizeof(e_data.composite_sh_cache));
351 		e_data.next_object_id = 1;
352 #ifdef DEBUG_SHADOW_VOLUME
353 		const char *shadow_frag = datatoc_workbench_shadow_debug_frag_glsl;
355 		const char *shadow_frag = datatoc_gpu_shader_depth_only_frag_glsl;
357 		/* TODO only compile on demand */
/* Six stencil-shadow shader variants: {pass, fail, fail+caps} x
 * {non-manifold (DOUBLE_MANIFOLD), manifold}. */
358 		e_data.shadow_pass_sh = DRW_shader_create(
359 		        datatoc_workbench_shadow_vert_glsl,
360 		        datatoc_workbench_shadow_geom_glsl,
362 		        "#define SHADOW_PASS\n"
363 		        "#define DOUBLE_MANIFOLD\n");
364 		e_data.shadow_pass_manifold_sh = DRW_shader_create(
365 		        datatoc_workbench_shadow_vert_glsl,
366 		        datatoc_workbench_shadow_geom_glsl,
368 		        "#define SHADOW_PASS\n");
369 		e_data.shadow_fail_sh = DRW_shader_create(
370 		        datatoc_workbench_shadow_vert_glsl,
371 		        datatoc_workbench_shadow_geom_glsl,
373 		        "#define SHADOW_FAIL\n"
374 		        "#define DOUBLE_MANIFOLD\n");
375 		e_data.shadow_fail_manifold_sh = DRW_shader_create(
376 		        datatoc_workbench_shadow_vert_glsl,
377 		        datatoc_workbench_shadow_geom_glsl,
379 		        "#define SHADOW_FAIL\n");
380 		e_data.shadow_caps_sh = DRW_shader_create(
381 		        datatoc_workbench_shadow_vert_glsl,
382 		        datatoc_workbench_shadow_caps_geom_glsl,
384 		        "#define SHADOW_FAIL\n"
385 		        "#define DOUBLE_MANIFOLD\n");
386 		e_data.shadow_caps_manifold_sh = DRW_shader_create(
387 		        datatoc_workbench_shadow_vert_glsl,
388 		        datatoc_workbench_shadow_caps_geom_glsl,
390 		        "#define SHADOW_FAIL\n");
392 		e_data.ghost_resolve_sh = DRW_shader_create_fullscreen(datatoc_workbench_ghost_resolve_frag_glsl, NULL);
394 	workbench_volume_engine_init();
395 	workbench_fxaa_engine_init();
396 	workbench_taa_engine_init(vedata);
398 	WORKBENCH_PrivateData *wpd = stl->g_data;
399 	workbench_private_data_init(wpd);
/* Choose G-buffer formats from options: cheaper formats when the
 * feature allows (encoded normals, non-render composite, 8-bit ids). */
402 		const float *viewport_size = DRW_viewport_size_get();
403 		const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
404 		const GPUTextureFormat nor_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16 : GPU_RGBA32F;
405 		const GPUTextureFormat comp_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F : GPU_R11F_G11F_B10F;
406 		const GPUTextureFormat id_tex_format = OBJECT_ID_PASS_ENABLED(wpd) ? GPU_R32UI : GPU_R8;
408 		e_data.object_id_tx = NULL;
409 		e_data.color_buffer_tx = NULL;
410 		e_data.composite_buffer_tx = NULL;
411 		e_data.normal_buffer_tx = NULL;
412 		e_data.cavity_buffer_tx = NULL;
414 		e_data.composite_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], comp_tex_format, &draw_engine_workbench_solid);
/* Optional buffers only allocated when the corresponding pass is on
 * (or when the unused-fb-slot driver workaround forces them). */
416 		if (MATDATA_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
417 			e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
419 		if (OBJECT_ID_PASS_ENABLED(wpd) || GPU_unused_fb_slot_workaround()) {
420 			e_data.object_id_tx = DRW_texture_pool_query_2D(size[0], size[1], id_tex_format, &draw_engine_workbench_solid);
422 		if (NORMAL_VIEWPORT_PASS_ENABLED(wpd)) {
423 			e_data.normal_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], nor_tex_format, &draw_engine_workbench_solid);
425 		if (CAVITY_ENABLED(wpd)) {
426 			e_data.cavity_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_R16, &draw_engine_workbench_solid);
429 		GPU_framebuffer_ensure_config(&fbl->prepass_fb, {
430 			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
431 			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
432 			GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
433 			GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
435 		GPU_framebuffer_ensure_config(&fbl->cavity_fb, {
437 			GPU_ATTACHMENT_TEXTURE(e_data.cavity_buffer_tx),
439 		GPU_framebuffer_ensure_config(&fbl->composite_fb, {
440 			GPU_ATTACHMENT_TEXTURE(dtxl->depth),
441 			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
443 		GPU_framebuffer_ensure_config(&fbl->volume_fb, {
445 			GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
/* The effect (AA) fb always needs a color buffer even if the matdata
 * pass did not allocate one above. */
448 		if (!MATDATA_PASS_ENABLED(wpd) && !GPU_unused_fb_slot_workaround()) {
449 			e_data.color_buffer_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_RGBA8, &draw_engine_workbench_solid);
452 		GPU_framebuffer_ensure_config(&fbl->effect_fb, {
454 			GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
457 		if (OBJECT_ID_PASS_ENABLED(wpd)) {
458 			GPU_framebuffer_ensure_config(&fbl->id_clear_fb, {
460 				GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
466 		Scene *scene = draw_ctx->scene;
468 		int num_iterations = workbench_taa_calculate_num_iterations(vedata);
470 		const int ssao_samples_single_iteration = scene->display.matcap_ssao_samples;
/* Cap total SSAO samples at 500 across all TAA iterations. */
471 		const int ssao_samples = MIN2(num_iterations * ssao_samples_single_iteration, 500);
/* Invalidate the cached kernel when the sample count changed. */
473 		if (e_data.sampling_ubo && (e_data.cached_sample_num != ssao_samples)) {
474 			DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
475 			DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
478 		if (e_data.sampling_ubo == NULL) {
479 			float *samples = create_disk_samples(ssao_samples_single_iteration, num_iterations);
480 			e_data.jitter_tx = create_jitter_texture(ssao_samples);
481 			e_data.sampling_ubo = DRW_uniformbuffer_create(sizeof(float[4]) * ssao_samples, samples);
482 			e_data.cached_sample_num = ssao_samples;
/* NOTE(review): `MEM_freeN(samples)` is not visible in this chunk —
 * confirm the sample array is freed in the missing lines. */
489 		DRWShadingGroup *grp;
490 		const bool do_cull = (draw_ctx->v3d && (draw_ctx->v3d->flag2 & V3D_BACKFACE_CULLING));
492 		int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
493 		psl->prepass_pass = DRW_pass_create("Prepass", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
494 		psl->prepass_hair_pass = DRW_pass_create("Prepass", state);
496 		psl->ghost_prepass_pass = DRW_pass_create("Prepass Ghost", (do_cull) ? state | DRW_STATE_CULL_BACK : state);
497 		psl->ghost_prepass_hair_pass = DRW_pass_create("Prepass Ghost", state);
/* Ghost resolve: copy ghost depth back into the main depth buffer. */
499 		psl->ghost_resolve_pass = DRW_pass_create("Resolve Ghost Depth", DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS);
500 		grp = DRW_shgroup_create(e_data.ghost_resolve_sh, psl->ghost_resolve_pass);
501 		DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &e_data.ghost_depth_tx);
502 		DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
506 		workbench_aa_create_pass(vedata, &e_data.color_buffer_tx);
/* Fullscreen cavity pass, fed by the prepass normal/depth/id buffers. */
509 		if (CAVITY_ENABLED(wpd)) {
510 			int state = DRW_STATE_WRITE_COLOR;
511 			GPUShader *shader = workbench_cavity_shader_get(SSAO_ENABLED(wpd), CURVATURE_ENABLED(wpd));
512 			psl->cavity_pass = DRW_pass_create("Cavity", state);
513 			DRWShadingGroup *grp = DRW_shgroup_create(shader, psl->cavity_pass);
514 			DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
515 			DRW_shgroup_uniform_block(grp, "samples_block", e_data.sampling_ubo);
517 			if (SSAO_ENABLED(wpd)) {
518 				DRW_shgroup_uniform_texture_ref(grp, "depthBuffer", &dtxl->depth);
519 				DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
520 				DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
521 				DRW_shgroup_uniform_vec4(grp, "ssao_params", wpd->ssao_params, 1);
522 				DRW_shgroup_uniform_vec4(grp, "ssao_settings", wpd->ssao_settings, 1);
523 				DRW_shgroup_uniform_mat4(grp, "WinMatrix", wpd->winmat);
524 				DRW_shgroup_uniform_texture(grp, "ssao_jitter", e_data.jitter_tx);
527 			if (CURVATURE_ENABLED(wpd)) {
528 				DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
529 				DRW_shgroup_uniform_vec2(grp, "curvature_settings", &wpd->world_data.curvature_ridge, 1);
532 			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
/* Request the viewport-sized ghost depth texture from the pool and
 * configure the ghost prepass framebuffer (own depth, shared color /
 * object-id / normal buffers from the main prepass). */
536 static void workbench_setup_ghost_framebuffer(WORKBENCH_FramebufferList *fbl)
538 	const float *viewport_size = DRW_viewport_size_get();
539 	const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
541 	e_data.ghost_depth_tx = DRW_texture_pool_query_2D(size[0], size[1], GPU_DEPTH_COMPONENT24, &draw_engine_workbench_solid);
543 	GPU_framebuffer_ensure_config(&fbl->ghost_prepass_fb, {
544 		GPU_ATTACHMENT_TEXTURE(e_data.ghost_depth_tx),
545 		GPU_ATTACHMENT_TEXTURE(e_data.color_buffer_tx),
546 		GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
547 		GPU_ATTACHMENT_TEXTURE(e_data.normal_buffer_tx),
/* Engine shutdown: release every cached shader, the SSAO UBO and
 * jitter texture, then tear down the volume/fxaa/taa sub-engines.
 * All *_FREE_SAFE macros tolerate NULL, so partial init is fine.
 * (Pool textures in e_data are refs only and are not freed here.) */
551 void workbench_deferred_engine_free(void)
553 	for (int index = 0; index < MAX_PREPASS_SHADERS; index++) {
554 		DRW_SHADER_FREE_SAFE(e_data.prepass_sh_cache[index]);
556 	for (int index = 0; index < MAX_COMPOSITE_SHADERS; index++) {
557 		DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
559 	for (int index = 0; index < MAX_CAVITY_SHADERS; ++index) {
560 		DRW_SHADER_FREE_SAFE(e_data.cavity_sh[index]);
562 	DRW_SHADER_FREE_SAFE(e_data.ghost_resolve_sh);
563 	DRW_UBO_FREE_SAFE(e_data.sampling_ubo);
564 	DRW_TEXTURE_FREE_SAFE(e_data.jitter_tx);
565 	DRW_SHADER_FREE_SAFE(e_data.background_sh[0]);
566 	DRW_SHADER_FREE_SAFE(e_data.background_sh[1]);
568 	DRW_SHADER_FREE_SAFE(e_data.shadow_pass_sh);
569 	DRW_SHADER_FREE_SAFE(e_data.shadow_pass_manifold_sh);
570 	DRW_SHADER_FREE_SAFE(e_data.shadow_fail_sh);
571 	DRW_SHADER_FREE_SAFE(e_data.shadow_fail_manifold_sh);
572 	DRW_SHADER_FREE_SAFE(e_data.shadow_caps_sh);
573 	DRW_SHADER_FREE_SAFE(e_data.shadow_caps_manifold_sh);
575 	workbench_volume_engine_free();
576 	workbench_fxaa_engine_free();
577 	workbench_taa_engine_free();
/* Bind the uniforms shared by every composite shading group: world UBO
 * plus whichever G-buffer textures / parameters the active options need. */
580 static void workbench_composite_uniforms(WORKBENCH_PrivateData *wpd, DRWShadingGroup *grp)
582 	DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
583 	if (MATDATA_PASS_ENABLED(wpd)) {
584 		DRW_shgroup_uniform_texture_ref(grp, "materialBuffer", &e_data.color_buffer_tx);
/* Without a matdata pass a single flat color is used instead. */
587 		DRW_shgroup_uniform_vec3(grp, "materialSingleColor", wpd->shading.single_color, 1);
589 	if (OBJECT_OUTLINE_ENABLED(wpd)) {
590 		DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
592 	if (NORMAL_VIEWPORT_COMP_PASS_ENABLED(wpd)) {
593 		DRW_shgroup_uniform_texture_ref(grp, "normalBuffer", &e_data.normal_buffer_tx);
595 	if (CAVITY_ENABLED(wpd)) {
596 		DRW_shgroup_uniform_texture_ref(grp, "cavityBuffer", &e_data.cavity_buffer_tx);
598 	if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
599 		DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
601 	if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
602 		DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
604 	if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
/* Lazy-upload the matcap image to a GPU texture before binding it. */
605 		BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
606 		DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
/* Per-redraw cache init: build the background pass, the composite
 * pass(es), and — when shadows are enabled — the six stencil shadow
 * volume passes (depth-pass and depth-fail techniques, manifold and
 * non-manifold variants) plus the shadowed composite pass.
 * NOTE(review): several closing braces / `#else`-`#endif` lines of this
 * chunk are missing; code kept byte-identical. */
610 void workbench_deferred_cache_init(WORKBENCH_Data *vedata)
612 	WORKBENCH_StorageList *stl = vedata->stl;
613 	WORKBENCH_PassList *psl = vedata->psl;
614 	WORKBENCH_PrivateData *wpd = stl->g_data;
615 	DRWShadingGroup *grp;
616 	const DRWContextState *draw_ctx = DRW_context_state_get();
618 	Scene *scene = draw_ctx->scene;
620 	workbench_volume_cache_init(vedata);
622 	select_deferred_shaders(wpd);
624 	/* Background Pass */
626 		psl->background_pass = DRW_pass_create(
627 		        "Background", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
628 		grp = DRW_shgroup_create(wpd->background_sh, psl->background_pass);
629 		DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
630 		DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
631 		if (OBJECT_OUTLINE_ENABLED(wpd)) {
632 			DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
634 		DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
637 	/* Deferred Mix Pass */
639 		workbench_private_data_get_light_direction(wpd, e_data.display.light_direction);
640 		studiolight_update_light(wpd, e_data.display.light_direction);
642 		float shadow_focus = scene->display.shadow_focus;
643 		/* Clamp to avoid overshadowing and shading errors. */
644 		CLAMP(shadow_focus, 0.0001f, 0.99999f);
645 		shadow_focus = 1.0f - shadow_focus * (1.0f - scene->display.shadow_shift);
647 		if (SHADOW_ENABLED(wpd)) {
/* Lit composite: runs where the stencil equals 0 (unshadowed pixels). */
648 			psl->composite_pass = DRW_pass_create(
649 			        "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL | DRW_STATE_DEPTH_GREATER);
650 			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
651 			workbench_composite_uniforms(wpd, grp);
652 			DRW_shgroup_stencil_mask(grp, 0x00);
653 			DRW_shgroup_uniform_float_copy(grp, "lightMultiplier", 1.0f);
654 			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
655 			DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
656 			DRW_shgroup_uniform_float_copy(grp, "shadowFocus", shadow_focus);
657 			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
659 			/* Stencil Shadow passes. */
660 #ifdef DEBUG_SHADOW_VOLUME
/* Debug build: draw the volumes additively in color instead of stencil. */
661 			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
662 			DRWState depth_fail_state = DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_WRITE_COLOR | DRW_STATE_ADDITIVE;
664 			DRWState depth_pass_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_PASS;
665 			DRWState depth_fail_state = DRW_STATE_DEPTH_LESS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL;
667 			psl->shadow_depth_pass_pass = DRW_pass_create("Shadow Pass", depth_pass_state);
668 			psl->shadow_depth_pass_mani_pass = DRW_pass_create("Shadow Pass Mani", depth_pass_state);
669 			psl->shadow_depth_fail_pass = DRW_pass_create("Shadow Fail", depth_fail_state);
670 			psl->shadow_depth_fail_mani_pass = DRW_pass_create("Shadow Fail Mani", depth_fail_state);
671 			psl->shadow_depth_fail_caps_pass = DRW_pass_create("Shadow Fail Caps", depth_fail_state);
672 			psl->shadow_depth_fail_caps_mani_pass = DRW_pass_create("Shadow Fail Caps Mani", depth_fail_state);
674 #ifndef DEBUG_SHADOW_VOLUME
675 			grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
676 			DRW_shgroup_stencil_mask(grp, 0xFF);
677 			grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
678 			DRW_shgroup_stencil_mask(grp, 0xFF);
679 			grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
680 			DRW_shgroup_stencil_mask(grp, 0xFF);
681 			grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
682 			DRW_shgroup_stencil_mask(grp, 0xFF);
683 			grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
684 			DRW_shgroup_stencil_mask(grp, 0xFF);
685 			grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
686 			DRW_shgroup_stencil_mask(grp, 0xFF);
/* Shadowed composite: runs where the stencil is non-zero. */
688 			psl->composite_shadow_pass = DRW_pass_create(
689 			        "Composite Shadow", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_NEQUAL | DRW_STATE_DEPTH_GREATER);
690 			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_shadow_pass);
691 			DRW_shgroup_stencil_mask(grp, 0x00);
692 			workbench_composite_uniforms(wpd, grp);
693 			DRW_shgroup_uniform_float(grp, "lightMultiplier", &wpd->shadow_multiplier, 1);
694 			DRW_shgroup_uniform_float(grp, "shadowMultiplier", &wpd->shadow_multiplier, 1);
695 			DRW_shgroup_uniform_float_copy(grp, "shadowShift", scene->display.shadow_shift);
696 			DRW_shgroup_uniform_float_copy(grp, "shadowFocus", shadow_focus);
697 			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
/* No shadows: single composite pass without stencil testing. */
701 			psl->composite_pass = DRW_pass_create(
702 			        "Composite", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_GREATER);
703 			grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
704 			workbench_composite_uniforms(wpd, grp);
705 			DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
/* Look up (or create and register) the WORKBENCH_MaterialData for the
 * given object/material/image/color-type combination. Materials are
 * deduplicated through `wpd->material_hash`, keyed by a hash that also
 * folds in the ghost (x-ray) flag so ghost objects get separate
 * shading groups in the ghost prepass. */
710 static WORKBENCH_MaterialData *get_or_create_material_data(
711         WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int color_type)
713 	WORKBENCH_StorageList *stl = vedata->stl;
714 	WORKBENCH_PassList *psl = vedata->psl;
715 	WORKBENCH_PrivateData *wpd = stl->g_data;
716 	WORKBENCH_MaterialData *material;
717 	WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
718 	        &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
719 	WORKBENCH_MaterialData material_template;
720 	const bool is_ghost = (ob->dtx & OB_DRAWXRAY);
/* Fill a stack template first; only heap-allocate on a cache miss. */
723 	workbench_material_update_data(wpd, ob, mat, &material_template);
724 	material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
725 	material_template.color_type = color_type;
726 	material_template.ima = ima;
727 	uint hash = workbench_material_get_hash(&material_template, is_ghost);
729 	material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
730 	if (material == NULL) {
731 		material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);
732 		material->shgrp = DRW_shgroup_create(
733 		        (color_type == V3D_SHADING_TEXTURE_COLOR) ? wpd->prepass_texture_sh: wpd->prepass_solid_sh,
734 		        (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_pass : psl->prepass_pass);
735 		workbench_material_copy(material, &material_template);
/* Ghost objects write stencil 0x00 so they are excluded from shadows. */
736 		DRW_shgroup_stencil_mask(material->shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
737 		DRW_shgroup_uniform_int(material->shgrp, "object_id", &material->object_id, 1);
738 		workbench_material_shgroup_uniform(wpd, material->shgrp, material, ob, true, true);
740 		BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
/* Add hair shading groups for every enabled, visible particle system on
 * `ob` that is drawn as paths. Non-path draw modes are skipped.
 * NOTE(review): the `continue;` lines of the early-skip branches are
 * missing from this chunk; code kept byte-identical. */
745 static void workbench_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
747 	WORKBENCH_StorageList *stl = vedata->stl;
748 	WORKBENCH_PassList *psl = vedata->psl;
749 	WORKBENCH_PrivateData *wpd = stl->g_data;
751 	for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
752 		if (md->type != eModifierType_ParticleSystem) {
755 		ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
756 		if (!psys_check_enabled(ob, psys, false)) {
759 		if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
762 		ParticleSettings *part = psys->part;
/* Rendered draw mode defers to the render-as setting. */
763 		const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;
765 		if (draw_as == PART_DRAW_PATH) {
767 			Material *mat = give_current_material(ob, part->omat);
768 			ED_object_get_active_image(ob, part->omat, &image, NULL, NULL, NULL);
769 			int color_type = workbench_material_determine_color_type(wpd, image, ob);
770 			WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, color_type);
772 			struct GPUShader *shader = (color_type != V3D_SHADING_TEXTURE_COLOR) ?
773 			        wpd->prepass_solid_hair_sh :
774 			        wpd->prepass_texture_hair_sh;
775 			DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
777 			        (ob->dtx & OB_DRAWXRAY) ? psl->ghost_prepass_hair_pass : psl->prepass_hair_pass,
/* Same ghost stencil convention as solid geometry (0x00 for x-ray). */
779 			DRW_shgroup_stencil_mask(shgrp, (ob->dtx & OB_DRAWXRAY) ? 0x00 : 0xFF);
780 			DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
781 			workbench_material_shgroup_uniform(wpd, shgrp, material, ob, true, true);
786 void workbench_deferred_solid_cache_populate(WORKBENCH_Data *vedata, Object *ob)
788 WORKBENCH_StorageList *stl = vedata->stl;
789 WORKBENCH_PassList *psl = vedata->psl;
790 WORKBENCH_PrivateData *wpd = stl->g_data;
791 const DRWContextState *draw_ctx = DRW_context_state_get();
792 Scene *scene = draw_ctx->scene;
794 if (!DRW_object_is_renderable(ob))
797 if (ob->type == OB_MESH) {
798 workbench_cache_populate_particles(vedata, ob);
802 if (((ob->base_flag & BASE_FROMDUPLI) == 0) &&
803 (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
804 (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
805 (((SmokeModifierData *)md)->domain != NULL))
807 workbench_volume_cache_populate(vedata, scene, ob, md);
808 return; /* Do not draw solid in this case. */
811 if (!DRW_object_is_visible_in_active_context(ob) || (ob->dt < OB_SOLID)) {
815 WORKBENCH_MaterialData *material;
816 if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
817 const bool is_active = (ob == draw_ctx->obact);
818 const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
819 const bool use_hide = is_active && DRW_object_use_hide_faces(ob);
820 bool is_drawn = false;
821 if (!is_sculpt_mode && TEXTURE_DRAWING_ENABLED(wpd) && ELEM(ob->type, OB_MESH)) {
822 const Mesh *me = ob->data;
824 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
825 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
826 struct GPUBatch **geom_array = me->totcol ? DRW_cache_mesh_surface_texpaint_get(ob, use_hide) : NULL;
827 if (materials_len > 0 && geom_array) {
828 for (int i = 0; i < materials_len; i++) {
829 if (geom_array[i] == NULL) {
833 Material *mat = give_current_material(ob, i + 1);
835 ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);
836 int color_type = workbench_material_determine_color_type(wpd, image, ob);
837 material = get_or_create_material_data(vedata, ob, mat, image, color_type);
838 DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
845 /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
847 if (ELEM(wpd->shading.color_type, V3D_SHADING_SINGLE_COLOR, V3D_SHADING_RANDOM_COLOR)) {
848 /* No material split needed */
849 struct GPUBatch *geom = DRW_cache_object_surface_get_ex(ob, use_hide);
851 material = get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type);
852 if (is_sculpt_mode) {
853 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
856 DRW_shgroup_call_object_add(material->shgrp, geom, ob);
860 else { /* MATERIAL colors */
861 const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
862 struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
863 for (int i = 0; i < materials_len; i++) {
864 gpumat_array[i] = NULL;
867 struct GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
868 ob, gpumat_array, materials_len, use_hide, NULL, NULL, NULL);
870 for (int i = 0; i < materials_len; ++i) {
871 if (mat_geom[i] == NULL) {
875 Material *mat = give_current_material(ob, i + 1);
876 material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR);
877 if (is_sculpt_mode) {
878 DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
881 DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
888 if (SHADOW_ENABLED(wpd) && (ob->display.flag & OB_SHOW_SHADOW)) {
890 struct GPUBatch *geom_shadow = DRW_cache_object_edge_detection_get(ob, &is_manifold);
892 if (is_sculpt_mode || use_hide) {
893 /* Currently unsupported in sculpt mode. We could revert to the slow
894 * method in this case but I'm not sure if it's a good idea given that
895 * sculpted meshes are heavy to begin with. */
896 // DRW_shgroup_call_sculpt_add(wpd->shadow_shgrp, ob, ob->obmat);
899 WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
900 &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
902 if (studiolight_object_cast_visible_shadow(wpd, ob, engine_object_data)) {
904 invert_m4_m4(ob->imat, ob->obmat);
905 mul_v3_mat3_m4v3(engine_object_data->shadow_dir, ob->imat, e_data.display.light_direction);
907 DRWShadingGroup *grp;
908 bool use_shadow_pass_technique = !studiolight_camera_in_object_shadow(wpd, ob, engine_object_data);
910 if (use_shadow_pass_technique) {
912 grp = DRW_shgroup_create(e_data.shadow_pass_manifold_sh, psl->shadow_depth_pass_mani_pass);
915 grp = DRW_shgroup_create(e_data.shadow_pass_sh, psl->shadow_depth_pass_pass);
917 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
918 DRW_shgroup_uniform_float_copy(grp, "lightDistance", 1e5f);
919 DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
920 #ifdef DEBUG_SHADOW_VOLUME
921 DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
925 float extrude_distance = studiolight_object_shadow_distance(wpd, ob, engine_object_data);
927 /* TODO(fclem): only use caps if they are in the view frustum. */
928 const bool need_caps = true;
931 grp = DRW_shgroup_create(e_data.shadow_caps_manifold_sh, psl->shadow_depth_fail_caps_mani_pass);
934 grp = DRW_shgroup_create(e_data.shadow_caps_sh, psl->shadow_depth_fail_caps_pass);
936 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
937 DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
938 DRW_shgroup_call_add(grp, DRW_cache_object_surface_get(ob), ob->obmat);
942 grp = DRW_shgroup_create(e_data.shadow_fail_manifold_sh, psl->shadow_depth_fail_mani_pass);
945 grp = DRW_shgroup_create(e_data.shadow_fail_sh, psl->shadow_depth_fail_pass);
947 DRW_shgroup_uniform_vec3(grp, "lightDirection", engine_object_data->shadow_dir, 1);
948 DRW_shgroup_uniform_float_copy(grp, "lightDistance", extrude_distance);
949 DRW_shgroup_call_add(grp, geom_shadow, ob->obmat);
950 #ifdef DEBUG_SHADOW_VOLUME
951 DRW_debug_bbox(&engine_object_data->shadow_bbox, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
/* Cache-population finalize hook for the deferred engine.
 * No per-frame finalize work is required here (the argument is explicitly
 * marked UNUSED); the hook exists to satisfy the engine callback table. */
961 void workbench_deferred_cache_finish(WORKBENCH_Data *UNUSED(vedata))
/* Clear the buffers the deferred pipeline writes this frame.
 * Despite the name, this does not draw a background image: it only resets the
 * object-id buffer (when that pass is active), the prepass depth buffer, and
 * the stencil buffer (only when shadows will use it). */
965 void workbench_deferred_draw_background(WORKBENCH_Data *vedata)
967 WORKBENCH_StorageList *stl = vedata->stl;
968 WORKBENCH_FramebufferList *fbl = vedata->fbl;
969 WORKBENCH_PrivateData *wpd = stl->g_data;
970 const float clear_depth = 1.0f;
971 const float clear_color[4] = {0.0f, 0.0f, 0.0f, 0.0f};
972 uint clear_stencil = 0x00;
974 DRW_stats_group_start("Clear Background");
976 if (OBJECT_ID_PASS_ENABLED(wpd)) {
977 /* From all the color buffers, only object id needs to be cleared. */
978 GPU_framebuffer_bind(fbl->id_clear_fb);
979 GPU_framebuffer_clear_color(fbl->id_clear_fb, clear_color);
/* Depth is always cleared; stencil is only cleared when the shadow passes
 * will write it, to avoid a needless stencil clear otherwise. */
982 GPU_framebuffer_bind(fbl->prepass_fb);
983 int clear_bits = GPU_DEPTH_BIT;
984 SET_FLAG_FROM_TEST(clear_bits, SHADOW_ENABLED(wpd), GPU_STENCIL_BIT);
985 GPU_framebuffer_clear(fbl->prepass_fb, clear_bits, clear_color, clear_depth, clear_stencil);
986 DRW_stats_group_end();
/* Execute the deferred workbench frame: prepass (G-buffer), optional ghost
 * prepass/resolve, optional cavity (SSAO-like) pass, stencil shadow-volume
 * passes, deferred composite, background, volumes, then anti-aliasing resolve.
 * The pass order here is load-bearing — especially the stencil interplay
 * between the shadow passes and the ghost-object passes. */
989 void workbench_deferred_draw_scene(WORKBENCH_Data *vedata)
991 WORKBENCH_PassList *psl = vedata->psl;
992 WORKBENCH_StorageList *stl = vedata->stl;
993 WORKBENCH_FramebufferList *fbl = vedata->fbl;
994 WORKBENCH_PrivateData *wpd = stl->g_data;
995 DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
/* Temporal anti-aliasing needs per-sample setup before any drawing. */
997 if (TAA_ENABLED(wpd)) {
998 workbench_taa_draw_scene_start(vedata);
/* G-buffer prepass: depth + material data for opaque geometry and hair. */
1001 /* clear in background */
1002 GPU_framebuffer_bind(fbl->prepass_fb);
1003 DRW_draw_pass(psl->prepass_pass);
1004 DRW_draw_pass(psl->prepass_hair_pass);
/* Ghost (x-ray) objects render into their own depth target, then get
 * resolved into the main depth buffer. */
1006 if (GHOST_ENABLED(psl)) {
1007 /* meh, late init to not request a depth buffer we won't use. */
1008 workbench_setup_ghost_framebuffer(fbl);
1010 GPU_framebuffer_bind(fbl->ghost_prepass_fb);
1011 GPU_framebuffer_clear_depth(fbl->ghost_prepass_fb, 1.0f);
1012 DRW_draw_pass(psl->ghost_prepass_pass);
1013 DRW_draw_pass(psl->ghost_prepass_hair_pass);
1015 GPU_framebuffer_bind(dfbl->depth_only_fb);
1016 DRW_draw_pass(psl->ghost_resolve_pass);
/* Screen-space cavity/curvature shading, computed from the prepass. */
1019 if (CAVITY_ENABLED(wpd)) {
1020 GPU_framebuffer_bind(fbl->cavity_fb);
1021 DRW_draw_pass(psl->cavity_pass);
/* Stencil shadow volumes: depth-pass volumes first, then depth-fail
 * volumes with their caps; results accumulate in the stencil buffer. */
1024 if (SHADOW_ENABLED(wpd)) {
1025 #ifdef DEBUG_SHADOW_VOLUME
/* Debug build path: composite first so the shadow volumes drawn below
 * are visible as geometry on top of the shaded result. */
1026 GPU_framebuffer_bind(fbl->composite_fb);
1027 DRW_draw_pass(psl->composite_pass);
1029 GPU_framebuffer_bind(dfbl->depth_only_fb);
1031 DRW_draw_pass(psl->shadow_depth_pass_pass);
1032 DRW_draw_pass(psl->shadow_depth_pass_mani_pass);
1033 DRW_draw_pass(psl->shadow_depth_fail_pass);
1034 DRW_draw_pass(psl->shadow_depth_fail_mani_pass);
1035 DRW_draw_pass(psl->shadow_depth_fail_caps_pass);
1036 DRW_draw_pass(psl->shadow_depth_fail_caps_mani_pass);
1038 if (GHOST_ENABLED(psl)) {
1039 /* We need to reset the stencil buffer to 0 where ghost objects are drawn,
1040 * otherwise they would receive shadows, and badly so. */
1041 DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1042 DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1044 DRW_draw_pass(psl->ghost_prepass_pass);
1045 DRW_draw_pass(psl->ghost_prepass_hair_pass);
1047 #ifndef DEBUG_SHADOW_VOLUME
/* Normal path: deferred composite, lit and shadowed via the stencil mask. */
1048 GPU_framebuffer_bind(fbl->composite_fb);
1049 DRW_draw_pass(psl->composite_pass);
1050 DRW_draw_pass(psl->composite_shadow_pass);
/* No shadows: plain deferred composite. */
1054 GPU_framebuffer_bind(fbl->composite_fb);
1055 DRW_draw_pass(psl->composite_pass);
1058 /* TODO(fclem): only enable when needed (when there is overlays). */
1059 if (GHOST_ENABLED(psl)) {
1060 /* In order to not draw on top of ghost objects, we clear the stencil
1061 * to 0xFF and the ghost object to 0x00 and only draw overlays on top if
1062 * stencil is not 0. */
1063 GPU_framebuffer_bind(dfbl->depth_only_fb);
1064 GPU_framebuffer_clear_stencil(dfbl->depth_only_fb, 0xFF);
1066 DRW_pass_state_set(psl->ghost_prepass_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1067 DRW_pass_state_set(psl->ghost_prepass_hair_pass, DRW_STATE_DEPTH_EQUAL | DRW_STATE_WRITE_STENCIL);
1069 DRW_draw_pass(psl->ghost_prepass_pass);
1070 DRW_draw_pass(psl->ghost_prepass_hair_pass);
/* Background fills the pixels the composite left untouched. */
1073 GPU_framebuffer_bind(fbl->composite_fb);
1074 DRW_draw_pass(psl->background_pass);
/* Transparent volumes (e.g. smoke) blend over the composited result. */
1076 if (wpd->volumes_do) {
1077 GPU_framebuffer_bind(fbl->volume_fb);
1078 DRW_draw_pass(psl->volume_pass);
/* Final anti-aliasing resolve (TAA/FXAA) of the composite buffer. */
1081 workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
/* Per-frame teardown: release the private-data GPU resources and the smoke
 * volume textures created during cache population.
 * NOTE(review): `workbench_private_data_free(wpd)` runs before
 * `workbench_volume_smoke_textures_free(wpd)` — presumably it frees wpd's
 * owned GPU resources but not the wpd allocation itself; confirm against the
 * free functions before reordering. */
1084 void workbench_deferred_draw_finish(WORKBENCH_Data *vedata)
1086 WORKBENCH_StorageList *stl = vedata->stl;
1087 WORKBENCH_PrivateData *wpd = stl->g_data;
1089 workbench_private_data_free(wpd);
1090 workbench_volume_smoke_textures_free(wpd);