Merge branch 'blender-v2.82-release'
[blender.git] / source / blender / draw / engines / workbench / workbench_forward.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2016, Blender Foundation.
17  */
18
19 /** \file
20  * \ingroup draw_engine
21  */
22
23 #include "workbench_private.h"
24
25 #include "BLI_alloca.h"
26 #include "BLI_dynstr.h"
27 #include "BLI_string_utils.h"
28 #include "BLI_utildefines.h"
29
30 #include "BKE_modifier.h"
31 #include "BKE_object.h"
32 #include "BKE_paint.h"
33 #include "BKE_particle.h"
34
35 #include "DNA_image_types.h"
36 #include "DNA_fluid_types.h"
37 #include "DNA_mesh_types.h"
38 #include "DNA_modifier_types.h"
39 #include "DNA_node_types.h"
40
41 #include "ED_view3d.h"
42
43 #include "GPU_shader.h"
44 #include "GPU_texture.h"
45
46 /* *********** STATIC *********** */
47
/* Per shader-configuration (normal / clip-planes) set of forward shaders. */
typedef struct WORKBENCH_FORWARD_Shaders {
  /* Transparent accumulation shader variants; slot chosen by
   * workbench_material_get_accum_shader_index(). */
  struct GPUShader *transparent_accum_sh_cache[MAX_ACCUM_SHADERS];
  /* Depth/object-id pass shaders used to draw object outlines. */
  struct GPUShader *object_outline_sh;
  struct GPUShader *object_outline_texture_sh;
  struct GPUShader *object_outline_hair_sh;
} WORKBENCH_FORWARD_Shaders;
54
/* Engine-static data: cached shaders plus per-frame texture references. */
static struct {
  /* One shader set per eGPUShaderConfig (e.g. default and clipped). */
  WORKBENCH_FORWARD_Shaders sh_data[GPU_SHADER_CFG_LEN];

  /* Composite shaders: index 1 when object outlines are enabled, 0 otherwise
   * (see ensure_forward_composite_shaders). */
  struct GPUShader *composite_sh_cache[2];
  struct GPUShader *checker_depth_sh;

  struct GPUTexture *object_id_tx;             /* ref only, not alloced */
  struct GPUTexture *transparent_accum_tx;     /* ref only, not alloced */
  struct GPUTexture *transparent_revealage_tx; /* ref only, not alloced */
  struct GPUTexture *composite_buffer_tx;      /* ref only, not alloced */
} e_data = {{{{NULL}}}};
66
67 /* Shaders */
68 extern char datatoc_common_hair_lib_glsl[];
69 extern char datatoc_common_view_lib_glsl[];
70
71 extern char datatoc_workbench_forward_composite_frag_glsl[];
72 extern char datatoc_workbench_forward_depth_frag_glsl[];
73 extern char datatoc_workbench_forward_transparent_accum_frag_glsl[];
74 extern char datatoc_workbench_data_lib_glsl[];
75 extern char datatoc_workbench_background_lib_glsl[];
76 extern char datatoc_workbench_checkerboard_depth_frag_glsl[];
77 extern char datatoc_workbench_object_outline_lib_glsl[];
78 extern char datatoc_workbench_curvature_lib_glsl[];
79 extern char datatoc_workbench_prepass_vert_glsl[];
80 extern char datatoc_workbench_common_lib_glsl[];
81 extern char datatoc_workbench_world_light_lib_glsl[];
82
83 /* static functions */
84 static char *workbench_build_forward_vert(bool is_hair)
85 {
86   DynStr *ds = BLI_dynstr_new();
87   if (is_hair) {
88     BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
89   }
90   BLI_dynstr_append(ds, datatoc_common_view_lib_glsl);
91   BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);
92
93   char *str = BLI_dynstr_get_cstring(ds);
94   BLI_dynstr_free(ds);
95   return str;
96 }
97
98 static char *workbench_build_forward_outline_frag(void)
99 {
100   DynStr *ds = BLI_dynstr_new();
101
102   BLI_dynstr_append(ds, datatoc_common_view_lib_glsl);
103   BLI_dynstr_append(ds, datatoc_workbench_forward_depth_frag_glsl);
104
105   char *str = BLI_dynstr_get_cstring(ds);
106   BLI_dynstr_free(ds);
107   return str;
108 }
109
110 static char *workbench_build_forward_transparent_accum_frag(void)
111 {
112   DynStr *ds = BLI_dynstr_new();
113
114   BLI_dynstr_append(ds, datatoc_common_view_lib_glsl);
115   BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
116   BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
117   BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
118   BLI_dynstr_append(ds, datatoc_workbench_forward_transparent_accum_frag_glsl);
119
120   char *str = BLI_dynstr_get_cstring(ds);
121   BLI_dynstr_free(ds);
122   return str;
123 }
124
125 static char *workbench_build_forward_composite_frag(void)
126 {
127   DynStr *ds = BLI_dynstr_new();
128
129   BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
130   BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
131   BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
132   BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
133   BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
134   BLI_dynstr_append(ds, datatoc_workbench_forward_composite_frag_glsl);
135
136   char *str = BLI_dynstr_get_cstring(ds);
137   BLI_dynstr_free(ds);
138   return str;
139 }
140
/**
 * Find or create the material data used by the forward (transparent) passes
 * for the given object/material/image combination.
 *
 * Materials are de-duplicated through `wpd->material_transp_hash`, keyed by a
 * hash of the filled-in template. On a cache miss, two shading groups are
 * created and stored on the material:
 * - `shgrp`: transparent accumulation pass (OIT).
 * - `shgrp_object_outline`: depth/object-id pass used to draw outlines.
 *
 * \param color_type: a V3D_SHADING_* color type.
 * \param interp: texture interpolation mode (SHD_INTERP_*), only meaningful
 * when a texture color type is used.
 * \return the cached or newly inserted material (owned by the hash table).
 */
WORKBENCH_MaterialData *workbench_forward_get_or_create_material_data(WORKBENCH_Data *vedata,
                                                                      Object *ob,
                                                                      Material *mat,
                                                                      Image *ima,
                                                                      ImageUser *iuser,
                                                                      int color_type,
                                                                      int interp)
{
  const DRWContextState *draw_ctx = DRW_context_state_get();
  WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[draw_ctx->sh_cfg];
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PassList *psl = vedata->psl;
  WORKBENCH_PrivateData *wpd = stl->g_data;
  WORKBENCH_MaterialData *material;
  WORKBENCH_MaterialData material_template;
  DRWShadingGroup *grp;

  /* Solid */
  /* Fill the template so its hash identifies this material combination. */
  workbench_material_update_data(wpd, ob, mat, &material_template, color_type);
  material_template.color_type = color_type;
  material_template.ima = ima;
  material_template.iuser = iuser;
  material_template.interp = interp;
  uint hash = workbench_material_get_hash(&material_template, false);

  material = BLI_ghash_lookup(wpd->material_transp_hash, POINTER_FROM_UINT(hash));
  if (material == NULL) {
    material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);

    /* transparent accum */
    /* select the correct transparent accum shader */
    GPUShader *shader = (wpd->shading.color_type == color_type) ?
                            wpd->transparent_accum_sh :
                            wpd->transparent_accum_uniform_sh;
    const bool is_tiled = (ima && ima->source == IMA_SRC_TILED);
    if (color_type == V3D_SHADING_TEXTURE_COLOR) {
      /* Tiled (UDIM) images need the texture-array shader variant. */
      shader = is_tiled ? wpd->transparent_accum_textured_array_sh :
                          wpd->transparent_accum_textured_sh;
    }

    grp = DRW_shgroup_create(shader, psl->transparent_accum_pass);
    DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
    DRW_shgroup_uniform_float_copy(grp, "alpha", material_template.alpha);
    DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
    workbench_material_copy(material, &material_template);
    if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
      /* Make sure the matcap GPU textures exist before binding them. */
      BKE_studiolight_ensure_flag(wpd->studio_light,
                                  STUDIOLIGHT_MATCAP_DIFFUSE_GPUTEXTURE |
                                      STUDIOLIGHT_MATCAP_SPECULAR_GPUTEXTURE);
      DRW_shgroup_uniform_texture(
          grp, "matcapDiffuseImage", wpd->studio_light->matcap_diffuse.gputexture);
      if (workbench_is_specular_highlight_enabled(wpd)) {
        DRW_shgroup_uniform_texture(
            grp, "matcapSpecularImage", wpd->studio_light->matcap_specular.gputexture);
      }
    }
    if (workbench_is_specular_highlight_enabled(wpd) || MATCAP_ENABLED(wpd)) {
      DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
    }
    if (SHADOW_ENABLED(wpd)) {
      DRW_shgroup_uniform_float_copy(grp, "shadowMultiplier", wpd->shadow_multiplier);
      DRW_shgroup_uniform_float_copy(grp, "shadowShift", wpd->shadow_shift);
      DRW_shgroup_uniform_float_copy(grp, "shadowFocus", wpd->shadow_focus);
    }

    workbench_material_shgroup_uniform(wpd, grp, material, ob, false, is_tiled, interp);
    material->shgrp = grp;

    /* Depth */
    /* Object-outline pass: textured color types bind the image so texture
     * alpha can affect the outline/depth result. */
    if (color_type == V3D_SHADING_TEXTURE_COLOR) {
      material->shgrp_object_outline = DRW_shgroup_create(sh_data->object_outline_texture_sh,
                                                          psl->object_outline_pass);
      GPUTexture *tex = GPU_texture_from_blender(
          material->ima, material->iuser, NULL, GL_TEXTURE_2D);
      DRW_shgroup_uniform_texture(material->shgrp_object_outline, "image", tex);
    }
    else {
      material->shgrp_object_outline = DRW_shgroup_create(sh_data->object_outline_sh,
                                                          psl->object_outline_pass);
    }
    if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
      DRW_shgroup_state_enable(material->shgrp_object_outline, DRW_STATE_CLIP_PLANES);
    }
    /* Cache for subsequent lookups this frame. */
    BLI_ghash_insert(wpd->material_transp_hash, POINTER_FROM_UINT(hash), material);
  }
  return material;
}
228
/**
 * Lazily create (and cache) the transparent-accumulation shader variant for
 * the given option combination. The cache slot is computed by
 * workbench_material_get_accum_shader_index(), so each combination compiles
 * at most once per shader configuration.
 */
static GPUShader *ensure_forward_accum_shaders(WORKBENCH_PrivateData *wpd,
                                               bool is_uniform_color,
                                               bool is_hair,
                                               bool is_tiled,
                                               const WORKBENCH_ColorOverride color_override,
                                               eGPUShaderConfig sh_cfg)
{
  WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[sh_cfg];
  int index = workbench_material_get_accum_shader_index(
      wpd, is_uniform_color, is_hair, is_tiled, color_override);
  if (sh_data->transparent_accum_sh_cache[index] == NULL) {
    const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
    char *defines = workbench_material_build_defines(
        wpd, is_uniform_color, is_hair, is_tiled, color_override);
    char *transparent_accum_vert = workbench_build_forward_vert(is_hair);
    char *transparent_accum_frag = workbench_build_forward_transparent_accum_frag();
    sh_data->transparent_accum_sh_cache[index] = GPU_shader_create_from_arrays({
        .vert = (const char *[]){sh_cfg_data->lib, transparent_accum_vert, NULL},
        .frag = (const char *[]){transparent_accum_frag, NULL},
        .defs = (const char *[]){sh_cfg_data->def, defines, NULL},
    });
    /* Sources were concatenated into the shader; free the temporaries. */
    MEM_freeN(transparent_accum_vert);
    MEM_freeN(transparent_accum_frag);
    MEM_freeN(defines);
  }
  return sh_data->transparent_accum_sh_cache[index];
}
256
/**
 * Lazily create (and cache) the full-screen composite shader.
 * Two variants exist: with (index 1) and without (index 0) object outlines.
 */
static GPUShader *ensure_forward_composite_shaders(WORKBENCH_PrivateData *wpd)
{
  int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
  if (e_data.composite_sh_cache[index] == NULL) {
    char *defines = workbench_material_build_defines(
        wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
    char *composite_frag = workbench_build_forward_composite_frag();
    e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
    MEM_freeN(composite_frag);
    MEM_freeN(defines);
  }
  return e_data.composite_sh_cache[index];
}
270
/**
 * Resolve all forward shader pointers on `wpd` for the current shading
 * settings, compiling each variant on first use. Variants differ by
 * (uniform color, hair, tiled image, color override) — see
 * ensure_forward_accum_shaders() argument order.
 */
void workbench_forward_choose_shaders(WORKBENCH_PrivateData *wpd, eGPUShaderConfig sh_cfg)
{
  wpd->composite_sh = ensure_forward_composite_shaders(wpd);
  wpd->transparent_accum_sh = ensure_forward_accum_shaders(
      wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
  wpd->transparent_accum_hair_sh = ensure_forward_accum_shaders(
      wpd, false, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
  wpd->transparent_accum_uniform_sh = ensure_forward_accum_shaders(
      wpd, true, false, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
  wpd->transparent_accum_uniform_hair_sh = ensure_forward_accum_shaders(
      wpd, true, true, false, WORKBENCH_COLOR_OVERRIDE_OFF, sh_cfg);
  wpd->transparent_accum_textured_sh = ensure_forward_accum_shaders(
      wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
  wpd->transparent_accum_textured_array_sh = ensure_forward_accum_shaders(
      wpd, false, false, true, WORKBENCH_COLOR_OVERRIDE_TEXTURE, sh_cfg);
  wpd->transparent_accum_vertex_sh = ensure_forward_accum_shaders(
      wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_VERTEX, sh_cfg);
}
289
/**
 * Compile (once per shader configuration) the three object-outline shaders:
 * plain, textured and hair. Idempotent: returns immediately when the set has
 * already been built for `sh_cfg`.
 */
void workbench_forward_outline_shaders_ensure(WORKBENCH_PrivateData *wpd, eGPUShaderConfig sh_cfg)
{
  WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[sh_cfg];

  if (sh_data->object_outline_sh == NULL) {
    const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
    /* Separate define sets per variant (plain / textured / hair). */
    char *defines = workbench_material_build_defines(
        wpd, false, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
    char *defines_texture = workbench_material_build_defines(
        wpd, true, false, false, WORKBENCH_COLOR_OVERRIDE_OFF);
    char *defines_hair = workbench_material_build_defines(
        wpd, false, true, false, WORKBENCH_COLOR_OVERRIDE_OFF);
    char *forward_vert = workbench_build_forward_vert(false);
    char *forward_frag = workbench_build_forward_outline_frag();
    char *forward_hair_vert = workbench_build_forward_vert(true);

    const char *define_id_pass = "#define OBJECT_ID_PASS_ENABLED\n";

    sh_data->object_outline_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){sh_cfg_data->lib, forward_vert, NULL},
        .frag = (const char *[]){forward_frag, NULL},
        .defs = (const char *[]){sh_cfg_data->def, defines, define_id_pass, NULL},
    });
    sh_data->object_outline_texture_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){sh_cfg_data->lib, forward_vert, NULL},
        .frag = (const char *[]){forward_frag, NULL},
        .defs = (const char *[]){sh_cfg_data->def, defines_texture, define_id_pass, NULL},
    });
    sh_data->object_outline_hair_sh = GPU_shader_create_from_arrays({
        .vert = (const char *[]){sh_cfg_data->lib, forward_hair_vert, NULL},
        .frag = (const char *[]){forward_frag, NULL},
        .defs = (const char *[]){sh_cfg_data->def, defines_hair, define_id_pass, NULL},
    });

    /* Sources were consumed by shader creation; free the temporaries. */
    MEM_freeN(forward_hair_vert);
    MEM_freeN(forward_vert);
    MEM_freeN(forward_frag);
    MEM_freeN(defines);
    MEM_freeN(defines_texture);
    MEM_freeN(defines_hair);
  }
}
332
333 /* public functions */
334 void workbench_forward_engine_init(WORKBENCH_Data *vedata)
335 {
336   WORKBENCH_FramebufferList *fbl = vedata->fbl;
337   WORKBENCH_PassList *psl = vedata->psl;
338   WORKBENCH_StorageList *stl = vedata->stl;
339   DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
340   const DRWContextState *draw_ctx = DRW_context_state_get();
341   DRWShadingGroup *grp;
342
343   if (!stl->g_data) {
344     /* Alloc transient pointers */
345     stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
346   }
347   if (!stl->effects) {
348     stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
349     workbench_effect_info_init(stl->effects);
350   }
351   WORKBENCH_PrivateData *wpd = stl->g_data;
352   workbench_private_data_init(wpd);
353
354   if (!e_data.checker_depth_sh) {
355     e_data.checker_depth_sh = DRW_shader_create_fullscreen(
356         datatoc_workbench_checkerboard_depth_frag_glsl, NULL);
357   }
358
359   workbench_forward_outline_shaders_ensure(wpd, draw_ctx->sh_cfg);
360
361   workbench_volume_engine_init();
362   workbench_fxaa_engine_init();
363   workbench_taa_engine_init(vedata);
364
365   workbench_forward_outline_shaders_ensure(wpd, draw_ctx->sh_cfg);
366   workbench_forward_choose_shaders(wpd, draw_ctx->sh_cfg);
367
368   const float *viewport_size = DRW_viewport_size_get();
369   const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};
370   const eGPUTextureFormat comp_tex_format = DRW_state_is_image_render() ? GPU_RGBA16F :
371                                                                           GPU_R11F_G11F_B10F;
372
373   e_data.object_id_tx = DRW_texture_pool_query_2d(
374       size[0], size[1], GPU_R32UI, &draw_engine_workbench_transparent);
375   e_data.transparent_accum_tx = DRW_texture_pool_query_2d(
376       size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_transparent);
377   e_data.transparent_revealage_tx = DRW_texture_pool_query_2d(
378       size[0], size[1], GPU_R16F, &draw_engine_workbench_transparent);
379   e_data.composite_buffer_tx = DRW_texture_pool_query_2d(
380       size[0], size[1], comp_tex_format, &draw_engine_workbench_transparent);
381
382   GPU_framebuffer_ensure_config(&fbl->object_outline_fb,
383                                 {
384                                     GPU_ATTACHMENT_TEXTURE(dtxl->depth),
385                                     GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
386                                 });
387   GPU_framebuffer_ensure_config(&fbl->transparent_accum_fb,
388                                 {
389                                     GPU_ATTACHMENT_NONE,
390                                     GPU_ATTACHMENT_TEXTURE(e_data.transparent_accum_tx),
391                                     GPU_ATTACHMENT_TEXTURE(e_data.transparent_revealage_tx),
392                                 });
393   GPU_framebuffer_ensure_config(&fbl->composite_fb,
394                                 {
395                                     GPU_ATTACHMENT_NONE,
396                                     GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
397                                 });
398   GPU_framebuffer_ensure_config(&fbl->effect_fb,
399                                 {
400                                     GPU_ATTACHMENT_NONE,
401                                     GPU_ATTACHMENT_TEXTURE(e_data.transparent_accum_tx),
402                                 });
403
404   workbench_volume_cache_init(vedata);
405
406   DRWState clip_state = WORLD_CLIPPING_ENABLED(wpd) ? DRW_STATE_CLIP_PLANES : 0;
407   DRWState cull_state = CULL_BACKFACE_ENABLED(wpd) ? DRW_STATE_CULL_BACK : 0;
408
409   /* Transparency Accum */
410   {
411     int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_OIT | cull_state | clip_state;
412     psl->transparent_accum_pass = DRW_pass_create("Transparent Accum", state);
413   }
414   /* Depth */
415   {
416     int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS | cull_state |
417                 clip_state;
418     psl->object_outline_pass = DRW_pass_create("Object Outline Pass", state);
419   }
420   /* Composite */
421   {
422     int state = DRW_STATE_WRITE_COLOR;
423     psl->composite_pass = DRW_pass_create("Composite", state);
424
425     grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
426     if (OBJECT_ID_PASS_ENABLED(wpd)) {
427       DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
428     }
429     DRW_shgroup_uniform_texture_ref(grp, "transparentAccum", &e_data.transparent_accum_tx);
430     DRW_shgroup_uniform_texture_ref(grp, "transparentRevealage", &e_data.transparent_revealage_tx);
431     DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
432     DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
433     DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
434   }
435
436   /* TODO(campbell): displays but masks geometry,
437    * only use with wire or solid-without-xray for now. */
438   if ((wpd->shading.type != OB_WIRE && !XRAY_FLAG_ENABLED(wpd)) &&
439       RV3D_CLIPPING_ENABLED(draw_ctx->v3d, draw_ctx->rv3d)) {
440     psl->background_pass = DRW_pass_create("Background",
441                                            DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_EQUAL);
442     GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_3D_UNIFORM_COLOR_BACKGROUND);
443     grp = DRW_shgroup_create(shader, psl->background_pass);
444     wpd->world_clip_planes_batch = DRW_draw_background_clipping_batch_from_rv3d(draw_ctx->rv3d);
445     DRW_shgroup_call(grp, wpd->world_clip_planes_batch, NULL);
446     DRW_shgroup_uniform_vec4(grp, "color", &wpd->world_clip_planes_color[0], 1);
447   }
448
449   {
450     workbench_aa_create_pass(vedata, &e_data.transparent_accum_tx);
451   }
452
453   /* Checker Depth */
454   {
455     static float noise_offset = 0.0f;
456     float blend_threshold = 0.0f;
457
458     if (DRW_state_is_image_render()) {
459       /* TODO: Should be based on the number of samples used for render. */
460       noise_offset = fmodf(noise_offset + 1.0f / 8.0f, 1.0f);
461     }
462
463     if (XRAY_ENABLED(wpd)) {
464       blend_threshold = 1.0f - XRAY_ALPHA(wpd) * 0.9f;
465     }
466
467     if (wpd->shading.type == OB_WIRE) {
468       wpd->shading.xray_alpha = 0.0f;
469       wpd->shading.xray_alpha_wire = 0.0f;
470     }
471
472     int state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
473     psl->checker_depth_pass = DRW_pass_create("Checker Depth", state);
474     grp = DRW_shgroup_create(e_data.checker_depth_sh, psl->checker_depth_pass);
475     DRW_shgroup_call(grp, DRW_cache_fullscreen_quad_get(), NULL);
476     DRW_shgroup_uniform_float_copy(grp, "threshold", blend_threshold);
477     DRW_shgroup_uniform_float_copy(grp, "offset", noise_offset);
478   }
479 }
480
481 void workbench_forward_engine_free()
482 {
483   for (int sh_data_index = 0; sh_data_index < ARRAY_SIZE(e_data.sh_data); sh_data_index++) {
484     WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[sh_data_index];
485     for (int index = 0; index < MAX_ACCUM_SHADERS; index++) {
486       DRW_SHADER_FREE_SAFE(sh_data->transparent_accum_sh_cache[index]);
487     }
488     DRW_SHADER_FREE_SAFE(sh_data->object_outline_sh);
489     DRW_SHADER_FREE_SAFE(sh_data->object_outline_texture_sh);
490     DRW_SHADER_FREE_SAFE(sh_data->object_outline_hair_sh);
491   }
492
493   for (int index = 0; index < 2; index++) {
494     DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
495   }
496   DRW_SHADER_FREE_SAFE(e_data.checker_depth_sh);
497
498   workbench_volume_engine_free();
499   workbench_fxaa_engine_free();
500   workbench_taa_engine_free();
501   workbench_dof_engine_free();
502 }
503
/* Intentionally empty: forward pass setup happens in
 * workbench_forward_engine_init(); this hook only satisfies the engine API. */
void workbench_forward_cache_init(WORKBENCH_Data *UNUSED(vedata))
{
}
507
/**
 * Add hair/particle-path draw calls for every visible particle system of this
 * object to the transparent accumulation and object-outline passes.
 */
static void workbench_forward_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
{
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PassList *psl = vedata->psl;
  WORKBENCH_PrivateData *wpd = stl->g_data;

  for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
    if (md->type != eModifierType_ParticleSystem) {
      continue;
    }
    ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
    if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
      continue;
    }
    ParticleSettings *part = psys->part;
    /* PART_DRAW_REND means "use the render visualization". */
    const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;

    if (draw_as == PART_DRAW_PATH) {
      const DRWContextState *draw_ctx = DRW_context_state_get();
      Material *mat;
      Image *image;
      ImageUser *iuser;
      int interp;
      workbench_material_get_image_and_mat(ob, part->omat, &image, &iuser, &interp, &mat);
      int color_type = workbench_material_determine_color_type(wpd, image, ob, false);
      WORKBENCH_MaterialData *material = workbench_forward_get_or_create_material_data(
          vedata, ob, mat, image, iuser, color_type, interp);

      /* Hair uses its own accum shader variants. */
      struct GPUShader *shader = (wpd->shading.color_type == color_type) ?
                                     wpd->transparent_accum_hair_sh :
                                     wpd->transparent_accum_uniform_hair_sh;
      DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
          ob, psys, md, psl->transparent_accum_pass, shader);
      DRW_shgroup_uniform_block(shgrp, "world_block", wpd->world_ubo);
      workbench_material_shgroup_uniform(wpd, shgrp, material, ob, false, false, interp);
      DRW_shgroup_uniform_vec4(shgrp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
      /* Hairs have lots of layer and can rapidly become the most prominent surface.
       * So lower their alpha artificially. */
      float hair_alpha = XRAY_ALPHA(wpd) * 0.33f;
      DRW_shgroup_uniform_float_copy(shgrp, "alpha", hair_alpha);
      if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
        /* Make sure the matcap GPU textures exist before binding them. */
        BKE_studiolight_ensure_flag(wpd->studio_light,
                                    STUDIOLIGHT_MATCAP_DIFFUSE_GPUTEXTURE |
                                        STUDIOLIGHT_MATCAP_SPECULAR_GPUTEXTURE);
        DRW_shgroup_uniform_texture(
            shgrp, "matcapDiffuseImage", wpd->studio_light->matcap_diffuse.gputexture);
        if (workbench_is_specular_highlight_enabled(wpd)) {
          DRW_shgroup_uniform_texture(
              shgrp, "matcapSpecularImage", wpd->studio_light->matcap_specular.gputexture);
        }
      }
      if (workbench_is_specular_highlight_enabled(wpd) || MATCAP_ENABLED(wpd)) {
        DRW_shgroup_uniform_vec2(shgrp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
      }

      /* Outline/depth pass for the same hair geometry. */
      WORKBENCH_FORWARD_Shaders *sh_data = &e_data.sh_data[draw_ctx->sh_cfg];
      shgrp = DRW_shgroup_hair_create(
          ob, psys, md, vedata->psl->object_outline_pass, sh_data->object_outline_hair_sh);
    }
  }
}
/**
 * Add draw calls for an object rendered in texture-paint mode. Depending on
 * the paint settings, either a single canvas image is used for the whole
 * surface or each material slot uses its own image.
 */
static void workbench_forward_cache_populate_texture_paint_mode(WORKBENCH_Data *vedata, Object *ob)
{
  WORKBENCH_StorageList *stl = vedata->stl;
  WORKBENCH_PrivateData *wpd = stl->g_data;
  const DRWContextState *draw_ctx = DRW_context_state_get();

  Scene *scene = draw_ctx->scene;
  const bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->v3d) &&
                               !DRW_state_is_image_render();
  WORKBENCH_MaterialData *material;

  /* Force workbench to render active object textured when in texture paint mode */
  const ImagePaintSettings *imapaint = &scene->toolsettings->imapaint;

  /* Single Image mode */
  if (imapaint->mode == IMAGEPAINT_MODE_IMAGE) {
    Image *image = imapaint->canvas;
    int interp = (imapaint->interp == IMAGEPAINT_INTERP_LINEAR) ? SHD_INTERP_LINEAR :
                                                                  SHD_INTERP_CLOSEST;
    int color_type = workbench_material_determine_color_type(wpd, image, ob, use_sculpt_pbvh);
    struct GPUBatch *geom = DRW_cache_mesh_surface_texpaint_single_get(ob);
    material = workbench_forward_get_or_create_material_data(
        vedata, ob, NULL, image, NULL, color_type, interp);

    DRW_shgroup_call(material->shgrp, geom, ob);
    DRW_shgroup_call(material->shgrp_object_outline, geom, ob);
  }
  else {
    /* IMAGEPAINT_MODE_MATERIAL */
    /* One batch + material data per material slot. */
    const int materials_len = DRW_cache_object_material_count_get(ob);
    struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
    for (int i = 0; i < materials_len; i++) {
      if (geom_array != NULL && geom_array[i] != NULL) {
        Material *mat;
        Image *image;
        ImageUser *iuser;
        int interp;
        /* Material slots are 1-based in this accessor. */
        workbench_material_get_image_and_mat(ob, i + 1, &image, &iuser, &interp, &mat);
        int color_type = workbench_material_determine_color_type(wpd, image, ob, use_sculpt_pbvh);
        material = workbench_forward_get_or_create_material_data(
            vedata, ob, mat, image, iuser, color_type, interp);

        DRW_shgroup_call(material->shgrp, geom_array[i], ob);
        DRW_shgroup_call(material->shgrp_object_outline, geom_array[i], ob);
      }
    }
  }
}
617 static void workbench_forward_cache_populate_vertex_paint_mode(WORKBENCH_Data *vedata, Object *ob)
618 {
619   WORKBENCH_StorageList *stl = vedata->stl;
620   WORKBENCH_PrivateData *wpd = stl->g_data;
621   const DRWContextState *draw_ctx = DRW_context_state_get();
622
623   const bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->v3d) &&
624                                !DRW_state_is_image_render();
625   WORKBENCH_MaterialData *material;
626
627   int color_type = workbench_material_determine_color_type(wpd, NULL, ob, use_sculpt_pbvh);
628   struct GPUBatch *geom = DRW_cache_mesh_surface_vertpaint_get(ob);
629   material = workbench_forward_get_or_create_material_data(
630       vedata, ob, NULL, NULL, NULL, color_type, false);
631   DRW_shgroup_call(material->shgrp, geom, ob);
632   DRW_shgroup_call(material->shgrp_object_outline, geom, ob);
633 }
634
635 void workbench_forward_cache_populate(WORKBENCH_Data *vedata, Object *ob)
636 {
637   WORKBENCH_StorageList *stl = vedata->stl;
638   WORKBENCH_PrivateData *wpd = stl->g_data;
639   const DRWContextState *draw_ctx = DRW_context_state_get();
640   Scene *scene = draw_ctx->scene;
641   const bool is_wire = (ob->dt == OB_WIRE);
642
643   if (!DRW_object_is_renderable(ob)) {
644     return;
645   }
646
647   if (ob->type == OB_MESH) {
648     workbench_forward_cache_populate_particles(vedata, ob);
649   }
650
651   ModifierData *md;
652   if (((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
653       (md = modifiers_findByType(ob, eModifierType_Fluid)) &&
654       (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
655       (((FluidModifierData *)md)->domain != NULL) &&
656       (((FluidModifierData *)md)->domain->type == FLUID_DOMAIN_TYPE_GAS)) {
657     workbench_volume_cache_populate(vedata, scene, ob, md);
658     return; /* Do not draw solid in this case. */
659   }
660
661   if (!(DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF)) {
662     return;
663   }
664   if (ob->dt < OB_WIRE) {
665     return;
666   }
667
668   WORKBENCH_MaterialData *material;
669   if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
670     const bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->v3d) &&
671                                  !DRW_state_is_image_render();
672     const int materials_len = DRW_cache_object_material_count_get(ob);
673     const Mesh *me = (ob->type == OB_MESH) ? ob->data : NULL;
674     const WORKBENCH_ColorOverride color_override = workbench_object_color_override_get(ob);
675     const bool use_texture_paint_drawing = !(DRW_state_is_image_render() &&
676                                              draw_ctx->v3d == NULL) &&
677                                            (color_override == WORKBENCH_COLOR_OVERRIDE_TEXTURE) &&
678                                            me && me->mloopuv;
679     const bool use_vertex_paint_drawing = !(DRW_state_is_image_render() &&
680                                             draw_ctx->v3d == NULL) &&
681                                           (color_override == WORKBENCH_COLOR_OVERRIDE_VERTEX) &&
682                                           me && me->mloopcol;
683
684     if (use_texture_paint_drawing) {
685       workbench_forward_cache_populate_texture_paint_mode(vedata, ob);
686     }
687     else if (use_vertex_paint_drawing) {
688       workbench_forward_cache_populate_vertex_paint_mode(vedata, ob);
689     }
690     else if (!use_sculpt_pbvh && TEXTURE_DRAWING_ENABLED(wpd) && me && me->mloopuv) {
691       struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
692       for (int i = 0; i < materials_len; i++) {
693         Material *mat;
694         Image *image;
695         ImageUser *iuser;
696         int interp;
697         workbench_material_get_image_and_mat(ob, i + 1, &image, &iuser, &interp, &mat);
698         int color_type = workbench_material_determine_color_type(wpd, image, ob, use_sculpt_pbvh);
699         material = workbench_forward_get_or_create_material_data(
700             vedata, ob, mat, image, iuser, color_type, interp);
701         DRW_shgroup_call(material->shgrp_object_outline, geom_array[i], ob);
702         DRW_shgroup_call(material->shgrp, geom_array[i], ob);
703       }
704     }
705     else if (ELEM(wpd->shading.color_type,
706                   V3D_SHADING_SINGLE_COLOR,
707                   V3D_SHADING_OBJECT_COLOR,
708                   V3D_SHADING_RANDOM_COLOR,
709                   V3D_SHADING_VERTEX_COLOR)) {
710       /* No material split needed */
711       int color_type = workbench_material_determine_color_type(wpd, NULL, ob, use_sculpt_pbvh);
712
713       if (use_sculpt_pbvh) {
714         material = workbench_forward_get_or_create_material_data(
715             vedata, ob, NULL, NULL, NULL, color_type, 0);
716         bool use_vcol = (color_type == V3D_SHADING_VERTEX_COLOR);
717         /* TODO(fclem) make this call optional */
718         DRW_shgroup_call_sculpt(material->shgrp_object_outline, ob, false, false, false);
719         if (!is_wire) {
720           DRW_shgroup_call_sculpt(material->shgrp, ob, false, false, use_vcol);
721         }
722       }
723       else {
724         struct GPUBatch *geom = (color_type == V3D_SHADING_VERTEX_COLOR) ?
725                                     DRW_cache_mesh_surface_vertpaint_get(ob) :
726                                     DRW_cache_object_surface_get(ob);
727         if (geom) {
728           material = workbench_forward_get_or_create_material_data(
729               vedata, ob, NULL, NULL, NULL, color_type, 0);
730           /* TODO(fclem) make this call optional */
731           DRW_shgroup_call(material->shgrp_object_outline, geom, ob);
732           if (!is_wire) {
733             DRW_shgroup_call(material->shgrp, geom, ob);
734           }
735         }
736       }
737     }
738     else {
739       /* Draw material color */
740       if (use_sculpt_pbvh) {
741         struct DRWShadingGroup **shgrps = BLI_array_alloca(shgrps, materials_len);
742
743         for (int i = 0; i < materials_len; i++) {
744           struct Material *mat = give_current_material(ob, i + 1);
745           material = workbench_forward_get_or_create_material_data(
746               vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
747           shgrps[i] = material->shgrp;
748         }
749         /* TODO(fclem) make this call optional */
750         DRW_shgroup_call_sculpt(material->shgrp_object_outline, ob, false, false, false);
751         if (!is_wire) {
752           DRW_shgroup_call_sculpt_with_materials(shgrps, ob, false);
753         }
754       }
755       else {
756         struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
757         memset(gpumat_array, 0, sizeof(*gpumat_array) * materials_len);
758
759         struct GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
760             ob, gpumat_array, materials_len);
761         if (mat_geom) {
762           for (int i = 0; i < materials_len; i++) {
763             if (mat_geom[i] == NULL) {
764               continue;
765             }
766
767             Material *mat = give_current_material(ob, i + 1);
768             material = workbench_forward_get_or_create_material_data(
769                 vedata, ob, mat, NULL, NULL, V3D_SHADING_MATERIAL_COLOR, 0);
770             /* TODO(fclem) make this call optional */
771             DRW_shgroup_call(material->shgrp_object_outline, mat_geom[i], ob);
772             if (!is_wire) {
773               DRW_shgroup_call(material->shgrp, mat_geom[i], ob);
774             }
775           }
776         }
777       }
778     }
779   }
780 }
781
/* Intentionally empty: forward mode needs no post-populate bookkeeping, but
 * the draw-engine callback table expects a cache_finish implementation. */
void workbench_forward_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}
785
786 void workbench_forward_draw_background(WORKBENCH_Data *UNUSED(vedata))
787 {
788   const float clear_depth = 1.0f;
789   DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
790   DRW_stats_group_start("Clear depth");
791   GPU_framebuffer_bind(dfbl->default_fb);
792   GPU_framebuffer_clear_depth_stencil(dfbl->default_fb, clear_depth, 0xFF);
793   DRW_stats_group_end();
794 }
795
796 void workbench_forward_draw_scene(WORKBENCH_Data *vedata)
797 {
798   WORKBENCH_PassList *psl = vedata->psl;
799   WORKBENCH_StorageList *stl = vedata->stl;
800   WORKBENCH_FramebufferList *fbl = vedata->fbl;
801   WORKBENCH_PrivateData *wpd = stl->g_data;
802   DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
803
804   if (workbench_is_taa_enabled(wpd)) {
805     workbench_taa_draw_scene_start(vedata);
806   }
807
808   /* Write Depth + Object ID */
809   const float clear_outline[4] = {0.0f};
810   GPU_framebuffer_bind(fbl->object_outline_fb);
811   GPU_framebuffer_clear_color(fbl->object_outline_fb, clear_outline);
812   DRW_draw_pass(psl->object_outline_pass);
813
814   if (XRAY_ALPHA(wpd) > 0.0) {
815     const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
816     GPU_framebuffer_bind(fbl->transparent_accum_fb);
817     GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
818     DRW_draw_pass(psl->transparent_accum_pass);
819   }
820   else {
821     /* TODO(fclem): this is unnecessary and takes up perf.
822      * Better change the composite frag shader to not use the tx. */
823     const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
824     GPU_framebuffer_bind(fbl->transparent_accum_fb);
825     GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
826   }
827
828   /* Composite */
829   GPU_framebuffer_bind(fbl->composite_fb);
830   DRW_draw_pass(psl->composite_pass);
831   DRW_draw_pass(psl->volume_pass);
832
833   /* Only when clipping is enabled. */
834   if (psl->background_pass) {
835     DRW_draw_pass(psl->background_pass);
836   }
837
838   /* Color correct and Anti aliasing */
839   workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);
840
841   /* Apply checker pattern */
842   GPU_framebuffer_bind(dfbl->depth_only_fb);
843   DRW_draw_pass(psl->checker_depth_pass);
844 }
845
846 void workbench_forward_draw_finish(WORKBENCH_Data *vedata)
847 {
848   WORKBENCH_StorageList *stl = vedata->stl;
849   WORKBENCH_PrivateData *wpd = stl->g_data;
850
851   workbench_private_data_free(wpd);
852   workbench_volume_smoke_textures_free(wpd);
853 }