/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file workbench_forward.c
 *  \ingroup draw_engine
 */

#include "workbench_private.h"

#include "BIF_gl.h"

#include "BLI_alloca.h"
#include "BLI_dynstr.h"
#include "BLI_utildefines.h"

#include "BKE_node.h"
#include "BKE_particle.h"
#include "BKE_modifier.h"

#include "DNA_image_types.h"
#include "DNA_mesh_types.h"
#include "DNA_modifier_types.h"
#include "DNA_node_types.h"

#include "ED_uvedit.h"

#include "GPU_shader.h"
#include "GPU_texture.h"

#include "UI_resources.h"

/* *********** STATIC *********** */
static struct {
        struct GPUShader *composite_sh_cache[2];
        struct GPUShader *transparent_accum_sh_cache[MAX_ACCUM_SHADERS];
        struct GPUShader *object_outline_sh;
        struct GPUShader *object_outline_texture_sh;
        struct GPUShader *object_outline_hair_sh;
        struct GPUShader *checker_depth_sh;

        struct GPUTexture *object_id_tx; /* ref only, not alloced */
        struct GPUTexture *transparent_accum_tx; /* ref only, not alloced */
        struct GPUTexture *transparent_revealage_tx; /* ref only, not alloced */
        struct GPUTexture *composite_buffer_tx; /* ref only, not alloced */

        int next_object_id;
} e_data = {{NULL}};

/* Shaders */
extern char datatoc_common_hair_lib_glsl[];

extern char datatoc_workbench_forward_composite_frag_glsl[];
extern char datatoc_workbench_forward_depth_frag_glsl[];
extern char datatoc_workbench_forward_transparent_accum_frag_glsl[];
extern char datatoc_workbench_data_lib_glsl[];
extern char datatoc_workbench_background_lib_glsl[];
extern char datatoc_workbench_checkerboard_depth_frag_glsl[];
extern char datatoc_workbench_object_outline_lib_glsl[];
extern char datatoc_workbench_curvature_lib_glsl[];
extern char datatoc_workbench_prepass_vert_glsl[];
extern char datatoc_workbench_common_lib_glsl[];
extern char datatoc_workbench_world_light_lib_glsl[];

/* static functions */
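/* Build the prepass vertex shader source. For meshes the prepass vertex shader
 * is returned as-is; for hair the common hair library is prepended so the same
 * shader body can also evaluate hair geometry. The returned string is
 * allocated and must be freed by the caller with MEM_freeN(). */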
static char *workbench_build_forward_vert(bool is_hair)
{
        char *str = NULL;
        if (!is_hair) {
                return BLI_strdup(datatoc_workbench_prepass_vert_glsl);
        }

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_common_hair_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_prepass_vert_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

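/* Assemble the transparent accumulation fragment shader from its GLSL
 * libraries (world data, common utilities, world lighting) plus the
 * accumulation shader body. Caller frees the returned string. */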
static char *workbench_build_forward_transparent_accum_frag(void)
{
        char *str = NULL;

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_world_light_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_forward_transparent_accum_frag_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

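/* Assemble the fullscreen composite fragment shader from its GLSL libraries
 * (background, object outline and curvature helpers) plus the composite
 * shader body. Caller frees the returned string. */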
static char *workbench_build_forward_composite_frag(void)
{
        char *str = NULL;

        DynStr *ds = BLI_dynstr_new();

        BLI_dynstr_append(ds, datatoc_workbench_data_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_common_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_background_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_object_outline_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_curvature_lib_glsl);
        BLI_dynstr_append(ds, datatoc_workbench_forward_composite_frag_glsl);

        str = BLI_dynstr_get_cstring(ds);
        BLI_dynstr_free(ds);
        return str;
}

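/* DrawData init callback: assign a compact per-object id in the range [1..256],
 * used for the object id / outline pass. */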
static void workbench_init_object_data(DrawData *dd)
{
        WORKBENCH_ObjectData *data = (WORKBENCH_ObjectData *)dd;
        data->object_id = ((e_data.next_object_id++) & 0xff) + 1;
}

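/* Look up (or lazily create) the per-material draw data for this
 * object/material/image/color-type combination. Entries are cached in
 * wpd->material_hash, keyed by workbench_material_get_hash(); new entries get
 * one shading group in the transparent accumulation pass and another in the
 * object outline (depth + object id) pass. */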
static WORKBENCH_MaterialData *get_or_create_material_data(
        WORKBENCH_Data *vedata, Object *ob, Material *mat, Image *ima, int color_type)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        WORKBENCH_MaterialData *material;
        WORKBENCH_ObjectData *engine_object_data = (WORKBENCH_ObjectData *)DRW_drawdata_ensure(
                &ob->id, &draw_engine_workbench_solid, sizeof(WORKBENCH_ObjectData), &workbench_init_object_data, NULL);
        WORKBENCH_MaterialData material_template;
        DRWShadingGroup *grp;

        /* Solid */
        workbench_material_update_data(wpd, ob, mat, &material_template);
        material_template.object_id = OBJECT_ID_PASS_ENABLED(wpd) ? engine_object_data->object_id : 1;
        material_template.color_type = color_type;
        material_template.ima = ima;
        uint hash = workbench_material_get_hash(&material_template, false);

        material = BLI_ghash_lookup(wpd->material_hash, POINTER_FROM_UINT(hash));
        if (material == NULL) {
                material = MEM_mallocN(sizeof(WORKBENCH_MaterialData), __func__);

                /* transparent accum */
                grp = DRW_shgroup_create(
                        color_type == V3D_SHADING_TEXTURE_COLOR ? wpd->transparent_accum_texture_sh : wpd->transparent_accum_sh,
                        psl->transparent_accum_pass);
                DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
                DRW_shgroup_uniform_float(grp, "alpha", &wpd->shading.xray_alpha, 1);
                DRW_shgroup_uniform_vec4(grp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
                workbench_material_copy(material, &material_template);
                if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
                        BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
                        DRW_shgroup_uniform_texture(grp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
                }
                if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || MATCAP_ENABLED(wpd)) {
                        DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
                }

                workbench_material_shgroup_uniform(wpd, grp, material, ob, false, false);
                material->shgrp = grp;

                /* Depth */
                if (workbench_material_determine_color_type(wpd, material->ima, ob) == V3D_SHADING_TEXTURE_COLOR) {
                        material->shgrp_object_outline = DRW_shgroup_create(
                                e_data.object_outline_texture_sh, psl->object_outline_pass);
                        GPUTexture *tex = GPU_texture_from_blender(material->ima, NULL, GL_TEXTURE_2D, false, 0.0f);
                        DRW_shgroup_uniform_texture(material->shgrp_object_outline, "image", tex);
                }
                else {
                        material->shgrp_object_outline = DRW_shgroup_create(
                                e_data.object_outline_sh, psl->object_outline_pass);
                }
                material->object_id = engine_object_data->object_id;
                DRW_shgroup_uniform_int(material->shgrp_object_outline, "object_id", &material->object_id, 1);
                BLI_ghash_insert(wpd->material_hash, POINTER_FROM_UINT(hash), material);
        }
        return material;
}

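/* Compile the transparent accumulation shader variant for the given
 * (textures, hair) combination on first use and cache it in
 * e_data.transparent_accum_sh_cache, indexed by
 * workbench_material_get_accum_shader_index(). */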
static GPUShader *ensure_forward_accum_shaders(WORKBENCH_PrivateData *wpd, bool use_textures, bool is_hair)
{
        int index = workbench_material_get_accum_shader_index(wpd, use_textures, is_hair);
        if (e_data.transparent_accum_sh_cache[index] == NULL) {
                char *defines = workbench_material_build_defines(wpd, use_textures, is_hair);
                char *transparent_accum_vert = workbench_build_forward_vert(is_hair);
                char *transparent_accum_frag = workbench_build_forward_transparent_accum_frag();
                e_data.transparent_accum_sh_cache[index] = DRW_shader_create(
                        transparent_accum_vert, NULL,
                        transparent_accum_frag, defines);
                MEM_freeN(transparent_accum_vert);
                MEM_freeN(transparent_accum_frag);
                MEM_freeN(defines);
        }
        return e_data.transparent_accum_sh_cache[index];
}

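/* Compile and cache the fullscreen composite shader; two variants exist,
 * with and without the object outline. */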
static GPUShader *ensure_forward_composite_shaders(WORKBENCH_PrivateData *wpd)
{
        int index = OBJECT_OUTLINE_ENABLED(wpd) ? 1 : 0;
        if (e_data.composite_sh_cache[index] == NULL) {
                char *defines = workbench_material_build_defines(wpd, false, false);
                char *composite_frag = workbench_build_forward_composite_frag();
                e_data.composite_sh_cache[index] = DRW_shader_create_fullscreen(composite_frag, defines);
                MEM_freeN(composite_frag);
                MEM_freeN(defines);
        }
        return e_data.composite_sh_cache[index];
}

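/* Resolve the shader variants needed for the current shading settings and
 * store them in the private data for this draw. */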
static void select_forward_shaders(WORKBENCH_PrivateData *wpd)
{
        wpd->composite_sh = ensure_forward_composite_shaders(wpd);
        wpd->transparent_accum_sh = ensure_forward_accum_shaders(wpd, false, false);
        wpd->transparent_accum_hair_sh = ensure_forward_accum_shaders(wpd, false, true);
        wpd->transparent_accum_texture_sh = ensure_forward_accum_shaders(wpd, true, false);
        wpd->transparent_accum_texture_hair_sh = ensure_forward_accum_shaders(wpd, true, true);
}

/* public functions */
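/* Per-redraw engine setup: allocate the private/effects data, compile the
 * static shaders on first use, request the intermediate render targets from
 * the texture pool, configure the framebuffers and create the draw passes
 * (transparent accumulation, object outline, composite, anti-aliasing and
 * checkerboard depth). */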
void workbench_forward_engine_init(WORKBENCH_Data *vedata)
{
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_StorageList *stl = vedata->stl;
        DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
        const DRWContextState *draw_ctx = DRW_context_state_get();
        DRWShadingGroup *grp;

        if (!stl->g_data) {
                /* Alloc transient pointers */
                stl->g_data = MEM_callocN(sizeof(*stl->g_data), __func__);
        }
        if (!stl->effects) {
                stl->effects = MEM_callocN(sizeof(*stl->effects), __func__);
                workbench_effect_info_init(stl->effects);
        }
        WORKBENCH_PrivateData *wpd = stl->g_data;
        workbench_private_data_init(wpd);
        float light_direction[3];
        workbench_private_data_get_light_direction(wpd, light_direction);

        if (!e_data.next_object_id) {
                e_data.next_object_id = 1;
                memset(e_data.composite_sh_cache, 0x00, sizeof(e_data.composite_sh_cache));
                memset(e_data.transparent_accum_sh_cache, 0x00, sizeof(e_data.transparent_accum_sh_cache));

                char *defines = workbench_material_build_defines(wpd, false, false);
                char *defines_texture = workbench_material_build_defines(wpd, true, false);
                char *defines_hair = workbench_material_build_defines(wpd, false, true);
                char *forward_vert = workbench_build_forward_vert(false);
                char *forward_hair_vert = workbench_build_forward_vert(true);
                e_data.object_outline_sh = DRW_shader_create(
                        forward_vert, NULL,
                        datatoc_workbench_forward_depth_frag_glsl, defines);
                e_data.object_outline_texture_sh = DRW_shader_create(
                        forward_vert, NULL,
                        datatoc_workbench_forward_depth_frag_glsl, defines_texture);
                e_data.object_outline_hair_sh = DRW_shader_create(
                        forward_hair_vert, NULL,
                        datatoc_workbench_forward_depth_frag_glsl, defines_hair);

                e_data.checker_depth_sh = DRW_shader_create_fullscreen(
                        datatoc_workbench_checkerboard_depth_frag_glsl, NULL);
                MEM_freeN(forward_hair_vert);
                MEM_freeN(forward_vert);
                MEM_freeN(defines);
                MEM_freeN(defines_texture);
                MEM_freeN(defines_hair);
        }
        workbench_volume_engine_init();
        workbench_fxaa_engine_init();
        workbench_taa_engine_init(vedata);

        select_forward_shaders(wpd);

        const float *viewport_size = DRW_viewport_size_get();
        const int size[2] = {(int)viewport_size[0], (int)viewport_size[1]};

        e_data.object_id_tx = DRW_texture_pool_query_2D(
                size[0], size[1], GPU_R32UI, &draw_engine_workbench_transparent);
        e_data.transparent_accum_tx = DRW_texture_pool_query_2D(
                size[0], size[1], GPU_RGBA16F, &draw_engine_workbench_transparent);
        e_data.transparent_revealage_tx = DRW_texture_pool_query_2D(
                size[0], size[1], GPU_R16F, &draw_engine_workbench_transparent);
        e_data.composite_buffer_tx = DRW_texture_pool_query_2D(
                size[0], size[1], GPU_R11F_G11F_B10F, &draw_engine_workbench_transparent);

        GPU_framebuffer_ensure_config(&fbl->object_outline_fb, {
                GPU_ATTACHMENT_TEXTURE(dtxl->depth),
                GPU_ATTACHMENT_TEXTURE(e_data.object_id_tx),
        });
        GPU_framebuffer_ensure_config(&fbl->transparent_accum_fb, {
                GPU_ATTACHMENT_NONE,
                GPU_ATTACHMENT_TEXTURE(e_data.transparent_accum_tx),
                GPU_ATTACHMENT_TEXTURE(e_data.transparent_revealage_tx),
        });
        GPU_framebuffer_ensure_config(&fbl->composite_fb, {
                GPU_ATTACHMENT_NONE,
                GPU_ATTACHMENT_TEXTURE(e_data.composite_buffer_tx),
        });
        GPU_framebuffer_ensure_config(&fbl->effect_fb, {
                GPU_ATTACHMENT_NONE,
                GPU_ATTACHMENT_TEXTURE(e_data.transparent_accum_tx),
        });

        workbench_volume_cache_init(vedata);
        const bool do_cull = (draw_ctx->v3d && (draw_ctx->v3d->flag2 & V3D_BACKFACE_CULLING));
        const int cull_state = (do_cull) ? DRW_STATE_CULL_BACK : 0;

        /* Transparency Accum */
        {
                int state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_OIT | cull_state;
                psl->transparent_accum_pass = DRW_pass_create("Transparent Accum", state);
        }
        /* Depth */
        {
                int state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS | cull_state;
                psl->object_outline_pass = DRW_pass_create("Object Outline Pass", state);
        }
        /* Composite */
        {
                int state = DRW_STATE_WRITE_COLOR;
                psl->composite_pass = DRW_pass_create("Composite", state);

                grp = DRW_shgroup_create(wpd->composite_sh, psl->composite_pass);
                if (OBJECT_ID_PASS_ENABLED(wpd)) {
                        DRW_shgroup_uniform_texture_ref(grp, "objectId", &e_data.object_id_tx);
                }
                DRW_shgroup_uniform_texture_ref(grp, "transparentAccum", &e_data.transparent_accum_tx);
                DRW_shgroup_uniform_texture_ref(grp, "transparentRevealage", &e_data.transparent_revealage_tx);
                DRW_shgroup_uniform_block(grp, "world_block", wpd->world_ubo);
                DRW_shgroup_uniform_vec2(grp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
                DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
        }

        {
                workbench_aa_create_pass(vedata, &e_data.transparent_accum_tx);
        }

        /* Checker Depth */
        {
                static float noise_offset = 0.0f;
                float blend_threshold = 0.0f;

                if (DRW_state_is_image_render()) {
                        /* TODO: Should be based on the number of samples used for render. */
                        noise_offset = fmodf(noise_offset + 1.0f / 8.0f, 1.0f);
                }

                if (wpd->shading.flag & XRAY_FLAG(wpd)) {
                        blend_threshold = 1.0f - XRAY_ALPHA(wpd) * 0.9f;
                }

                if (wpd->shading.type == OB_WIRE) {
                        wpd->shading.xray_alpha = 0.0f;
                        wpd->shading.xray_alpha_wire = 0.0f;
                }

                int state = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS;
                psl->checker_depth_pass = DRW_pass_create("Checker Depth", state);
                grp = DRW_shgroup_create(e_data.checker_depth_sh, psl->checker_depth_pass);
                DRW_shgroup_call_add(grp, DRW_cache_fullscreen_quad_get(), NULL);
                DRW_shgroup_uniform_float_copy(grp, "threshold", blend_threshold);
                DRW_shgroup_uniform_float_copy(grp, "offset", noise_offset);
        }
}

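/* Free all statically cached shaders and the sub-engine (volume, FXAA, TAA)
 * resources. */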
void workbench_forward_engine_free()
{
        for (int index = 0; index < 2; index++) {
                DRW_SHADER_FREE_SAFE(e_data.composite_sh_cache[index]);
        }
        for (int index = 0; index < MAX_ACCUM_SHADERS; index++) {
                DRW_SHADER_FREE_SAFE(e_data.transparent_accum_sh_cache[index]);
        }
        DRW_SHADER_FREE_SAFE(e_data.object_outline_sh);
        DRW_SHADER_FREE_SAFE(e_data.object_outline_texture_sh);
        DRW_SHADER_FREE_SAFE(e_data.object_outline_hair_sh);
        DRW_SHADER_FREE_SAFE(e_data.checker_depth_sh);

        workbench_volume_engine_free();
        workbench_fxaa_engine_free();
        workbench_taa_engine_free();
}

void workbench_forward_cache_init(WORKBENCH_Data *UNUSED(vedata))
{
}

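/* Add hair particle systems of this object to the transparent accumulation
 * and object outline passes, using the hair variants of the shaders and an
 * artificially lowered alpha. */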
static void workbench_forward_cache_populate_particles(WORKBENCH_Data *vedata, Object *ob)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_PrivateData *wpd = stl->g_data;

        for (ModifierData *md = ob->modifiers.first; md; md = md->next) {
                if (md->type != eModifierType_ParticleSystem) {
                        continue;
                }
                ParticleSystem *psys = ((ParticleSystemModifierData *)md)->psys;
                if (!psys_check_enabled(ob, psys, false)) {
                        continue;
                }
                if (!DRW_object_is_visible_psys_in_active_context(ob, psys)) {
                        continue;
                }
                ParticleSettings *part = psys->part;
                const int draw_as = (part->draw_as == PART_DRAW_REND) ? part->ren_as : part->draw_as;

                if (draw_as == PART_DRAW_PATH) {
                        Image *image = NULL;
                        Material *mat = give_current_material(ob, part->omat);
                        ED_object_get_active_image(ob, part->omat, &image, NULL, NULL, NULL);
                        int color_type = workbench_material_determine_color_type(wpd, image, ob);
                        WORKBENCH_MaterialData *material = get_or_create_material_data(vedata, ob, mat, image, color_type);

                        struct GPUShader *shader = (color_type != V3D_SHADING_TEXTURE_COLOR)
                                                   ? wpd->transparent_accum_hair_sh
                                                   : wpd->transparent_accum_texture_hair_sh;
                        DRWShadingGroup *shgrp = DRW_shgroup_hair_create(
                                                        ob, psys, md,
                                                        psl->transparent_accum_pass,
                                                        shader);
                        DRW_shgroup_uniform_block(shgrp, "world_block", wpd->world_ubo);
                        workbench_material_shgroup_uniform(wpd, shgrp, material, ob, false, false);
                        DRW_shgroup_uniform_vec4(shgrp, "viewvecs[0]", (float *)wpd->viewvecs, 3);
                        /* Hairs have lots of layers and can rapidly become the most prominent surface,
                         * so lower their alpha artificially. */
                        float hair_alpha = XRAY_ALPHA(wpd) * 0.33f;
                        DRW_shgroup_uniform_float_copy(shgrp, "alpha", hair_alpha);
                        if (STUDIOLIGHT_TYPE_MATCAP_ENABLED(wpd)) {
                                BKE_studiolight_ensure_flag(wpd->studio_light, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
                                DRW_shgroup_uniform_texture(shgrp, "matcapImage", wpd->studio_light->equirect_radiance_gputexture);
                        }
                        if (SPECULAR_HIGHLIGHT_ENABLED(wpd) || MATCAP_ENABLED(wpd)) {
                                DRW_shgroup_uniform_vec2(shgrp, "invertedViewportSize", DRW_viewport_invert_size_get(), 1);
                        }
                        shgrp = DRW_shgroup_hair_create(ob, psys, md,
                                                vedata->psl->object_outline_pass,
                                                e_data.object_outline_hair_sh);
                        DRW_shgroup_uniform_int(shgrp, "object_id", &material->object_id, 1);
                }
        }
}

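/* Add one object to the forward (X-ray) passes. Smoke domains are forwarded
 * to the volume engine, hair systems are handled separately, and surfaces are
 * split per material: texture drawing uses the texpaint batches from the mesh
 * batch cache, single/random color uses one batch for the whole object, and
 * the remaining color modes use one batch per material slot. */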
void workbench_forward_cache_populate(WORKBENCH_Data *vedata, Object *ob)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        const DRWContextState *draw_ctx = DRW_context_state_get();
        Scene *scene = draw_ctx->scene;
        const bool is_wire = (ob->dt == OB_WIRE);

        if (!DRW_object_is_renderable(ob))
                return;

        if (ob->type == OB_MESH) {
                workbench_forward_cache_populate_particles(vedata, ob);
        }

        ModifierData *md;
        if (((ob->base_flag & BASE_FROMDUPLI) == 0) &&
            (md = modifiers_findByType(ob, eModifierType_Smoke)) &&
            (modifier_isEnabled(scene, md, eModifierMode_Realtime)) &&
            (((SmokeModifierData *)md)->domain != NULL))
        {
                workbench_volume_cache_populate(vedata, scene, ob, md);
                return; /* Do not draw solid in this case. */
        }

        if (!DRW_object_is_visible_in_active_context(ob) || (ob->dt < OB_WIRE)) {
                return;
        }

        WORKBENCH_MaterialData *material;
        if (ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
                const bool is_active = (ob == draw_ctx->obact);
                const bool is_sculpt_mode = is_active && (draw_ctx->object_mode & OB_MODE_SCULPT) != 0;
                bool is_drawn = false;

                if (!is_sculpt_mode && TEXTURE_DRAWING_ENABLED(wpd) && ELEM(ob->type, OB_MESH)) {
                        const Mesh *me = ob->data;
                        if (me->mloopuv) {
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUBatch **geom_array = DRW_cache_mesh_surface_texpaint_get(ob);
                                for (int i = 0; i < materials_len; i++) {
                                        Material *mat = give_current_material(ob, i + 1);
                                        Image *image;
                                        ED_object_get_active_image(ob, i + 1, &image, NULL, NULL, NULL);

                                        int color_type = wpd->shading.color_type;
                                        if (color_type == V3D_SHADING_TEXTURE_COLOR) {
                                                /* use OB_SOLID when no texture could be determined */
                                                if (image == NULL) {
                                                        color_type = V3D_SHADING_MATERIAL_COLOR;
                                                }
                                        }

                                        material = get_or_create_material_data(vedata, ob, mat, image, color_type);
                                        DRW_shgroup_call_object_add(material->shgrp_object_outline, geom_array[i], ob);
                                        DRW_shgroup_call_object_add(material->shgrp, geom_array[i], ob);
                                }
                                is_drawn = true;
                        }
                }

                /* Fallback from not drawn OB_TEXTURE mode or just OB_SOLID mode */
                if (!is_drawn) {
                        if (ELEM(wpd->shading.color_type, V3D_SHADING_SINGLE_COLOR, V3D_SHADING_RANDOM_COLOR)) {
                                /* No material split needed */
                                struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
                                if (geom) {
                                        material = get_or_create_material_data(vedata, ob, NULL, NULL, wpd->shading.color_type);
                                        if (is_sculpt_mode) {
                                                DRW_shgroup_call_sculpt_add(material->shgrp_object_outline, ob, ob->obmat);
                                                if (!is_wire) {
                                                        DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
                                                }
                                        }
                                        else {
                                                DRW_shgroup_call_object_add(material->shgrp_object_outline, geom, ob);
                                                if (!is_wire) {
                                                        DRW_shgroup_call_object_add(material->shgrp, geom, ob);
                                                }
                                        }
                                }
                        }
                        else {
                                const int materials_len = MAX2(1, (is_sculpt_mode ? 1 : ob->totcol));
                                struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
                                for (int i = 0; i < materials_len; i++) {
                                        gpumat_array[i] = NULL;
                                }

                                struct GPUBatch **mat_geom = DRW_cache_object_surface_material_get(
                                        ob, gpumat_array, materials_len, NULL, NULL, NULL);
                                if (mat_geom) {
                                        for (int i = 0; i < materials_len; ++i) {
                                                if (mat_geom[i] == NULL) {
                                                        continue;
                                                }

                                                Material *mat = give_current_material(ob, i + 1);
                                                material = get_or_create_material_data(vedata, ob, mat, NULL, V3D_SHADING_MATERIAL_COLOR);
                                                if (is_sculpt_mode) {
                                                        DRW_shgroup_call_sculpt_add(material->shgrp_object_outline, ob, ob->obmat);
                                                        if (!is_wire) {
                                                                DRW_shgroup_call_sculpt_add(material->shgrp, ob, ob->obmat);
                                                        }
                                                }
                                                else {
                                                        DRW_shgroup_call_object_add(material->shgrp_object_outline, mat_geom[i], ob);
                                                        if (!is_wire) {
                                                                DRW_shgroup_call_object_add(material->shgrp, mat_geom[i], ob);
                                                        }
                                                }
                                        }
                                }
                        }
                }
        }
}

void workbench_forward_cache_finish(WORKBENCH_Data *UNUSED(vedata))
{
}

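/* Forward mode only clears the viewport depth buffer here; color output is
 * produced later by the composite pass. */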
void workbench_forward_draw_background(WORKBENCH_Data *UNUSED(vedata))
{
        const float clear_depth = 1.0f;
        DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
        DRW_stats_group_start("Clear depth");
        GPU_framebuffer_bind(dfbl->default_fb);
        GPU_framebuffer_clear_depth(dfbl->default_fb, clear_depth);
        DRW_stats_group_end();
}

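/* Draw the cached passes for one sample: object id/depth, transparent
 * accumulation (when X-ray alpha is used), composite resolve, volumes,
 * anti-aliasing, and finally the checkerboard depth pattern written into the
 * viewport depth buffer. */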
void workbench_forward_draw_scene(WORKBENCH_Data *vedata)
{
        WORKBENCH_PassList *psl = vedata->psl;
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_FramebufferList *fbl = vedata->fbl;
        WORKBENCH_PrivateData *wpd = stl->g_data;
        DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();

        if (TAA_ENABLED(wpd)) {
                workbench_taa_draw_scene_start(vedata);
        }

        /* Write Depth + Object ID */
        const float clear_outline[4] = {0.0f};
        GPU_framebuffer_bind(fbl->object_outline_fb);
        GPU_framebuffer_clear_color(fbl->object_outline_fb, clear_outline);
        DRW_draw_pass(psl->object_outline_pass);

        if (XRAY_ALPHA(wpd) > 0.0) {
                const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
                GPU_framebuffer_bind(fbl->transparent_accum_fb);
                GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
                DRW_draw_pass(psl->transparent_accum_pass);
        }
        else {
                /* TODO(fclem): this is unnecessary and takes up perf.
                 * Better change the composite frag shader to not use the tx. */
                const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
                GPU_framebuffer_bind(fbl->transparent_accum_fb);
                GPU_framebuffer_clear_color(fbl->transparent_accum_fb, clear_color);
        }

        /* Composite */
        GPU_framebuffer_bind(fbl->composite_fb);
        DRW_draw_pass(psl->composite_pass);
        DRW_draw_pass(psl->volume_pass);

        /* Color correct and Anti aliasing */
        workbench_aa_draw_pass(vedata, e_data.composite_buffer_tx);

        /* Apply checker pattern */
        GPU_framebuffer_bind(dfbl->depth_only_fb);
        DRW_draw_pass(psl->checker_depth_pass);
}

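/* Per-draw cleanup: release the private data and any smoke volume textures
 * created for this draw. */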
void workbench_forward_draw_finish(WORKBENCH_Data *vedata)
{
        WORKBENCH_StorageList *stl = vedata->stl;
        WORKBENCH_PrivateData *wpd = stl->g_data;

        workbench_private_data_free(wpd);
        workbench_volume_smoke_textures_free(wpd);
}