Fix T62178 Eevee: Texture Box mapping not matching Cycles if object is scaled
blender.git: source/blender/draw/intern/draw_manager_data.c
1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  *
16  * Copyright 2016, Blender Foundation.
17  */
18
19 /** \file
20  * \ingroup draw
21  */
22
23 #include "draw_manager.h"
24
25 #include "BKE_anim.h"
26 #include "BKE_curve.h"
27 #include "BKE_global.h"
28 #include "BKE_mesh.h"
29 #include "BKE_object.h"
30 #include "BKE_paint.h"
31 #include "BKE_pbvh.h"
32
33 #include "DNA_curve_types.h"
34 #include "DNA_mesh_types.h"
35 #include "DNA_meta_types.h"
36
37 #include "BLI_hash.h"
38 #include "BLI_link_utils.h"
39 #include "BLI_mempool.h"
40
41 #include "intern/gpu_codegen.h"
42
43 struct GPUVertFormat *g_pos_format = NULL;
44
45 /* -------------------------------------------------------------------- */
46 /** \name Uniform Buffer Object (DRW_uniformbuffer)
47  * \{ */
48
49 GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
50 {
51         return GPU_uniformbuffer_create(size, data, NULL);
52 }
53
54 void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
55 {
56         GPU_uniformbuffer_update(ubo, data);
57 }
58
59 void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
60 {
61         GPU_uniformbuffer_free(ubo);
62 }
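
/* Example (editor's sketch, not part of the original file): typical lifecycle of a
 * draw-manager uniform buffer. `MyEngineUboData` and `fill_ubo_data()` are hypothetical
 * engine-side names; the three wrappers above simply forward to the GPU_uniformbuffer API.
 *
 *   MyEngineUboData data;
 *   fill_ubo_data(&data);
 *   GPUUniformBuffer *ubo = DRW_uniformbuffer_create(sizeof(data), &data);
 *   ...
 *   fill_ubo_data(&data);                  (values changed)
 *   DRW_uniformbuffer_update(ubo, &data);  (re-upload)
 *   ...
 *   DRW_uniformbuffer_free(ubo);           (on engine exit)
 */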
63
64 /** \} */
65
66 /* -------------------------------------------------------------------- */
67 /** \name Uniforms (DRW_shgroup_uniform)
68  * \{ */
69
70 static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
71                                             DRWUniformType type, const void *value, int length, int arraysize)
72 {
73         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
74         uni->location = loc;
75         uni->type = type;
76         uni->length = length;
77         uni->arraysize = arraysize;
78
79         switch (type) {
80                 case DRW_UNIFORM_INT_COPY:
81                         uni->ivalue = *((int *)value);
82                         break;
83                 case DRW_UNIFORM_BOOL_COPY:
84                         uni->ivalue = (int)*((bool *)value);
85                         break;
86                 case DRW_UNIFORM_FLOAT_COPY:
87                         uni->fvalue = *((float *)value);
88                         break;
89                 default:
90                         uni->pvalue = value;
91                         break;
92         }
93
94         BLI_LINKS_PREPEND(shgroup->uniforms, uni);
95 }
96
97 static void drw_shgroup_builtin_uniform(
98         DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
99 {
100         int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
101
102         if (loc != -1) {
103                 drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
104         }
105 }
106
107 static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
108                                   DRWUniformType type, const void *value, int length, int arraysize)
109 {
110         int location;
111         if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
112                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
113         }
114         else {
115                 location = GPU_shader_get_uniform(shgroup->shader, name);
116         }
117
118         if (location == -1) {
119                 /* Nice to enable eventually; for now EEVEE uses uniforms that might not exist. */
120                 // BLI_assert(0);
121                 return;
122         }
123
124         BLI_assert(arraysize > 0 && arraysize <= 16);
125         BLI_assert(length >= 0 && length <= 16);
126
127         drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
128
129         /* If location is -2, the uniform has not yet been queried.
130          * We save the name for query just before drawing. */
131         if (location == -2 || DRW_DEBUG_USE_UNIFORM_NAME) {
132                 int ofs = DST.uniform_names.buffer_ofs;
133                 int max_len = DST.uniform_names.buffer_len - ofs;
134                 size_t len = strlen(name) + 1;
135
136                 if (len >= max_len) {
137                         DST.uniform_names.buffer_len += DRW_UNIFORM_BUFFER_NAME_INC;
138                         DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer, DST.uniform_names.buffer_len);
139                 }
140
141                 char *dst = DST.uniform_names.buffer + ofs;
142                 memcpy(dst, name, len); /* Copies NULL terminator. */
143
144                 DST.uniform_names.buffer_ofs += len;
145                 shgroup->uniforms->name_ofs = ofs;
146         }
147 }
148
149 void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
150 {
151         BLI_assert(tex != NULL);
152         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
153 }
154
155 /* Same as DRW_shgroup_uniform_texture, but the binding is guaranteed to persist if the shader does not change between shgroups. */
156 void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
157 {
158         BLI_assert(tex != NULL);
159         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
160 }
161
162 void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
163 {
164         BLI_assert(ubo != NULL);
165         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
166 }
167
168 /* Same as DRW_shgroup_uniform_block, but the binding is guaranteed to persist if the shader does not change between shgroups. */
169 void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
170 {
171         BLI_assert(ubo != NULL);
172         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
173 }
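
/* Example (editor's sketch): the *_persistent variants register the resource once and rely
 * on the binding surviving while consecutive shgroups use the same shader, which saves
 * re-binding shared data. The draw manager itself does this for the per-view "viewBlock"
 * UBO in drw_shgroup_init(). Engine usage would look like (hypothetical names):
 *
 *   DRW_shgroup_uniform_block_persistent(grp, "common_block", sldata->common_ubo);
 */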
174
175 void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
176 {
177         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
178 }
179
180 void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
181 {
182         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
183 }
184
185 void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
186 {
187         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
188 }
189
190 void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
191 {
192         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
193 }
194
195 void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
196 {
197         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
198 }
199
200 void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
201 {
202         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
203 }
204
205 void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
206 {
207         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
208 }
209
210 void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
211 {
212         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
213 }
214
215 void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
216 {
217         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
218 }
219
220 void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
221 {
222         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
223 }
224
225 void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
226 {
227         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
228 }
229
230 void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
231 {
232         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
233 }
234
235 void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
236 {
237         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
238 }
239
240 void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
241 {
242         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
243 }
244
245 /* Stores the int instead of a pointer. */
246 void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
247 {
248         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
249 }
250
251 void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
252 {
253         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
254 }
255
256 void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
257 {
258         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
259 }
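
/* Example (editor's sketch): the pointer variants (DRW_shgroup_uniform_float, _vec4, ...)
 * store the pointer and read the value at draw time, so the memory must stay valid until
 * drawing; the *_copy variants capture the value immediately. `e_data` is a hypothetical
 * static struct owned by the engine:
 *
 *   DRW_shgroup_uniform_vec4(grp, "color", e_data.color, 1);    (read when drawing)
 *   DRW_shgroup_uniform_float_copy(grp, "alpha", 0.5f);         (value baked in now)
 *   DRW_shgroup_uniform_bool_copy(grp, "useLighting", true);
 */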
260
261
262 /** \} */
263
264 /* -------------------------------------------------------------------- */
265 /** \name Draw Call (DRW_calls)
266  * \{ */
267
268 static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
269 {
270         ID *ob_data = (ob) ? ob->data : NULL;
271         float *texcoloc = NULL;
272         float *texcosize = NULL;
273         if (ob_data != NULL) {
274                 switch (GS(ob_data->name)) {
275                         case ID_ME:
276                                 BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
277                                 break;
278                         case ID_CU:
279                         {
280                                 Curve *cu = (Curve *)ob_data;
281                                 if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
282                                         BKE_curve_texspace_calc(cu);
283                                 }
284                                 texcoloc = cu->loc;
285                                 texcosize = cu->size;
286                                 break;
287                         }
288                         case ID_MB:
289                         {
290                                 MetaBall *mb = (MetaBall *)ob_data;
291                                 texcoloc = mb->loc;
292                                 texcosize = mb->size;
293                                 break;
294                         }
295                         default:
296                                 break;
297                 }
298         }
299
300         if ((texcoloc != NULL) && (texcosize != NULL)) {
301                 mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
302                 invert_v3(r_orcofacs[1]);
303                 sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
304                 negate_v3(r_orcofacs[0]);
305                 mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* results in a nice MADD in the shader */
306         }
307         else {
308                 copy_v3_fl(r_orcofacs[0], 0.0f);
309                 copy_v3_fl(r_orcofacs[1], 1.0f);
310         }
311 }
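
/* Editor's note (derivation sketch): the factors computed above turn the orco remap into a
 * single MADD in the shader. With texture-space center `loc` and half-size `size`:
 *
 *   orco = (pos - (loc - size)) / (2 * size)
 *        = pos * fac1 + fac0,   where  fac1 = 1 / (2 * size)
 *                                      fac0 = -(loc - size) * fac1
 *
 * which is exactly r_orcofacs[1] and r_orcofacs[0] above, mapping the texture space box
 * [loc - size, loc + size] to [0, 1].
 */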
312
313 static void drw_call_state_update_matflag(DRWCallState *state, DRWShadingGroup *shgroup, Object *ob)
314 {
315         uint16_t new_flags = ((state->matflag ^ shgroup->matflag) & shgroup->matflag);
316
317         /* HACK: Here we set the matflags bit to 1 when computing the value
318          * so that it's not recomputed for other drawcalls.
319          * This is the opposite of what draw_matrices_model_prepare() does. */
320         state->matflag |= shgroup->matflag;
321
322         /* Orco factors: computed at creation time so we don't have to keep a reference to the ob_data. */
323         if ((new_flags & DRW_CALL_ORCOTEXFAC) != 0) {
324                 drw_call_calc_orco(ob, state->orcotexfac);
325         }
326
327         if ((new_flags & DRW_CALL_OBJECTINFO) != 0) {
328                 state->objectinfo[0] = ob ? ob->index : 0;
329                 uint random;
330                 if (DST.dupli_source) {
331                         random = DST.dupli_source->random_id;
332                 }
333                 else {
334                         random = BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
335                 }
336                 state->objectinfo[1] = random * (1.0f / (float)0xFFFFFFFF);
337         }
338 }
339
340 static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
341 {
342         DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
343         state->flag = 0;
344         state->cache_id = 0;
345         state->visibility_cb = NULL;
346         state->matflag = 0;
347
348         /* Matrices */
349         if (obmat != NULL) {
350                 copy_m4_m4(state->model, obmat);
351
352                 if (is_negative_m4(state->model)) {
353                         state->flag |= DRW_CALL_NEGSCALE;
354                 }
355         }
356         else {
357                 unit_m4(state->model);
358         }
359
360         if (ob != NULL) {
361                 float corner[3];
362                 BoundBox *bbox = BKE_object_boundbox_get(ob);
363                 /* Get BoundSphere center and radius from the BoundBox. */
364                 mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
365                 mul_v3_m4v3(corner, obmat, bbox->vec[0]);
366                 mul_m4_v3(obmat, state->bsphere.center);
367                 state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
368         }
369         else {
370                 /* Bypass test. */
371                 state->bsphere.radius = -1.0f;
372         }
373
374         drw_call_state_update_matflag(state, shgroup, ob);
375
376         return state;
377 }
378
379 static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
380 {
381         if (DST.ob_state == NULL) {
382                 DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
383         }
384         else {
385                 /* If the DRWCallState is reused, add necessary matrices. */
386                 drw_call_state_update_matflag(DST.ob_state, shgroup, ob);
387         }
388
389         return DST.ob_state;
390 }
391
392 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
393 {
394         BLI_assert(geom != NULL);
395         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
396
397         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
398         call->state = drw_call_state_create(shgroup, obmat, NULL);
399         call->type = DRW_CALL_SINGLE;
400         call->single.geometry = geom;
401 #ifdef USE_GPU_SELECT
402         call->select_id = DST.select_id;
403 #endif
404
405         BLI_LINKS_APPEND(&shgroup->calls, call);
406 }
407
408 void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
409 {
410         BLI_assert(geom != NULL);
411         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
412         BLI_assert(v_count);
413
414         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
415         call->state = drw_call_state_create(shgroup, obmat, NULL);
416         call->type = DRW_CALL_RANGE;
417         call->range.geometry = geom;
418         call->range.start = v_sta;
419         call->range.count = v_count;
420 #ifdef USE_GPU_SELECT
421         call->select_id = DST.select_id;
422 #endif
423
424         BLI_LINKS_APPEND(&shgroup->calls, call);
425 }
426
427 static void drw_shgroup_call_procedural_add_ex(
428         DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
429 {
430         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
431
432         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
433         if (ob) {
434                 call->state = drw_call_state_object(shgroup, ob->obmat, ob);
435         }
436         else {
437                 call->state = drw_call_state_create(shgroup, obmat, NULL);
438         }
439         call->type = DRW_CALL_PROCEDURAL;
440         call->procedural.prim_type = prim_type;
441         call->procedural.vert_count = vert_count;
442 #ifdef USE_GPU_SELECT
443         call->select_id = DST.select_id;
444 #endif
445
446         BLI_LINKS_APPEND(&shgroup->calls, call);
447 }
448
449 void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
450 {
451         drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
452 }
453
454 void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
455 {
456         drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
457 }
458
459 void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
460 {
461         drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
462 }
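
/* Example (editor's sketch): procedural calls emit `vert_count` vertices without any VBO,
 * so the vertex shader must derive positions from gl_VertexID. A typical use is a single
 * full-screen triangle for an image-space pass:
 *
 *   DRW_shgroup_call_procedural_triangles_add(grp, 1, NULL);    (1 triangle = 3 vertices)
 */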
463
464 /* TODO (fclem): this is a sign that the API is starting to be limiting.
465  * Maybe add a more general purpose function to handle special cases like this one. */
466 void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
467 {
468         drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
469 }
470
471 /* These calls can be culled and are optimized for redraw */
472 void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma, bool bypass_culling)
473 {
474         BLI_assert(geom != NULL);
475         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
476
477         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
478         call->state = drw_call_state_object(shgroup, ob->obmat, ob);
479         call->type = DRW_CALL_SINGLE;
480         call->single.geometry = geom;
481         call->single.ma_index = ma ? ma->index : 0;
482 #ifdef USE_GPU_SELECT
483         call->select_id = DST.select_id;
484 #endif
485
486         /* NOTE this will disable culling for the whole object. */
487         call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;
488
489         BLI_LINKS_APPEND(&shgroup->calls, call);
490 }
491
492 void DRW_shgroup_call_object_add_with_callback(
493         DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma,
494         DRWCallVisibilityFn *callback, void *user_data)
495 {
496         BLI_assert(geom != NULL);
497         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
498
499         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
500         call->state = drw_call_state_object(shgroup, ob->obmat, ob);
501         call->state->visibility_cb = callback;
502         call->state->user_data = user_data;
503         call->type = DRW_CALL_SINGLE;
504         call->single.geometry = geom;
505         call->single.ma_index = ma ? ma->index : 0;
506 #ifdef USE_GPU_SELECT
507         call->select_id = DST.select_id;
508 #endif
509
510         BLI_LINKS_APPEND(&shgroup->calls, call);
511 }
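
/* Example (editor's sketch): per-object visibility callback. The exact DRWCallVisibilityFn
 * signature lives in DRW_render.h; it is assumed here to receive the culling test result
 * plus the user data registered above, and `MyObjectData` is hypothetical:
 *
 *   static bool shadow_caster_visible(bool vis_in, void *user_data)
 *   {
 *           const MyObjectData *data = user_data;
 *           return vis_in && data->cast_shadows;
 *   }
 *
 *   DRW_shgroup_call_object_add_with_callback(grp, geom, ob, NULL,
 *                                             shadow_caster_visible, ob_data);
 */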
512
513 void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
514 {
515         BLI_assert(geom != NULL);
516         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
517
518         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
519         call->state = drw_call_state_create(shgroup, obmat, NULL);
520         call->type = DRW_CALL_INSTANCES;
521         call->instances.geometry = geom;
522         call->instances.count = count;
523 #ifdef USE_GPU_SELECT
524         call->select_id = DST.select_id;
525 #endif
526
527         BLI_LINKS_APPEND(&shgroup->calls, call);
528 }
529
530 /* These calls can be culled and are optimized for redraw */
531 void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
532 {
533         BLI_assert(geom != NULL);
534         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
535
536         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
537         call->state = drw_call_state_object(shgroup, ob->obmat, ob);
538         call->type = DRW_CALL_INSTANCES;
539         call->instances.geometry = geom;
540         call->instances.count = count;
541 #ifdef USE_GPU_SELECT
542         call->select_id = DST.select_id;
543 #endif
544
545         BLI_LINKS_APPEND(&shgroup->calls, call);
546 }
547
548 void DRW_shgroup_call_generate_add(
549         DRWShadingGroup *shgroup,
550         DRWCallGenerateFn *geometry_fn, void *user_data,
551         float (*obmat)[4])
552 {
553         BLI_assert(geometry_fn != NULL);
554         BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
555
556         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
557         call->state = drw_call_state_create(shgroup, obmat, NULL);
558         call->type = DRW_CALL_GENERATE;
559         call->generate.geometry_fn = geometry_fn;
560         call->generate.user_data = user_data;
561 #ifdef USE_GPU_SELECT
562         call->select_id = DST.select_id;
563 #endif
564
565         BLI_LINKS_APPEND(&shgroup->calls, call);
566 }
567
568 static void sculpt_draw_cb(
569         DRWShadingGroup *shgroup,
570         void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
571         void *user_data)
572 {
573         Object *ob = user_data;
574
575         /* XXX should be ensured beforehand, but sometimes it's not... go figure (see T57040). */
576         PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
577
578         const DRWContextState *drwctx = DRW_context_state_get();
579         int fast_mode = 0;
580
581         if (drwctx->evil_C != NULL) {
582                 Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
583                 if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
584                         fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
585                 }
586         }
587
588         if (pbvh) {
589                 BKE_pbvh_draw_cb(
590                         pbvh, NULL, NULL, fast_mode, false, false,
591                         (void (*)(void *, GPUBatch *))draw_fn, shgroup);
592         }
593 }
594
595 static void sculpt_draw_wires_cb(
596         DRWShadingGroup *shgroup,
597         void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
598         void *user_data)
599 {
600         Object *ob = user_data;
601
602         /* XXX should be ensured beforehand, but sometimes it's not... go figure (see T57040). */
603         PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
604
605         const DRWContextState *drwctx = DRW_context_state_get();
606         int fast_mode = 0;
607
608         if (drwctx->evil_C != NULL) {
609                 Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
610                 if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
611                         fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
612                 }
613         }
614
615         if (pbvh) {
616                 BKE_pbvh_draw_cb(
617                         pbvh, NULL, NULL, fast_mode, true, false,
618                         (void (*)(void *, GPUBatch *))draw_fn, shgroup);
619         }
620 }
621
622 void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
623 {
624         DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
625 }
626
627 void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
628 {
629         DRW_shgroup_call_generate_add(shgroup, sculpt_draw_wires_cb, ob, obmat);
630 }
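
/* Example (editor's sketch): sculpt drawing defers geometry generation to draw time; the
 * generate callbacks above walk the object's PBVH and submit its batches:
 *
 *   DRW_shgroup_call_sculpt_add(grp, ob, ob->obmat);
 */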
631
632 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
633 {
634 #ifdef USE_GPU_SELECT
635         if (G.f & G_FLAG_PICKSEL) {
636                 if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
637                         GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
638                 }
639                 GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
640         }
641 #endif
642
643         BLI_assert(attr_len == shgroup->attrs_count);
644         UNUSED_VARS_NDEBUG(attr_len);
645
646         for (int i = 0; i < attr_len; ++i) {
647                 if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
648                         GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
649                 }
650                 GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
651         }
652
653         shgroup->instance_count += 1;
654 }
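
/* Example (editor's sketch): the attribute array must match the GPUVertFormat the batching
 * or instancing shgroup was created with, one pointer per attribute, in the same order.
 * For a hypothetical {"pos", vec3} + {"color", vec4} format, each call adds one instance:
 *
 *   const void *attrs[2] = {ob->obmat[3], color};
 *   DRW_shgroup_call_dynamic_add_array(grp, attrs, 2);
 */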
655
656 /** \} */
657
658 /* -------------------------------------------------------------------- */
659 /** \name Shading Groups (DRW_shgroup)
660  * \{ */
661
662 static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
663 {
664         shgroup->instance_geom = NULL;
665         shgroup->instance_vbo = NULL;
666         shgroup->instance_count = 0;
667         shgroup->uniforms = NULL;
668 #ifdef USE_GPU_SELECT
669         shgroup->inst_selectid = NULL;
670         shgroup->override_selectid = -1;
671 #endif
672 #ifndef NDEBUG
673         shgroup->attrs_count = 0;
674 #endif
675
676         int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
677
678         if (view_ubo_location != -1) {
679                 drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, G_draw.view_ubo, 0, 1);
680         }
681         else {
682                 /* Only here to support builtin shaders. This should not be used by engines. */
683                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
684                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
685                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
686                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
687                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
688                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
689                 drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
690         }
691
692         shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
693         shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
694         shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
695         shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
696         shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
697         shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
698         shgroup->normalviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL_INV);
699         shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
700         shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
701         shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
702         shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
703         shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
704
705         shgroup->matflag = 0;
706         if (shgroup->modelinverse > -1) {
707                 shgroup->matflag |= DRW_CALL_MODELINVERSE;
708         }
709         if (shgroup->modelview > -1) {
710                 shgroup->matflag |= DRW_CALL_MODELVIEW;
711         }
712         if (shgroup->modelviewinverse > -1) {
713                 shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
714         }
715         if (shgroup->modelviewprojection > -1) {
716                 shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
717         }
718         if (shgroup->normalview > -1) {
719                 shgroup->matflag |= DRW_CALL_NORMALVIEW;
720         }
721         if (shgroup->normalviewinverse > -1) {
722                 shgroup->matflag |= DRW_CALL_NORMALVIEWINVERSE;
723         }
724         if (shgroup->normalworld > -1) {
725                 shgroup->matflag |= DRW_CALL_NORMALWORLD;
726         }
727         if (shgroup->orcotexfac > -1) {
728                 shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
729         }
730         if (shgroup->objectinfo > -1) {
731                 shgroup->matflag |= DRW_CALL_OBJECTINFO;
732         }
733         if (shgroup->eye > -1) {
734                 shgroup->matflag |= DRW_CALL_EYEVEC;
735         }
736 }
737
738 static void drw_shgroup_instance_init(
739         DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
740 {
741         BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
742         BLI_assert(batch != NULL);
743         BLI_assert(format != NULL);
744
745         drw_shgroup_init(shgroup, shader);
746
747         shgroup->instance_geom = batch;
748 #ifndef NDEBUG
749         shgroup->attrs_count = format->attr_len;
750 #endif
751
752         DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
753                                       &shgroup->instance_geom, &shgroup->instance_vbo);
754
755 #ifdef USE_GPU_SELECT
756         if (G.f & G_FLAG_PICKSEL) {
757                 /* Not actually used for rendering but allocated in one chunk,
758                  * so we don't have to care about ownership. */
759                 static GPUVertFormat inst_select_format = {0};
760                 if (inst_select_format.attr_len == 0) {
761                         GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
762                 }
763                 GPUBatch *batch_dummy; /* Not used */
764                 DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
765                                             GPU_PRIM_POINTS, shgroup,
766                                             &batch_dummy, &shgroup->inst_selectid);
767         }
768 #endif
769 }
770
771 static void drw_shgroup_batching_init(
772         DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
773 {
774         drw_shgroup_init(shgroup, shader);
775
776 #ifndef NDEBUG
777         shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
778 #endif
779         BLI_assert(format != NULL);
780
781         GPUPrimType type;
782         switch (shgroup->type) {
783                 case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
784                 case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
785                 case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
786                 default: type = GPU_PRIM_NONE; BLI_assert(0); break;
787         }
788
789         DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
790                                     &shgroup->batch_geom, &shgroup->batch_vbo);
791
792 #ifdef USE_GPU_SELECT
793         if (G.f & G_FLAG_PICKSEL) {
794                 /* Not actually used for rendering but allocated in one chunk. */
795                 static GPUVertFormat inst_select_format = {0};
796                 if (inst_select_format.attr_len == 0) {
797                         GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
798                 }
799                 GPUBatch *batch; /* Not used */
800                 DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
801                                             GPU_PRIM_POINTS, shgroup,
802                                             &batch, &shgroup->inst_selectid);
803         }
804 #endif
805 }
806
807 static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
808 {
809         DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
810
811         BLI_LINKS_APPEND(&pass->shgroups, shgroup);
812
813         shgroup->type = DRW_SHG_NORMAL;
814         shgroup->shader = shader;
815         shgroup->state_extra = 0;
816         shgroup->state_extra_disable = ~0x0;
817         shgroup->stencil_mask = 0;
818         shgroup->calls.first = NULL;
819         shgroup->calls.last = NULL;
820 #if 0 /* All the same in the union! */
821         shgroup->batch_geom = NULL;
822         shgroup->batch_vbo = NULL;
823
824         shgroup->instance_geom = NULL;
825         shgroup->instance_vbo = NULL;
826 #endif
827         shgroup->pass_parent = pass;
828
829         return shgroup;
830 }
831
832 static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
833 {
834         if (!gpupass) {
835                 /* Shader compilation error */
836                 return NULL;
837         }
838
839         GPUShader *sh = GPU_pass_shader_get(gpupass);
840
841         if (!sh) {
842                 /* Shader not yet compiled */
843                 return NULL;
844         }
845
846         DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
847         return grp;
848 }
849
850 static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
851 {
852         ListBase *inputs = GPU_material_get_inputs(material);
853
854         /* Converting dynamic GPUInput to DRWUniform */
855         for (GPUInput *input = inputs->first; input; input = input->next) {
856                 /* Textures */
857                 if (input->source == GPU_SOURCE_TEX) {
858                         GPUTexture *tex = NULL;
859
860                         if (input->ima) {
861                                 tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata);
862                         }
863                         else {
864                                 /* Color Ramps */
865                                 tex = *input->coba;
866                         }
867
868                         if (input->bindtex) {
869                                 drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
870                         }
871                 }
872         }
873
874         GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
875         if (ubo != NULL) {
876                 DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
877         }
878
879         return grp;
880 }
881
882 GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], int arraysize)
883 {
884         GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
885
886         for (int i = 0; i < arraysize; ++i) {
887                 GPU_vertformat_attr_add(
888                         format, attrs[i].name,
889                         (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
890                         attrs[i].components,
891                         (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
892         }
893         return format;
894 }
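
/* Example (editor's sketch): engines usually build the format through the
 * DRW_shgroup_instance_format() macro (see its uses below), which wraps
 * DRW_shgroup_instance_format_array() and only creates the format the first time it runs:
 *
 *   static GPUVertFormat *g_my_format = NULL;    (hypothetical)
 *   DRW_shgroup_instance_format(g_my_format, {
 *           {"InstancePos",   DRW_ATTR_FLOAT, 3},
 *           {"InstanceColor", DRW_ATTR_FLOAT, 4},
 *   });
 */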
895
896 DRWShadingGroup *DRW_shgroup_material_create(
897         struct GPUMaterial *material, DRWPass *pass)
898 {
899         GPUPass *gpupass = GPU_material_get_pass(material);
900         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
901
902         if (shgroup) {
903                 drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
904                 drw_shgroup_material_inputs(shgroup, material);
905         }
906
907         return shgroup;
908 }
909
910 DRWShadingGroup *DRW_shgroup_material_instance_create(
911         struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
912 {
913         GPUPass *gpupass = GPU_material_get_pass(material);
914         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
915
916         if (shgroup) {
917                 shgroup->type = DRW_SHG_INSTANCE;
918                 shgroup->instance_geom = geom;
919                 drw_call_calc_orco(ob, shgroup->instance_orcofac);
920                 drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
921                 drw_shgroup_material_inputs(shgroup, material);
922         }
923
924         return shgroup;
925 }
926
927 DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
928         struct GPUMaterial *material, DRWPass *pass, int tri_count)
929 {
930 #ifdef USE_GPU_SELECT
931         BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
932 #endif
933         GPUPass *gpupass = GPU_material_get_pass(material);
934         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
935
936         if (shgroup) {
937                 /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
938                 drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
939                 shgroup->type = DRW_SHG_TRIANGLE_BATCH;
940                 shgroup->instance_count = tri_count * 3;
941                 drw_shgroup_material_inputs(shgroup, material);
942         }
943
944         return shgroup;
945 }
946
947 DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
948 {
949         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
950         drw_shgroup_init(shgroup, shader);
951         return shgroup;
952 }
953
954 DRWShadingGroup *DRW_shgroup_instance_create(
955         struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
956 {
957         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
958         shgroup->type = DRW_SHG_INSTANCE;
959         shgroup->instance_geom = geom;
960         drw_call_calc_orco(NULL, shgroup->instance_orcofac);
961         drw_shgroup_instance_init(shgroup, shader, geom, format);
962
963         return shgroup;
964 }
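
/* Example (editor's sketch): an instancing shgroup pairs the instanced geometry with the
 * per-instance format; instances are then appended with the dynamic calls above. All names
 * here are illustrative:
 *
 *   DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, sphere_batch, g_my_format);
 *   const void *attrs[2] = {ob->obmat[3], color};
 *   DRW_shgroup_call_dynamic_add_array(grp, attrs, 2);
 */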
965
966 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
967 {
968         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
969
970         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
971         shgroup->type = DRW_SHG_POINT_BATCH;
972
973         drw_shgroup_batching_init(shgroup, shader, g_pos_format);
974
975         return shgroup;
976 }
977
978 DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
979         struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
980 {
981         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
982         shgroup->type = DRW_SHG_LINE_BATCH;
983
984         drw_shgroup_batching_init(shgroup, shader, format);
985
986         return shgroup;
987 }
988
989 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
990 {
991         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
992
993         return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
994 }
995
996 /**
997  * Very special batch. Use this if you position
998  * your vertices with the vertex shader
999  * and don't need any VBO attributes.
1000  */
1001 DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
1002 {
1003 #ifdef USE_GPU_SELECT
1004         BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
1005 #endif
1006         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
1007
1008         /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
1009         drw_shgroup_init(shgroup, shader);
1010
1011         shgroup->type = DRW_SHG_TRIANGLE_BATCH;
1012         shgroup->instance_count = tri_count * 3;
1013
1014         return shgroup;
1015 }
1016
1017 DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
1018 {
1019         BLI_assert(tf_target != NULL);
1020         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
1021         shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
1022
1023         drw_shgroup_init(shgroup, shader);
1024
1025         shgroup->tfeedback_target = tf_target;
1026
1027         return shgroup;
1028 }
1029
1030 /**
1031  * Specify an external batch instead of adding each attribute one by one.
1032  */
1033 void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
1034 {
1035         BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
1036         BLI_assert(shgroup->instance_count == 0);
1037         /* You cannot use an external instancing batch without a dummy format. */
1038         BLI_assert(shgroup->attrs_count != 0);
1039
1040         shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
1041         drw_call_calc_orco(NULL, shgroup->instance_orcofac);
1042         /* PERF: This destroys the VAO cache, so better check whether it's really necessary. */
1043         /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
1044          * at the same address. Bindings/VAOs would remain obsolete. */
1045         //if (shgroup->instancing_geom->inst != batch->verts[0])
1046         GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
1047
1048 #ifdef USE_GPU_SELECT
1049         shgroup->override_selectid = DST.select_id;
1050 #endif
1051 }
1052
1053 uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
1054 {
1055         return shgroup->instance_count;
1056 }
1057
1058 /**
1059  * State is added to #Pass.state while drawing.
1060  * Use to temporarily enable draw options.
1061  */
1062 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
1063 {
1064         shgroup->state_extra |= state;
1065 }
1066
1067 void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
1068 {
1069         shgroup->state_extra_disable &= ~state;
1070 }
1071
1072 void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
1073 {
1074         BLI_assert(mask <= 255);
1075         shgroup->stencil_mask = mask;
1076 }
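
/* Example (editor's sketch): per-shgroup state tweaks are layered on top of the parent
 * pass state (DRW_STATE_WRITE_DEPTH is used elsewhere in this file; other flags come from
 * the DRWState enum):
 *
 *   DRW_shgroup_state_enable(grp, DRW_STATE_WRITE_DEPTH);
 *   DRW_shgroup_stencil_mask(grp, 0xFF);
 */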
1077
1078 bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
1079 {
1080         switch (shgroup->type) {
1081                 case DRW_SHG_NORMAL:
1082                 case DRW_SHG_FEEDBACK_TRANSFORM:
1083                         return shgroup->calls.first == NULL;
1084                 case DRW_SHG_POINT_BATCH:
1085                 case DRW_SHG_LINE_BATCH:
1086                 case DRW_SHG_TRIANGLE_BATCH:
1087                 case DRW_SHG_INSTANCE:
1088                 case DRW_SHG_INSTANCE_EXTERNAL:
1089                         return shgroup->instance_count == 0;
1090         }
1091         BLI_assert(!"Shading Group type not supported");
1092         return true;
1093 }
1094
1095 DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
1096 {
1097         /* Remove this assertion if needed but implement the other cases first! */
1098         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1099
1100         DRWShadingGroup *shgroup_new = BLI_mempool_alloc(DST.vmempool->shgroups);
1101
1102         *shgroup_new = *shgroup;
1103         shgroup_new->uniforms = NULL;
1104         shgroup_new->calls.first = NULL;
1105         shgroup_new->calls.last = NULL;
1106
1107         BLI_LINKS_INSERT_AFTER(&shgroup->pass_parent->shgroups, shgroup, shgroup_new);
1108
1109         return shgroup_new;
1110 }
1111
1112 /** \} */
1113
1114 /* -------------------------------------------------------------------- */
1115 /** \name Passes (DRW_pass)
1116  * \{ */
1117
1118 DRWPass *DRW_pass_create(const char *name, DRWState state)
1119 {
1120         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
1121         pass->state = state;
1122         if (((G.debug_value > 20) && (G.debug_value < 30)) ||
1123              (G.debug & G_DEBUG))
1124         {
1125                 BLI_strncpy(pass->name, name, MAX_PASS_NAME);
1126         }
1127
1128         pass->shgroups.first = NULL;
1129         pass->shgroups.last = NULL;
1130
1131         return pass;
1132 }
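
/* Example (editor's sketch): a pass groups shading groups under one GPU state; the pass
 * state is combined with each shgroup's extra state at draw time. DRW_STATE_WRITE_COLOR
 * and DRW_STATE_DEPTH_LESS are assumed from the DRWState enum in DRW_render.h:
 *
 *   DRWPass *pass = DRW_pass_create(
 *           "My Engine Opaque",
 *           DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS);
 *   DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
 *   DRW_shgroup_call_object_add_ex(grp, geom, ob, NULL, false);
 */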
1133
1134 bool DRW_pass_is_empty(DRWPass *pass)
1135 {
1136         for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
1137                 if (!DRW_shgroup_is_empty(shgroup)) {
1138                         return false;
1139                 }
1140         }
1141         return true;
1142 }
1143
1144 void DRW_pass_state_set(DRWPass *pass, DRWState state)
1145 {
1146         pass->state = state;
1147 }
1148
1149 void DRW_pass_state_add(DRWPass *pass, DRWState state)
1150 {
1151         pass->state |= state;
1152 }
1153
1154 void DRW_pass_state_remove(DRWPass *pass, DRWState state)
1155 {
1156         pass->state &= ~state;
1157 }
1158
1159 void DRW_pass_free(DRWPass *pass)
1160 {
1161         pass->shgroups.first = NULL;
1162         pass->shgroups.last = NULL;
1163 }
1164
1165 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
1166 {
1167         for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
1168                 callback(userData, shgroup);
1169         }
1170 }
1171
1172 typedef struct ZSortData {
1173         float *axis;
1174         float *origin;
1175 } ZSortData;
1176
1177 static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
1178 {
1179         const ZSortData *zsortdata = (ZSortData *)thunk;
1180         const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
1181         const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
1182
1183         const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
1184         const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
1185
1186         if (call_a == NULL) { return -1; }
1187         if (call_b == NULL) { return -1; }
1188
1189         float tmp[3];
1190         sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
1191         const float a_sq = dot_v3v3(zsortdata->axis, tmp);
1192         sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
1193         const float b_sq = dot_v3v3(zsortdata->axis, tmp);
1194
1195         if      (a_sq < b_sq) { return  1; }
1196         else if (a_sq > b_sq) { return -1; }
1197         else {
1198                 /* If there is a depth prepass, put it before. */
1199                 if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1200                         return -1;
1201                 }
1202                 else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1203                         return  1;
1204                 }
1205                 else {
1206                         return  0;
1207                 }
1208         }
1209 }
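
/* Editor's note: the comparator above measures signed distance along the view axis,
 * dot(viewinv[2], origin - model_translation), using the matrix translation column
 * (state->model[3]) as the object position. Larger values sort first, giving the
 * back-to-front order needed for transparency, with depth-writing shgroups ordered first
 * on ties so a depth prepass draws before its matching color pass.
 */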
1210
1211 /* ------------------ Shading group sorting --------------------- */
1212
1213 #define SORT_IMPL_LINKTYPE DRWShadingGroup
1214
1215 #define SORT_IMPL_USE_THUNK
1216 #define SORT_IMPL_FUNC shgroup_sort_fn_r
1217 #include "../../blenlib/intern/list_sort_impl.h"
1218 #undef SORT_IMPL_FUNC
1219 #undef SORT_IMPL_USE_THUNK
1220
1221 #undef SORT_IMPL_LINKTYPE
1222
1223 /**
1224  * Sort Shading groups by decreasing Z of their first draw call.
1225  * This is useful for order dependent effect such as transparency.
1226  */
1227 void DRW_pass_sort_shgroup_z(DRWPass *pass)
1228 {
1229         float (*viewinv)[4];
1230         viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
1231
1232         ZSortData zsortdata = {viewinv[2], viewinv[3]};
1233
1234         if (pass->shgroups.first && pass->shgroups.first->next) {
1235                 pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
1236
1237                 /* Find the next last */
1238                 DRWShadingGroup *last = pass->shgroups.first;
1239                 while ((last = last->next)) {
1240                         /* Do nothing */
1241                 }
1242                 pass->shgroups.last = last;
1243         }
1244 }
1245
1246 /** \} */