Edit Mesh: Add support for draw option parameters
[blender.git] / source / blender / draw / intern / draw_manager_data.c
/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file blender/draw/intern/draw_manager_data.c
 *  \ingroup draw
 */

#include "draw_manager.h"

#include "BKE_curve.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_object.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"

#include "BLI_link_utils.h"
#include "BLI_mempool.h"

#include "intern/gpu_codegen.h"

struct GPUVertFormat *g_pos_format = NULL;

extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */
/* -------------------------------------------------------------------- */

/** \name Uniform Buffer Object (DRW_uniformbuffer)
 * \{ */

GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
        return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
        GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
        GPU_uniformbuffer_free(ubo);
}
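
/* Illustrative usage sketch (the struct and variable names here are
 * hypothetical, not part of this API): allocate a UBO sized to a
 * std140-compatible struct, then push updated CPU data before drawing.
 *
 *   typedef struct MyEngineData { float color[4]; float factor, _pad[3]; } MyEngineData;
 *   static MyEngineData data;
 *   GPUUniformBuffer *ubo = DRW_uniformbuffer_create(sizeof(data), &data);
 *   ...
 *   DRW_uniformbuffer_update(ubo, &data);  // after modifying `data`
 *   DRW_uniformbuffer_free(ubo);           // when the engine shuts down
 */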

/** \} */

/* -------------------------------------------------------------------- */

/** \name Uniforms (DRW_shgroup_uniform)
 * \{ */

static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
                                          DRWUniformType type, const void *value, int length, int arraysize)
{
        DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
        uni->location = loc;
        uni->type = type;
        uni->length = length;
        uni->arraysize = arraysize;

        switch (type) {
                case DRW_UNIFORM_INT_COPY:
                        uni->ivalue = *((int *)value);
                        break;
                case DRW_UNIFORM_BOOL_COPY:
                        uni->ivalue = (int)*((bool *)value);
                        break;
                case DRW_UNIFORM_FLOAT_COPY:
                        uni->fvalue = *((float *)value);
                        break;
                default:
                        uni->pvalue = value;
                        break;
        }

        BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}

static void drw_shgroup_builtin_uniform(
        DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
        int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);

        if (loc != -1) {
                drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
        }
}

static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
                                DRWUniformType type, const void *value, int length, int arraysize)
{
        int location;
        if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
                location = GPU_shader_get_uniform_block(shgroup->shader, name);
        }
        else {
                location = GPU_shader_get_uniform(shgroup->shader, name);
        }

        if (location == -1) {
                if (G.debug & G_DEBUG)
                        fprintf(stderr, "Pass : %s, Uniform '%s' not found!\n", shgroup->pass_parent->name, name);
                /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
                // BLI_assert(0);
                return;
        }

        BLI_assert(arraysize > 0 && arraysize <= 16);
        BLI_assert(length >= 0 && length <= 16);

        drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);

#ifndef NDEBUG
        /* Save uniform name to easily identify it when debugging. */
        BLI_strncpy(shgroup->uniforms->name, name, MAX_UNIFORM_NAME);
#endif
}
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

/* Same as DRW_shgroup_uniform_texture but is guaranteed to stay bound if the shader does not change between shgroups. */
void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Same as DRW_shgroup_uniform_block but is guaranteed to stay bound if the shader does not change between shgroups. */
void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
}

void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}

/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}
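
/* Illustrative usage sketch (engine-side; the uniform names and values are
 * examples): the pointer variants keep a reference that is read at draw time,
 * so the pointed-to memory must outlive the draw, while the *_copy variants
 * store the value immediately and are safe to call with stack temporaries.
 *
 *   static float color[4] = {1.0f, 0.5f, 0.0f, 1.0f};
 *   DRW_shgroup_uniform_vec4(shgroup, "color", color, 1);   // by reference
 *   DRW_shgroup_uniform_int_copy(shgroup, "use_flag", 1);   // by value
 *   DRW_shgroup_uniform_texture(shgroup, "image", tex);     // GPUTexture binding
 */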


/** \} */

/* -------------------------------------------------------------------- */

/** \name Draw Call (DRW_calls)
 * \{ */

static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
        ID *ob_data = (ob) ? ob->data : NULL;
        float *texcoloc = NULL;
        float *texcosize = NULL;
        if (ob_data != NULL) {
                switch (GS(ob_data->name)) {
                        case ID_ME:
                                BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
                                break;
                        case ID_CU:
                        {
                                Curve *cu = (Curve *)ob_data;
                                if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
                                        BKE_curve_texspace_calc(cu);
                                }
                                texcoloc = cu->loc;
                                texcosize = cu->size;
                                break;
                        }
                        case ID_MB:
                        {
                                MetaBall *mb = (MetaBall *)ob_data;
                                texcoloc = mb->loc;
                                texcosize = mb->size;
                                break;
                        }
                        default:
                                break;
                }
        }

        if ((texcoloc != NULL) && (texcosize != NULL)) {
                mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
                invert_v3(r_orcofacs[1]);
                sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
                negate_v3(r_orcofacs[0]);
                mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
        }
        else {
                copy_v3_fl(r_orcofacs[0], 0.0f);
                copy_v3_fl(r_orcofacs[1], 1.0f);
        }
}
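
/* A worked form of the factors above (my reading of the math, for reference):
 * with texture-space center `loc` and half-extent `size`,
 *   r_orcofacs[1] = 1 / (2 * size)
 *   r_orcofacs[0] = -(loc - size) / (2 * size)
 * so the shader can evaluate orco = pos * r_orcofacs[1] + r_orcofacs[0],
 * a single MADD mapping [loc - size, loc + size] onto [0, 1]. */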

static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
        state->flag = 0;
        state->cache_id = 0;
        state->visibility_cb = NULL;
        state->matflag = shgroup->matflag;

        /* Matrices */
        if (obmat != NULL) {
                copy_m4_m4(state->model, obmat);

                if (is_negative_m4(state->model)) {
                        state->flag |= DRW_CALL_NEGSCALE;
                }
        }
        else {
                unit_m4(state->model);
        }

        if (ob != NULL) {
                float corner[3];
                BoundBox *bbox = BKE_object_boundbox_get(ob);
                /* Get BoundSphere center and radius from the BoundBox. */
                mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
                mul_v3_m4v3(corner, obmat, bbox->vec[0]);
                mul_m4_v3(obmat, state->bsphere.center);
                state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
        }
        else {
                /* Bypass test. */
                state->bsphere.radius = -1.0f;
        }

        /* Orco factors: computed at creation time so we don't have to keep the ob_data pointer around. */
        if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
                drw_call_calc_orco(ob, state->orcotexfac);
                state->matflag &= ~DRW_CALL_ORCOTEXFAC;
        }

        return state;
}

static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        if (DST.ob_state == NULL) {
                DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
        }
        else {
                /* If the DRWCallState is reused, add necessary matrices. */
                DST.ob_state->matflag |= shgroup->matflag;
        }

        return DST.ob_state;
}

void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
        BLI_assert(v_count);

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_RANGE;
        call->range.geometry = geom;
        call->range.start = v_sta;
        call->range.count = v_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void drw_shgroup_call_procedural_add_ex(
        DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
{
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        if (ob) {
                call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        }
        else {
                call->state = drw_call_state_create(shgroup, obmat, NULL);
        }
        call->type = DRW_CALL_PROCEDURAL;
        call->procedural.prim_type = prim_type;
        call->procedural.vert_count = vert_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
}

void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
}

void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
}
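
/* Illustrative sketch: procedural calls emit N vertices without any VBO, so
 * the vertex shader must derive positions itself (typically from gl_VertexID).
 * E.g. a single full-screen triangle with an identity matrix:
 *
 *   DRW_shgroup_call_procedural_triangles_add(shgroup, 1, NULL);
 */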

/* TODO (fclem): this is a sign that the API is starting to be limiting.
 * Maybe add a general-purpose function to handle special cases like this one. */
void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, bool bypass_culling)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        /* NOTE this will disable culling for the whole object. */
        call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_object_add_with_callback(
        DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob,
        DRWCallVisibilityFn *callback, void *user_data)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->state->visibility_cb = callback;
        call->state->user_data = user_data;
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
        BLI_assert(geometry_fn != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_GENERATE;
        call->generate.geometry_fn = geometry_fn;
        call->generate.user_data = user_data;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void sculpt_draw_cb(
        DRWShadingGroup *shgroup,
        void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
        void *user_data)
{
        Object *ob = user_data;
        PBVH *pbvh = ob->sculpt->pbvh;

        if (pbvh) {
                BKE_pbvh_draw_cb(
                        pbvh, NULL, NULL, false, false,
                        (void (*)(void *, GPUBatch *))draw_fn, shgroup);
        }
}

void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
        DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}

void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
{
#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
        }
#endif

        BLI_assert(attr_len == shgroup->attribs_count);
        UNUSED_VARS_NDEBUG(attr_len);

        for (int i = 0; i < attr_len; ++i) {
                if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
        }

        shgroup->instance_count += 1;
}
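
/* Illustrative sketch: each call appends one instance/vertex, with `attr`
 * holding one pointer per attribute declared in the shgroup's vertex format.
 * E.g. for a point-batch shgroup whose only attribute is "pos" (vec3):
 *
 *   const float pos[3] = {0.0f, 0.0f, 0.0f};
 *   const void *attrs[1] = {pos};
 *   DRW_shgroup_call_dynamic_add_array(shgroup, attrs, 1);
 *
 * (The DRW_shgroup_call_dynamic_add convenience macro in DRW_render.h wraps
 * this same pattern.) */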

/** \} */

/* -------------------------------------------------------------------- */

/** \name Shading Groups (DRW_shgroup)
 * \{ */

static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
        shgroup->instance_count = 0;
        shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
        shgroup->inst_selectid = NULL;
        shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
        shgroup->attribs_count = 0;
#endif

        int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");

        if (view_ubo_location != -1) {
                drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
        }
        else {
                /* Only here to support builtin shaders. This should not be used by engines. */
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
        }

        shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
        shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
        shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
        shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
        shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
        shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
        shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
        shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
        shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
        shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);

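        /* Build a bitmask of only the matrices this shader actually uses
         * (location != -1), so per-call matrix setup can skip computing the rest. */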
        shgroup->matflag = 0;
        if (shgroup->modelinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELINVERSE;
        if (shgroup->modelview > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEW;
        if (shgroup->modelviewinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
        if (shgroup->modelviewprojection > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
        if (shgroup->normalview > -1)
                shgroup->matflag |= DRW_CALL_NORMALVIEW;
        if (shgroup->normalworld > -1)
                shgroup->matflag |= DRW_CALL_NORMALWORLD;
        if (shgroup->orcotexfac > -1)
                shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
        if (shgroup->eye > -1)
                shgroup->matflag |= DRW_CALL_EYEVEC;
}

static void drw_shgroup_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(batch != NULL);
        BLI_assert(format != NULL);

        drw_shgroup_init(shgroup, shader);

        shgroup->instance_geom = batch;
#ifndef NDEBUG
        shgroup->attribs_count = format->attr_len;
#endif

        DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                      &shgroup->instance_geom, &shgroup->instance_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk.
                 * Plus we don't have to care about ownership. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch_dummy; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch_dummy, &shgroup->inst_selectid);
        }
#endif
}

static void drw_shgroup_batching_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
{
        drw_shgroup_init(shgroup, shader);

#ifndef NDEBUG
        shgroup->attribs_count = (format != NULL) ? format->attr_len : 0;
#endif
        BLI_assert(format != NULL);

        GPUPrimType type;
        switch (shgroup->type) {
                case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
                case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
                case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
                default: type = GPU_PRIM_NONE; BLI_assert(0); break;
        }

        DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
                                    &shgroup->batch_geom, &shgroup->batch_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch, &shgroup->inst_selectid);
        }
#endif
}

static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

        BLI_LINKS_APPEND(&pass->shgroups, shgroup);

        shgroup->type = DRW_SHG_NORMAL;
        shgroup->shader = shader;
        shgroup->state_extra = 0;
        shgroup->state_extra_disable = ~0x0;
        shgroup->stencil_mask = 0;
        shgroup->calls.first = NULL;
        shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
        shgroup->batch_geom = NULL;
        shgroup->batch_vbo = NULL;

        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
#endif

#if !defined(NDEBUG) || defined(USE_GPU_SELECT)
        shgroup->pass_parent = pass;
#endif

        return shgroup;
}

static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
        if (!gpupass) {
                /* Shader compilation error */
                return NULL;
        }

        GPUShader *sh = GPU_pass_shader_get(gpupass);

        if (!sh) {
                /* Shader not yet compiled */
                return NULL;
        }

        DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
        return grp;
}

static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
{
        ListBase *inputs = GPU_material_get_inputs(material);

        /* Converting dynamic GPUInput to DRWUniform */
        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->source == GPU_SOURCE_TEX) {
                        GPUTexture *tex = NULL;

                        if (input->ima) {
                                double time = 0.0; /* TODO make time variable */
                                tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata, time);
                        }
                        else {
                                /* Color Ramps */
                                tex = *input->coba;
                        }

                        if (input->bindtex) {
                                drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
                        }
                }
        }

        GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}

GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
{
        GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");

        for (int i = 0; i < arraysize; ++i) {
                GPU_vertformat_attr_add(format, attribs[i].name,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
                                        attribs[i].components,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
        }
        return format;
}
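
/* Illustrative sketch: describing a two-attribute instancing format
 * (a vec3 position and a vec4 color; the attribute names are examples).
 * The DRW_shgroup_instance_format macro builds the array argument and calls
 * this function only on first use, as done for g_pos_format below:
 *
 *   static GPUVertFormat *format = NULL;
 *   DRW_shgroup_instance_format(format, {
 *           {"pos",   DRW_ATTRIB_FLOAT, 3},
 *           {"color", DRW_ATTRIB_FLOAT, 4},
 *   });
 */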

DRWShadingGroup *DRW_shgroup_material_create(
        struct GPUMaterial *material, DRWPass *pass)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                shgroup->type = DRW_SHG_INSTANCE;
                shgroup->instance_geom = geom;
                drw_call_calc_orco(ob, shgroup->instance_orcofac);
                drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                shgroup->instance_count = tri_count * 3;
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        drw_shgroup_init(shgroup, shader);
        return shgroup;
}

DRWShadingGroup *DRW_shgroup_instance_create(
        struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_INSTANCE;
        shgroup->instance_geom = geom;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        drw_shgroup_instance_init(shgroup, shader, geom, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_POINT_BATCH;

        drw_shgroup_batching_init(shgroup, shader, g_pos_format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
        struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_LINE_BATCH;

        drw_shgroup_batching_init(shgroup, shader, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
}

/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attribute. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

        /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
        drw_shgroup_init(shgroup, shader);

        shgroup->type = DRW_SHG_TRIANGLE_BATCH;
        shgroup->instance_count = tri_count * 3;

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
{
        BLI_assert(tf_target != NULL);
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;

        drw_shgroup_init(shgroup, shader);

        shgroup->tfeedback_target = tf_target;

        return shgroup;
}

/* Specify an external batch instead of adding each attribute one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_count == 0);
        /* You cannot use an external instancing batch without a dummy format. */
        BLI_assert(shgroup->attribs_count != 0);

        shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        /* PERF: This destroys the VAO cache, so better check if it's really necessary. */
        /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
         * at the same address. Bindings/VAOs would remain obsolete. */
        //if (shgroup->instancing_geom->inst != batch->verts[0])
        GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
        shgroup->override_selectid = DST.select_id;
#endif
}

uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
        return shgroup->instance_count;
}

/**
 * State is added to #Pass.state while drawing.
 * Use to temporarily enable draw options.
 */
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra |= state;
}

void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra_disable &= ~state;
}

void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
        BLI_assert(mask <= 255);
        shgroup->stencil_mask = mask;
}

bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
        switch (shgroup->type) {
                case DRW_SHG_NORMAL:
                case DRW_SHG_FEEDBACK_TRANSFORM:
                        return shgroup->calls.first == NULL;
                case DRW_SHG_POINT_BATCH:
                case DRW_SHG_LINE_BATCH:
                case DRW_SHG_TRIANGLE_BATCH:
                case DRW_SHG_INSTANCE:
                case DRW_SHG_INSTANCE_EXTERNAL:
                        return shgroup->instance_count == 0;
        }
        BLI_assert(!"Shading Group type not supported");
        return true;
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Passes (DRW_pass)
 * \{ */

DRWPass *DRW_pass_create(const char *name, DRWState state)
{
        DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
        pass->state = state;
        if ((G.debug_value > 20) || (G.debug & G_DEBUG)) {
                BLI_strncpy(pass->name, name, MAX_PASS_NAME);
        }

        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;

        return pass;
}
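
/* Illustrative sketch of the usual engine-side pattern (the pass name and
 * the exact combination of state flags are examples, not prescribed here):
 *
 *   DRWPass *psl_opaque = DRW_pass_create(
 *           "Opaque Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS);
 *   DRWShadingGroup *grp = DRW_shgroup_create(shader, psl_opaque);
 *   DRW_shgroup_call_add(grp, geom, obmat);
 */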

bool DRW_pass_is_empty(DRWPass *pass)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                if (!DRW_shgroup_is_empty(shgroup)) {
                        return false;
                }
        }
        return true;
}

void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
        pass->state = state;
}

void DRW_pass_state_add(DRWPass *pass, DRWState state)
{
        pass->state |= state;
}

void DRW_pass_state_remove(DRWPass *pass, DRWState state)
{
        pass->state &= ~state;
}

void DRW_pass_free(DRWPass *pass)
{
        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;
}

void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                callback(userData, shgroup);
        }
}

typedef struct ZSortData {
        float *axis;
        float *origin;
} ZSortData;

static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
        const ZSortData *zsortdata = (ZSortData *)thunk;
        const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
        const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

        const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
        const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;

        if (call_a == NULL) return -1;
        if (call_b == NULL) return -1;

        float tmp[3];
        sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
        const float a_sq = dot_v3v3(zsortdata->axis, tmp);
        sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
        const float b_sq = dot_v3v3(zsortdata->axis, tmp);

        if      (a_sq < b_sq) return  1;
        else if (a_sq > b_sq) return -1;
        else {
                /* If there is a depth prepass put it before */
                if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return -1;
                }
                else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return  1;
                }
                else return  0;
        }
}
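
/* Note (my reading of the comparator): `axis`/`origin` come from the inverse
 * view matrix (view Z axis and camera position), so the dot products measure
 * distance along the view axis; a smaller value sorts later, which yields
 * back-to-front (farthest first) ordering as needed for alpha blending. */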

/* ------------------ Shading group sorting --------------------- */

#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE

/**
 * Sort shading groups by decreasing Z of their first draw call.
 * This is useful for order-dependent effects such as transparency.
 */
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
        float (*viewinv)[4];
        viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];

        ZSortData zsortdata = {viewinv[2], viewinv[3]};

        if (pass->shgroups.first && pass->shgroups.first->next) {
                pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);

                /* Find the new last. (The previous loop form left `last` NULL on exit.) */
                DRWShadingGroup *last = pass->shgroups.first;
                while (last->next) {
                        last = last->next;
                }
                pass->shgroups.last = last;
        }
}

/** \} */