/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file blender/draw/intern/draw_manager_data.c
 *  \ingroup draw
 */

#include "draw_manager.h"

#include "BKE_curve.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_object.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"

#include "BLI_link_utils.h"
#include "BLI_mempool.h"

#include "intern/gpu_codegen.h"

struct GPUVertFormat *g_pos_format = NULL;

extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */

/* -------------------------------------------------------------------- */

/** \name Uniform Buffer Object (DRW_uniformbuffer)
 * \{ */

GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
        return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
        GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
        GPU_uniformbuffer_free(ubo);
}
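
/* Illustrative usage sketch (not part of this file): engines typically mirror
 * a C struct into a UBO and re-upload it when it changes. `MyEngineData` and
 * the values below are hypothetical.
 *
 *   typedef struct MyEngineData { float color[4]; float time, _pad[3]; } MyEngineData;
 *
 *   MyEngineData data = {{1.0f, 0.5f, 0.0f, 1.0f}, 0.0f};
 *   GPUUniformBuffer *ubo = DRW_uniformbuffer_create(sizeof(data), &data);
 *   data.time += 0.1f;
 *   DRW_uniformbuffer_update(ubo, &data);  // re-upload after CPU-side edits
 *   DRW_uniformbuffer_free(ubo);
 *
 * Keeping the struct padded to std140-style alignment (members rounded up to
 * vec4 boundaries) avoids layout mismatches with the GLSL uniform block. */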

/** \} */

/* -------------------------------------------------------------------- */

/** \name Uniforms (DRW_shgroup_uniform)
 * \{ */

static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
                                          DRWUniformType type, const void *value, int length, int arraysize)
{
        DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
        uni->location = loc;
        uni->type = type;
        uni->length = length;
        uni->arraysize = arraysize;

        switch (type) {
                case DRW_UNIFORM_INT_COPY:
                        uni->ivalue = *((int *)value);
                        break;
                case DRW_UNIFORM_BOOL_COPY:
                        uni->ivalue = (int)*((bool *)value);
                        break;
                case DRW_UNIFORM_FLOAT_COPY:
                        uni->fvalue = *((float *)value);
                        break;
                default:
                        uni->pvalue = value;
                        break;
        }

        BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}

static void drw_shgroup_builtin_uniform(
        DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
        int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);

        if (loc != -1) {
                drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
        }
}

static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
                                DRWUniformType type, const void *value, int length, int arraysize)
{
        int location;
        if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
                location = GPU_shader_get_uniform_block(shgroup->shader, name);
        }
        else {
                location = GPU_shader_get_uniform(shgroup->shader, name);
        }

        if (location == -1) {
                if (G.debug & G_DEBUG)
                        fprintf(stderr, "Pass : %s, Uniform '%s' not found!\n", shgroup->pass_parent->name, name);
                /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
                // BLI_assert(0);
                return;
        }

        BLI_assert(arraysize > 0 && arraysize <= 16);
        BLI_assert(length >= 0 && length <= 16);

        drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);

#ifndef NDEBUG
        /* Save uniform name to easily identify it when debugging. */
        BLI_strncpy(shgroup->uniforms->name, name, MAX_UNIFORM_NAME);
#endif
}

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

/* Same as DRW_shgroup_uniform_texture, but the texture is guaranteed to stay
 * bound as long as the shader does not change between shgroups. */
void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Same as DRW_shgroup_uniform_block, but the UBO is guaranteed to stay
 * bound as long as the shader does not change between shgroups. */
void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
}

void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}

/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}
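
/* Why the _copy variants exist: the non-copy setters store only the pointer,
 * so the pointed-to value must outlive the drawing stage. Passing the address
 * of a stack local is a use-after-return bug (hypothetical caller sketch):
 *
 *   void my_cache_populate(DRWShadingGroup *grp)
 *   {
 *           int samples = 4;
 *           DRW_shgroup_uniform_int(grp, "samples", &samples, 1);  // BAD: dangles after return
 *           DRW_shgroup_uniform_int_copy(grp, "samples", 4);       // OK: value stored inline
 *   }
 *
 * Only single int/bool/float values can be copied (see the switch in
 * drw_shgroup_uniform_create_ex); vectors and matrices need persistent storage. */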


/** \} */

/* -------------------------------------------------------------------- */

/** \name Draw Call (DRW_calls)
 * \{ */

static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
        ID *ob_data = (ob) ? ob->data : NULL;
        float *texcoloc = NULL;
        float *texcosize = NULL;
        if (ob_data != NULL) {
                switch (GS(ob_data->name)) {
                        case ID_ME:
                                BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
                                break;
                        case ID_CU:
                        {
                                Curve *cu = (Curve *)ob_data;
                                if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
                                        BKE_curve_texspace_calc(cu);
                                }
                                texcoloc = cu->loc;
                                texcosize = cu->size;
                                break;
                        }
                        case ID_MB:
                        {
                                MetaBall *mb = (MetaBall *)ob_data;
                                texcoloc = mb->loc;
                                texcosize = mb->size;
                                break;
                        }
                        default:
                                break;
                }
        }

        if ((texcoloc != NULL) && (texcosize != NULL)) {
                mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
                invert_v3(r_orcofacs[1]);
                sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
                negate_v3(r_orcofacs[0]);
                mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
        }
        else {
                copy_v3_fl(r_orcofacs[0], 0.0f);
                copy_v3_fl(r_orcofacs[1], 1.0f);
        }
}
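
/* Derivation note: the shader reconstructs orco coordinates with one MADD,
 * orco = fac[0] + pos * fac[1], where the code above computes:
 *   fac[1] = 1 / (2 * texcosize)
 *   fac[0] = -(texcoloc - texcosize) * fac[1]
 * so pos == texcoloc - texcosize maps to 0.0 and pos == texcoloc + texcosize
 * maps to 1.0, i.e. the texture space box is remapped to [0..1]. */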

static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
        state->flag = 0;
        state->cache_id = 0;
        state->visibility_cb = NULL;
        state->matflag = shgroup->matflag;

        /* Matrices */
        if (obmat != NULL) {
                copy_m4_m4(state->model, obmat);

                if (is_negative_m4(state->model)) {
                        state->flag |= DRW_CALL_NEGSCALE;
                }
        }
        else {
                unit_m4(state->model);
        }

        if (ob != NULL) {
                float corner[3];
                BoundBox *bbox = BKE_object_boundbox_get(ob);
                /* Get BoundSphere center and radius from the BoundBox. */
                mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
                mul_v3_m4v3(corner, obmat, bbox->vec[0]);
                mul_m4_v3(obmat, state->bsphere.center);
                state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
        }
        else {
                /* Bypass the culling test. */
                state->bsphere.radius = -1.0f;
        }

        /* Orco factors: computed at creation time so we don't have to keep the ob_data pointer around. */
        if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
                drw_call_calc_orco(ob, state->orcotexfac);
                state->matflag &= ~DRW_CALL_ORCOTEXFAC;
        }

        return state;
}
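
/* Note on the bound sphere above: the center is the midpoint of the box
 * diagonal (bbox->vec[0] and bbox->vec[6] are opposite corners), and the
 * radius is the world-space distance from the transformed center to one
 * transformed corner, i.e. the half diagonal. This is exact for any rotation
 * and (non-uniform) scale; under shear it is only an approximation of the
 * tightest enclosing sphere. */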

static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        if (DST.ob_state == NULL) {
                DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
        }
        else {
                /* If the DRWCallState is reused, add necessary matrices. */
                DST.ob_state->matflag |= shgroup->matflag;
        }

        return DST.ob_state;
}
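
/* DST.ob_state is reset by the draw manager for each new object iteration, so
 * all calls added for the same object share a single DRWCallState. Per-call
 * matrices are computed lazily at draw time from the union of every shgroup's
 * matflag, which is why the flags are OR'ed together instead of allocating a
 * new state. */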

void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
        BLI_assert(v_count);

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_RANGE;
        call->range.geometry = geom;
        call->range.start = v_sta;
        call->range.count = v_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void drw_shgroup_call_procedural_add_ex(
        DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
{
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        if (ob) {
                call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        }
        else {
                call->state = drw_call_state_create(shgroup, obmat, NULL);
        }
        call->type = DRW_CALL_PROCEDURAL;
        call->procedural.prim_type = prim_type;
        call->procedural.vert_count = vert_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
}

void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
}

void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
}
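
/* Illustrative use of the procedural calls above: geometry generated entirely
 * in the vertex shader from gl_VertexID, with no VBO bound. E.g. a classic
 * full-screen triangle for a post-process pass:
 *
 *   DRW_shgroup_call_procedural_triangles_add(grp, 1, NULL);
 *
 * paired with a vertex shader along these lines (hypothetical GLSL):
 *
 *   vec2 pos = vec2((gl_VertexID & 1) * 4.0 - 1.0, (gl_VertexID & 2) * 2.0 - 1.0);
 *   gl_Position = vec4(pos, 0.0, 1.0);
 */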

/* TODO (fclem): this is a sign that the API is starting to be limiting.
 * Maybe add a general-purpose function to handle these special cases. */
void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, bool bypass_culling)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        /* NOTE this will disable culling for the whole object. */
        call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_object_add_with_callback(
        DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob,
        DRWCallVisibilityFn *callback, void *user_data)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->state->visibility_cb = callback;
        call->state->user_data = user_data;
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
        BLI_assert(geometry_fn != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_GENERATE;
        call->generate.geometry_fn = geometry_fn;
        call->generate.user_data = user_data;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void sculpt_draw_cb(
        DRWShadingGroup *shgroup,
        void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
        void *user_data)
{
        Object *ob = user_data;
        PBVH *pbvh = ob->sculpt->pbvh;

        if (pbvh) {
                BKE_pbvh_draw_cb(
                        pbvh, NULL, NULL, false, false,
                        (void (*)(void *, GPUBatch *))draw_fn, shgroup);
        }
}

void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
        DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}

void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
{
#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
        }
#endif

        BLI_assert(attr_len == shgroup->attribs_count);
        UNUSED_VARS_NDEBUG(attr_len);

        for (int i = 0; i < attr_len; ++i) {
                if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
        }

        shgroup->instance_count += 1;
}
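
/* Usage sketch (illustrative): the attribute pointers must match the
 * GPUVertFormat the batching/instancing shgroup was created with, in order.
 * For a hypothetical two-attribute format {"color" float3, "pos" float3}:
 *
 *   const float color[3] = {1.0f, 0.0f, 0.0f};
 *   const float pos[3] = {0.0f, 0.0f, 1.0f};
 *   DRW_shgroup_call_dynamic_add_array(grp, (const void *[]){color, pos}, 2);
 *
 * Each call appends one vertex/instance; the VBOs grow in chunks of 32. */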

/** \} */

/* -------------------------------------------------------------------- */

/** \name Shading Groups (DRW_shgroup)
 * \{ */

static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
        shgroup->instance_count = 0;
        shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
        shgroup->inst_selectid = NULL;
        shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
        shgroup->attribs_count = 0;
#endif

        int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");

        if (view_ubo_location != -1) {
                drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
        }
        else {
                /* Only here to support builtin shaders. This should not be used by engines. */
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
        }

        shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
        shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
        shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
        shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
        shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
        shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
        shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
        shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
        shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
        shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);

        shgroup->matflag = 0;
        if (shgroup->modelinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELINVERSE;
        if (shgroup->modelview > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEW;
        if (shgroup->modelviewinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
        if (shgroup->modelviewprojection > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
        if (shgroup->normalview > -1)
                shgroup->matflag |= DRW_CALL_NORMALVIEW;
        if (shgroup->normalworld > -1)
                shgroup->matflag |= DRW_CALL_NORMALWORLD;
        if (shgroup->orcotexfac > -1)
                shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
        if (shgroup->eye > -1)
                shgroup->matflag |= DRW_CALL_EYEVEC;
}

static void drw_shgroup_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(batch != NULL);
        BLI_assert(format != NULL);

        drw_shgroup_init(shgroup, shader);

        shgroup->instance_geom = batch;
#ifndef NDEBUG
        shgroup->attribs_count = format->attr_len;
#endif

        DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                      &shgroup->instance_geom, &shgroup->instance_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering, but allocated in one chunk.
                 * Plus we don't have to care about ownership. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch_dummy; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch_dummy, &shgroup->inst_selectid);
        }
#endif
}

static void drw_shgroup_batching_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
{
        drw_shgroup_init(shgroup, shader);

#ifndef NDEBUG
        shgroup->attribs_count = (format != NULL) ? format->attr_len : 0;
#endif
        BLI_assert(format != NULL);

        GPUPrimType type;
        switch (shgroup->type) {
                case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
                case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
                case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
                default: type = GPU_PRIM_NONE; BLI_assert(0); break;
        }

        DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
                                    &shgroup->batch_geom, &shgroup->batch_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering, but allocated in one chunk. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch, &shgroup->inst_selectid);
        }
#endif
}

static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

        BLI_LINKS_APPEND(&pass->shgroups, shgroup);

        shgroup->type = DRW_SHG_NORMAL;
        shgroup->shader = shader;
        shgroup->state_extra = 0;
        shgroup->state_extra_disable = ~0x0;
        shgroup->stencil_mask = 0;
        shgroup->calls.first = NULL;
        shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
        shgroup->batch_geom = NULL;
        shgroup->batch_vbo = NULL;

        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
#endif

#if !defined(NDEBUG) || defined(USE_GPU_SELECT)
        shgroup->pass_parent = pass;
#endif

        return shgroup;
}

static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
        if (!gpupass) {
                /* Shader compilation error */
                return NULL;
        }

        GPUShader *sh = GPU_pass_shader_get(gpupass);

        if (!sh) {
                /* Shader not yet compiled */
                return NULL;
        }

        DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
        return grp;
}

static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
{
        ListBase *inputs = GPU_material_get_inputs(material);

        /* Converting dynamic GPUInput to DRWUniform */
        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->source == GPU_SOURCE_TEX) {
                        GPUTexture *tex = NULL;

                        if (input->ima) {
                                double time = 0.0; /* TODO make time variable */
                                tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata, time);
                        }
                        else {
                                /* Color Ramps */
                                tex = *input->coba;
                        }

                        if (input->bindtex) {
                                drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
                        }
                }
        }

        GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}

GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
{
        GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");

        for (int i = 0; i < arraysize; ++i) {
                GPU_vertformat_attr_add(format, attribs[i].name,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
                                        attribs[i].components,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
        }
        return format;
}
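
/* Example (sketch): building a per-instance format. The attribute names here
 * are assumptions and must match the instancing shader's inputs.
 *
 *   GPUVertFormat *format = DRW_shgroup_instance_format_array(
 *           (DRWInstanceAttribFormat[]){
 *                   {"color", DRW_ATTRIB_FLOAT, 4},
 *                   {"size", DRW_ATTRIB_FLOAT, 1},
 *           }, 2);
 *
 * The DRW_shgroup_instance_format() macro used elsewhere in this file wraps
 * this call and caches the result in a static pointer. */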

DRWShadingGroup *DRW_shgroup_material_create(
        struct GPUMaterial *material, DRWPass *pass)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                shgroup->type = DRW_SHG_INSTANCE;
                shgroup->instance_geom = geom;
                drw_call_calc_orco(ob, shgroup->instance_orcofac);
                drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                shgroup->instance_count = tri_count * 3;
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        drw_shgroup_init(shgroup, shader);
        return shgroup;
}

DRWShadingGroup *DRW_shgroup_instance_create(
        struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_INSTANCE;
        shgroup->instance_geom = geom;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        drw_shgroup_instance_init(shgroup, shader, geom, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_POINT_BATCH;

        drw_shgroup_batching_init(shgroup, shader, g_pos_format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
        struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_LINE_BATCH;

        drw_shgroup_batching_init(shgroup, shader, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
}

/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attributes. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

        /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
        drw_shgroup_init(shgroup, shader);

        shgroup->type = DRW_SHG_TRIANGLE_BATCH;
        shgroup->instance_count = tri_count * 3;

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
{
        BLI_assert(tf_target != NULL);
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;

        drw_shgroup_init(shgroup, shader);

        shgroup->tfeedback_target = tf_target;

        return shgroup;
}

/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_count == 0);
        /* You cannot use an external instancing batch without a dummy format. */
        BLI_assert(shgroup->attribs_count != 0);

        shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        /* PERF: This destroys the VAO cache, so check first whether it's really necessary. */
        /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
         * at the same address. Bindings/VAOs would remain obsolete. */
        //if (shgroup->instancing_geom->inst != batch->verts[0])
        GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
        shgroup->override_selectid = DST.select_id;
#endif
}

uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
        return shgroup->instance_count;
}

/**
 * State is added to #Pass.state while drawing.
 * Use to temporarily enable draw options.
 */
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra |= state;
}

void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra_disable &= ~state;
}

void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
        BLI_assert(mask <= 255);
        shgroup->stencil_mask = mask;
}

bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
        switch (shgroup->type) {
                case DRW_SHG_NORMAL:
                case DRW_SHG_FEEDBACK_TRANSFORM:
                        return shgroup->calls.first == NULL;
                case DRW_SHG_POINT_BATCH:
                case DRW_SHG_LINE_BATCH:
                case DRW_SHG_TRIANGLE_BATCH:
                case DRW_SHG_INSTANCE:
                case DRW_SHG_INSTANCE_EXTERNAL:
                        return shgroup->instance_count == 0;
        }
        BLI_assert(!"Shading Group type not supported");
        return true;
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Passes (DRW_pass)
 * \{ */

DRWPass *DRW_pass_create(const char *name, DRWState state)
{
        DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
        pass->state = state;
        if ((G.debug_value > 20) || (G.debug & G_DEBUG)) {
                BLI_strncpy(pass->name, name, MAX_PASS_NAME);
        }

        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;

        return pass;
}

bool DRW_pass_is_empty(DRWPass *pass)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                if (!DRW_shgroup_is_empty(shgroup)) {
                        return false;
                }
        }
        return true;
}

void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
        pass->state = state;
}

void DRW_pass_state_add(DRWPass *pass, DRWState state)
{
        pass->state |= state;
}

void DRW_pass_state_remove(DRWPass *pass, DRWState state)
{
        pass->state &= ~state;
}

void DRW_pass_free(DRWPass *pass)
{
        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;
}

void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                callback(userData, shgroup);
        }
}

typedef struct ZSortData {
        float *axis;
        float *origin;
} ZSortData;

static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
        const ZSortData *zsortdata = (ZSortData *)thunk;
        const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
        const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

        const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
        const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;

        if (call_a == NULL) return -1;
        if (call_b == NULL) return -1;

        float tmp[3];
        sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
        const float a_sq = dot_v3v3(zsortdata->axis, tmp);
        sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
        const float b_sq = dot_v3v3(zsortdata->axis, tmp);

        if      (a_sq < b_sq) return  1;
        else if (a_sq > b_sq) return -1;
        else {
                /* If there is a depth prepass put it before */
                if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return -1;
                }
                else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return  1;
                }
                else return  0;
        }
}
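
/* Note: despite the "_sq" suffix, a_sq/b_sq above are not squared lengths but
 * signed projections of (origin - object location) onto the view axis, taken
 * from each group's first call only; groups without calls short-circuit the
 * comparison. */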

/* ------------------ Shading group sorting --------------------- */

#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE

/**
 * Sort shading groups by decreasing Z of their first draw call.
 * This is useful for order-dependent effects such as transparency.
 **/
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
        float (*viewinv)[4];
        viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];

        ZSortData zsortdata = {viewinv[2], viewinv[3]};

        if (pass->shgroups.first && pass->shgroups.first->next) {
                pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);

                /* Find the new last link. */
                DRWShadingGroup *last = pass->shgroups.first;
                while (last->next != NULL) {
                        last = last->next;
                }
                pass->shgroups.last = last;
        }
}

/** \} */