DRW: Make missing uniform debugging print only once
blender.git: source/blender/draw/intern/draw_manager_data.c
/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file blender/draw/intern/draw_manager_data.c
 *  \ingroup draw
 */

#include "draw_manager.h"

#include "BKE_curve.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_object.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"

#include "BLI_hash.h"
#include "BLI_link_utils.h"
#include "BLI_mempool.h"

#include "intern/gpu_codegen.h"

struct GPUVertFormat *g_pos_format = NULL;

extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */

/* -------------------------------------------------------------------- */

/** \name Uniform Buffer Object (DRW_uniformbuffer)
 * \{ */

GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
        return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
        GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
        GPU_uniformbuffer_free(ubo);
}
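
/* Example usage (illustrative sketch, not part of the API): the struct layout
 * must match the std140 uniform block declared in the shader, padding
 * included. "MyUboData" is a hypothetical struct made up for this example.
 *
 *   typedef struct MyUboData {
 *           float color[4];
 *           float factor;
 *           float _pad[3];
 *   } MyUboData;
 *
 *   MyUboData data = {{1.0f, 0.5f, 0.0f, 1.0f}, 2.0f, {0.0f}};
 *   GPUUniformBuffer *ubo = DRW_uniformbuffer_create(sizeof(data), &data);
 *   // ... when a value changes:
 *   DRW_uniformbuffer_update(ubo, &data);
 *   // ... on engine free:
 *   DRW_uniformbuffer_free(ubo);
 */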

/** \} */

/* -------------------------------------------------------------------- */

/** \name Uniforms (DRW_shgroup_uniform)
 * \{ */

static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
                                            DRWUniformType type, const void *value, int length, int arraysize)
{
        DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
        uni->location = loc;
        uni->type = type;
        uni->length = length;
        uni->arraysize = arraysize;

        switch (type) {
                case DRW_UNIFORM_INT_COPY:
                        uni->ivalue = *((int *)value);
                        break;
                case DRW_UNIFORM_BOOL_COPY:
                        uni->ivalue = (int)*((bool *)value);
                        break;
                case DRW_UNIFORM_FLOAT_COPY:
                        uni->fvalue = *((float *)value);
                        break;
                default:
                        uni->pvalue = value;
                        break;
        }

        BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}

static void drw_shgroup_builtin_uniform(
        DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
        int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);

        if (loc != -1) {
                drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
        }
}

static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
                                  DRWUniformType type, const void *value, int length, int arraysize)
{
        int location;
        if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
                location = GPU_shader_get_uniform_block(shgroup->shader, name);
        }
        else {
                location = GPU_shader_get_uniform(shgroup->shader, name);
        }

        if (location == -1) {
                /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
                // BLI_assert(0);
                return;
        }

        BLI_assert(arraysize > 0 && arraysize <= 16);
        BLI_assert(length >= 0 && length <= 16);

        drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);

        /* If location is -2, the uniform has not yet been queried.
         * We save the name for query just before drawing. */
        if (location == -2 || DRW_DEBUG_USE_UNIFORM_NAME) {
                int ofs = DST.uniform_names.buffer_ofs;
                int max_len = DST.uniform_names.buffer_len - ofs;
                size_t len = strlen(name) + 1;

                if (len >= max_len) {
                        DST.uniform_names.buffer_len += DRW_UNIFORM_BUFFER_NAME_INC;
                        DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer, DST.uniform_names.buffer_len);
                }

                char *dst = DST.uniform_names.buffer + ofs;
                memcpy(dst, name, len); /* Copies the NULL terminator. */

                DST.uniform_names.buffer_ofs += len;
                shgroup->uniforms->name_ofs = ofs;
        }
}

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

/* Same as DRW_shgroup_uniform_texture but is guaranteed to be bound if the shader does not change between shgroups. */
void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Same as DRW_shgroup_uniform_block but is guaranteed to be bound if the shader does not change between shgroups. */
void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
}

void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}

/* Stores the int itself instead of a pointer to it. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}
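
/* Example (illustrative sketch): registering uniforms on a shading group.
 * "color", "use_flat" and "sample_id" are hypothetical uniform names.
 * Note that the non-_copy variants store only the pointer (see
 * drw_shgroup_uniform_create_ex above), so the pointed-to memory must stay
 * valid until drawing happens; the _copy variants store the value itself.
 *
 *   static float color[4] = {1.0f, 0.0f, 0.0f, 1.0f};
 *   DRW_shgroup_uniform_vec4(shgroup, "color", color, 1);
 *   DRW_shgroup_uniform_bool_copy(shgroup, "use_flat", true);
 *   DRW_shgroup_uniform_int_copy(shgroup, "sample_id", 0);
 */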


/** \} */

/* -------------------------------------------------------------------- */

/** \name Draw Call (DRW_calls)
 * \{ */

static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
        ID *ob_data = (ob) ? ob->data : NULL;
        float *texcoloc = NULL;
        float *texcosize = NULL;
        if (ob_data != NULL) {
                switch (GS(ob_data->name)) {
                        case ID_ME:
                                BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
                                break;
                        case ID_CU:
                        {
                                Curve *cu = (Curve *)ob_data;
                                if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
                                        BKE_curve_texspace_calc(cu);
                                }
                                texcoloc = cu->loc;
                                texcosize = cu->size;
                                break;
                        }
                        case ID_MB:
                        {
                                MetaBall *mb = (MetaBall *)ob_data;
                                texcoloc = mb->loc;
                                texcosize = mb->size;
                                break;
                        }
                        default:
                                break;
                }
        }

        if ((texcoloc != NULL) && (texcosize != NULL)) {
                mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
                invert_v3(r_orcofacs[1]);
                sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
                negate_v3(r_orcofacs[0]);
                mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* results in a nice MADD in the shader */
        }
        else {
                copy_v3_fl(r_orcofacs[0], 0.0f);
                copy_v3_fl(r_orcofacs[1], 1.0f);
        }
}
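
/* With the factors computed above, the shader can evaluate a single MADD:
 *   orco = pos * orcotexfac[1] + orcotexfac[0]
 * Since orcotexfac[1] = 1 / (2 * size) and
 * orcotexfac[0] = -(loc - size) / (2 * size), this maps the texture space
 * box [loc - size, loc + size] onto [0, 1] on each axis. */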

static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
        state->flag = 0;
        state->cache_id = 0;
        state->visibility_cb = NULL;
        state->matflag = shgroup->matflag;

        /* Matrices */
        if (obmat != NULL) {
                copy_m4_m4(state->model, obmat);

                if (is_negative_m4(state->model)) {
                        state->flag |= DRW_CALL_NEGSCALE;
                }
        }
        else {
                unit_m4(state->model);
        }

        if (ob != NULL) {
                float corner[3];
                BoundBox *bbox = BKE_object_boundbox_get(ob);
                /* Get BoundSphere center and radius from the BoundBox. */
                mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
                mul_v3_m4v3(corner, obmat, bbox->vec[0]);
                mul_m4_v3(obmat, state->bsphere.center);
                state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
        }
        else {
                /* Bypass test. */
                state->bsphere.radius = -1.0f;
        }

        /* Orco factors: we compute these at creation time so we don't have to keep the ob_data pointer around. */
        if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
                drw_call_calc_orco(ob, state->orcotexfac);
                state->matflag &= ~DRW_CALL_ORCOTEXFAC;
        }

        if ((state->matflag & DRW_CALL_OBJECTINFO) != 0) {
                state->objectinfo[0] = ob ? ob->index : 0;
                uint random;
#if 0 /* TODO(fclem) handle dupli objects */
                if (GMS.dob) {
                        random = GMS.dob->random_id;
                }
                else
#endif
                {
                        random = BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
                }
                state->objectinfo[1] = random * (1.0f / (float)0xFFFFFFFF);
                state->matflag &= ~DRW_CALL_OBJECTINFO;
        }

        return state;
}

static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        if (DST.ob_state == NULL) {
                DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
        }
        else {
                /* If the DRWCallState is reused, add the necessary matrices. */
                DST.ob_state->matflag |= shgroup->matflag;
        }

        return DST.ob_state;
}

void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
        BLI_assert(v_count);

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_RANGE;
        call->range.geometry = geom;
        call->range.start = v_sta;
        call->range.count = v_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void drw_shgroup_call_procedural_add_ex(
        DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
{
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        if (ob) {
                call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        }
        else {
                call->state = drw_call_state_create(shgroup, obmat, NULL);
        }
        call->type = DRW_CALL_PROCEDURAL;
        call->procedural.prim_type = prim_type;
        call->procedural.vert_count = vert_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
}

void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
}

void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
}
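
/* Example (illustrative sketch): drawing geometry generated entirely in the
 * vertex shader, e.g. a single triangle positioned from gl_VertexID, with no
 * VBO attribs and an identity model matrix:
 *
 *   DRW_shgroup_call_procedural_triangles_add(shgroup, 1, NULL);
 */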

/* TODO (fclem): this is a sign that the API is starting to be limiting.
 * Maybe add a general-purpose function for these special cases. */
void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
}

/* These calls can be culled and are optimized for redraw. */
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma, bool bypass_culling)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
        call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        /* NOTE: this will disable culling for the whole object. */
        call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_object_add_with_callback(
        DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma,
        DRWCallVisibilityFn *callback, void *user_data)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->state->visibility_cb = callback;
        call->state->user_data = user_data;
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
        call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

/* These calls can be culled and are optimized for redraw. */
void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
        BLI_assert(geometry_fn != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_GENERATE;
        call->generate.geometry_fn = geometry_fn;
        call->generate.user_data = user_data;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void sculpt_draw_cb(
        DRWShadingGroup *shgroup,
        void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
        void *user_data)
{
        Object *ob = user_data;

        /* XXX should be ensured before, but sometimes it's not... go figure (see T57040). */
        PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);

        const DRWContextState *drwctx = DRW_context_state_get();
        int fast_mode = 0;

        if (drwctx->evil_C != NULL) {
                Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
                if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
                        fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
                }
        }

        if (pbvh) {
                BKE_pbvh_draw_cb(
                        pbvh, NULL, NULL, fast_mode, false,
                        (void (*)(void *, GPUBatch *))draw_fn, shgroup);
        }
}

void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
        DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}

void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
{
#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
        }
#endif

        BLI_assert(attr_len == shgroup->attribs_count);
        UNUSED_VARS_NDEBUG(attr_len);

        for (int i = 0; i < attr_len; ++i) {
                if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
        }

        shgroup->instance_count += 1;
}
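
/* Example (illustrative sketch): appending one element at a time to a
 * batching or instancing shgroup. Each pointer in the array corresponds to
 * one attribute of the GPUVertFormat the shgroup was created with (here a
 * single 3-float "pos" attribute, as with g_pos_format used in this file):
 *
 *   const float pos[3] = {0.0f, 0.0f, 0.0f};
 *   const void *attr[] = {pos};
 *   DRW_shgroup_call_dynamic_add_array(shgroup, attr, 1);
 */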

/** \} */

/* -------------------------------------------------------------------- */

/** \name Shading Groups (DRW_shgroup)
 * \{ */

static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
        shgroup->instance_count = 0;
        shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
        shgroup->inst_selectid = NULL;
        shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
        shgroup->attribs_count = 0;
#endif

        int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");

        if (view_ubo_location != -1) {
                drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
        }
        else {
                /* Only here to support builtin shaders. This should not be used by engines. */
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
        }

        shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
        shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
        shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
        shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
        shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
        shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
        shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
        shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
        shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
        shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
        shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);

        shgroup->matflag = 0;
        if (shgroup->modelinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELINVERSE;
        if (shgroup->modelview > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEW;
        if (shgroup->modelviewinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
        if (shgroup->modelviewprojection > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
        if (shgroup->normalview > -1)
                shgroup->matflag |= DRW_CALL_NORMALVIEW;
        if (shgroup->normalworld > -1)
                shgroup->matflag |= DRW_CALL_NORMALWORLD;
        if (shgroup->orcotexfac > -1)
                shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
        if (shgroup->objectinfo > -1)
                shgroup->matflag |= DRW_CALL_OBJECTINFO;
        if (shgroup->eye > -1)
                shgroup->matflag |= DRW_CALL_EYEVEC;
}

static void drw_shgroup_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(batch != NULL);
        BLI_assert(format != NULL);

        drw_shgroup_init(shgroup, shader);

        shgroup->instance_geom = batch;
#ifndef NDEBUG
        shgroup->attribs_count = format->attr_len;
#endif

        DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                      &shgroup->instance_geom, &shgroup->instance_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk.
                 * Plus we don't have to care about ownership. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch_dummy; /* Not used. */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch_dummy, &shgroup->inst_selectid);
        }
#endif
}

static void drw_shgroup_batching_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
{
        drw_shgroup_init(shgroup, shader);

#ifndef NDEBUG
        shgroup->attribs_count = (format != NULL) ? format->attr_len : 0;
#endif
        BLI_assert(format != NULL);

        GPUPrimType type;
        switch (shgroup->type) {
                case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
                case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
                case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
                default: type = GPU_PRIM_NONE; BLI_assert(0); break;
        }

        DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
                                    &shgroup->batch_geom, &shgroup->batch_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch; /* Not used. */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch, &shgroup->inst_selectid);
        }
#endif
}

static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

        BLI_LINKS_APPEND(&pass->shgroups, shgroup);

        shgroup->type = DRW_SHG_NORMAL;
        shgroup->shader = shader;
        shgroup->state_extra = 0;
        shgroup->state_extra_disable = ~0x0;
        shgroup->stencil_mask = 0;
        shgroup->calls.first = NULL;
        shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
        shgroup->batch_geom = NULL;
        shgroup->batch_vbo = NULL;

        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
#endif
        shgroup->pass_parent = pass;

        return shgroup;
}

static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
        if (!gpupass) {
                /* Shader compilation error. */
                return NULL;
        }

        GPUShader *sh = GPU_pass_shader_get(gpupass);

        if (!sh) {
                /* Shader not yet compiled. */
                return NULL;
        }

        DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
        return grp;
}

static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
{
        ListBase *inputs = GPU_material_get_inputs(material);

        /* Converting dynamic GPUInput to DRWUniform. */
        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->source == GPU_SOURCE_TEX) {
                        GPUTexture *tex = NULL;

                        if (input->ima) {
                                double time = 0.0; /* TODO: make time variable. */
                                tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata, time);
                        }
                        else {
                                /* Color Ramps */
                                tex = *input->coba;
                        }

                        if (input->bindtex) {
                                drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
                        }
                }
        }

        GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}

GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
{
        GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");

        for (int i = 0; i < arraysize; ++i) {
                GPU_vertformat_attr_add(format, attribs[i].name,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
                                        attribs[i].components,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
        }
        return format;
}
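
/* Example (illustrative): the DRW_shgroup_instance_format() macro used later
 * in this file is presumably a convenience wrapper around this function:
 *
 *   DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
 *
 * which would amount to calling DRW_shgroup_instance_format_array() with a
 * one-element DRWInstanceAttribFormat array describing a 3-component float
 * attribute named "pos", filling the format only on first use. */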

DRWShadingGroup *DRW_shgroup_material_create(
        struct GPUMaterial *material, DRWPass *pass)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                shgroup->type = DRW_SHG_INSTANCE;
                shgroup->instance_geom = geom;
                drw_call_calc_orco(ob, shgroup->instance_orcofac);
                drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                shgroup->instance_count = tri_count * 3;
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        drw_shgroup_init(shgroup, shader);
        return shgroup;
}
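
/* Example (illustrative sketch): typical cache population in an engine,
 * using only functions defined in this file. "My Pass", shader, color, geom
 * and ob are hypothetical; the DRW_STATE_* flags stand in for whatever state
 * the pass actually needs:
 *
 *   DRWPass *pass = DRW_pass_create("My Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS);
 *   DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
 *   DRW_shgroup_uniform_vec4(grp, "color", color, 1);
 *   DRW_shgroup_call_add(grp, geom, ob->obmat);
 */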

DRWShadingGroup *DRW_shgroup_instance_create(
        struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_INSTANCE;
        shgroup->instance_geom = geom;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        drw_shgroup_instance_init(shgroup, shader, geom, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_POINT_BATCH;

        drw_shgroup_batching_init(shgroup, shader, g_pos_format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
        struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_LINE_BATCH;

        drw_shgroup_batching_init(shgroup, shader, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
}

/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attribs. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

        /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
        drw_shgroup_init(shgroup, shader);

        shgroup->type = DRW_SHG_TRIANGLE_BATCH;
        shgroup->instance_count = tri_count * 3;

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
{
        BLI_assert(tf_target != NULL);
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;

        drw_shgroup_init(shgroup, shader);

        shgroup->tfeedback_target = tf_target;

        return shgroup;
}

/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_count == 0);
        /* You cannot use an external instancing batch without a dummy format. */
        BLI_assert(shgroup->attribs_count != 0);

        shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        /* PERF: This destroys the VAO cache, so better check if it's necessary. */
        /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
         * at the same address. Bindings/VAOs would remain obsolete. */
        //if (shgroup->instancing_geom->inst != batch->verts[0])
        GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
        shgroup->override_selectid = DST.select_id;
#endif
}

uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
        return shgroup->instance_count;
}

/**
 * State is added to #Pass.state while drawing.
 * Use to temporarily enable draw options.
 */
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra |= state;
}

void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra_disable &= ~state;
}

void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
        BLI_assert(mask <= 255);
        shgroup->stencil_mask = mask;
}

bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
        switch (shgroup->type) {
                case DRW_SHG_NORMAL:
                case DRW_SHG_FEEDBACK_TRANSFORM:
                        return shgroup->calls.first == NULL;
                case DRW_SHG_POINT_BATCH:
                case DRW_SHG_LINE_BATCH:
                case DRW_SHG_TRIANGLE_BATCH:
                case DRW_SHG_INSTANCE:
                case DRW_SHG_INSTANCE_EXTERNAL:
                        return shgroup->instance_count == 0;
        }
        BLI_assert(!"Shading Group type not supported");
        return true;
}

DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
        /* Remove this assertion if needed but implement the other cases first! */
        BLI_assert(shgroup->type == DRW_SHG_NORMAL);

        DRWShadingGroup *shgroup_new = BLI_mempool_alloc(DST.vmempool->shgroups);

        *shgroup_new = *shgroup;
        shgroup_new->uniforms = NULL;
        shgroup_new->calls.first = NULL;
        shgroup_new->calls.last = NULL;

        BLI_LINKS_INSERT_AFTER(&shgroup->pass_parent->shgroups, shgroup, shgroup_new);

        return shgroup_new;
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Passes (DRW_pass)
 * \{ */

DRWPass *DRW_pass_create(const char *name, DRWState state)
{
        DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
        pass->state = state;
        if (((G.debug_value > 20) && (G.debug_value < 30)) ||
             (G.debug & G_DEBUG))
        {
                BLI_strncpy(pass->name, name, MAX_PASS_NAME);
        }

        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;

        return pass;
}

bool DRW_pass_is_empty(DRWPass *pass)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                if (!DRW_shgroup_is_empty(shgroup)) {
                        return false;
                }
        }
        return true;
}

void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
        pass->state = state;
}

void DRW_pass_state_add(DRWPass *pass, DRWState state)
{
        pass->state |= state;
}

void DRW_pass_state_remove(DRWPass *pass, DRWState state)
{
        pass->state &= ~state;
}

void DRW_pass_free(DRWPass *pass)
{
        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;
}

void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                callback(userData, shgroup);
        }
}
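
/* Example (illustrative): counting the shading groups of a pass through the
 * iterator; "count_shgroup_cb" is a hypothetical callback.
 *
 *   static void count_shgroup_cb(void *user_data, DRWShadingGroup *UNUSED(shgrp))
 *   {
 *           int *count = user_data;
 *           (*count)++;
 *   }
 *
 *   int count = 0;
 *   DRW_pass_foreach_shgroup(pass, count_shgroup_cb, &count);
 */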

typedef struct ZSortData {
        float *axis;
        float *origin;
} ZSortData;

static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
        const ZSortData *zsortdata = (ZSortData *)thunk;
        const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
        const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

        const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
        const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;

        if (call_a == NULL) return -1;
        if (call_b == NULL) return -1;

        float tmp[3];
        sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
        const float a_sq = dot_v3v3(zsortdata->axis, tmp);
        sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
        const float b_sq = dot_v3v3(zsortdata->axis, tmp);

        if      (a_sq < b_sq) return  1;
        else if (a_sq > b_sq) return -1;
        else {
                /* If there is a depth prepass, put it first. */
                if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return -1;
                }
                else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return  1;
                }
                else return  0;
        }
}

/* ------------------ Shading group sorting --------------------- */

#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE

/**
 * Sort shading groups by decreasing Z of their first draw call.
 * This is useful for order-dependent effects such as transparency.
 */
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
        float (*viewinv)[4];
        viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];

        ZSortData zsortdata = {viewinv[2], viewinv[3]};

        if (pass->shgroups.first && pass->shgroups.first->next) {
                pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);

                /* Find the new last. */
                DRWShadingGroup *last = pass->shgroups.first;
                while ((last = last->next)) {
                        /* Do nothing. */
                }
                pass->shgroups.last = last;
        }
}

/** \} */