/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */

/** \file blender/draw/intern/draw_manager_data.c
 *  \ingroup draw
 */

#include "draw_manager.h"

#include "BKE_curve.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_object.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"

#include "DNA_curve_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"

#include "BLI_hash.h"
#include "BLI_link_utils.h"
#include "BLI_mempool.h"

#include "intern/gpu_codegen.h"

struct GPUVertFormat *g_pos_format = NULL;

extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */

/* -------------------------------------------------------------------- */

/** \name Uniform Buffer Object (DRW_uniformbuffer)
 * \{ */

GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
        return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
        GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
        GPU_uniformbuffer_free(ubo);
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Uniforms (DRW_shgroup_uniform)
 * \{ */

static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
                                          DRWUniformType type, const void *value, int length, int arraysize)
{
        DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
        uni->location = loc;
        uni->type = type;
        uni->length = length;
        uni->arraysize = arraysize;

        switch (type) {
                case DRW_UNIFORM_INT_COPY:
                        uni->ivalue = *((int *)value);
                        break;
                case DRW_UNIFORM_BOOL_COPY:
                        uni->ivalue = (int)*((bool *)value);
                        break;
                case DRW_UNIFORM_FLOAT_COPY:
                        uni->fvalue = *((float *)value);
                        break;
                default:
                        uni->pvalue = value;
                        break;
        }

        BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}
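
/* Note on storage semantics (this mirrors the switch above): the *_COPY types
 * store the value inline in the DRWUniform, so the caller's variable can be a
 * temporary. All other types store the 'value' pointer itself, so the
 * pointed-to data must stay valid until the pass is drawn. */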

static void drw_shgroup_builtin_uniform(
        DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
        int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);

        if (loc != -1) {
                drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
        }
}

static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
                                DRWUniformType type, const void *value, int length, int arraysize)
{
        int location;
        if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
                location = GPU_shader_get_uniform_block(shgroup->shader, name);
        }
        else {
                location = GPU_shader_get_uniform(shgroup->shader, name);
        }

        if (location == -1) {
                if (G.debug & G_DEBUG_GPU)
                        fprintf(stderr, "Warning: Pass : %s, Uniform '%s' not found!\n", shgroup->pass_parent->name, name);
                /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
                // BLI_assert(0);
                return;
        }

        BLI_assert(arraysize > 0 && arraysize <= 16);
        BLI_assert(length >= 0 && length <= 16);

        drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);

#ifndef NDEBUG
        /* Save uniform name to easily identify it when debugging. */
        BLI_strncpy(shgroup->uniforms->name, name, MAX_UNIFORM_NAME);
#endif
}

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

/* Same as DRW_shgroup_uniform_texture, but the binding is guaranteed to persist as long as the shader does not change between shading groups. */
void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
        BLI_assert(tex != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Same as DRW_shgroup_uniform_block, but the binding is guaranteed to persist as long as the shader does not change between shading groups. */
void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
        BLI_assert(ubo != NULL);
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
}

void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}

/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
}

void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
        drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}
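
/* Illustrative usage (a minimal sketch; sh, pass, ob_color and the uniform
 * names are hypothetical engine-side variables, not part of this API):
 *
 *   DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
 *   DRW_shgroup_uniform_vec4(grp, "color", ob_color, 1);   // pointer is kept
 *   DRW_shgroup_uniform_float_copy(grp, "alpha", 0.5f);    // value is copied
 */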


/** \} */

/* -------------------------------------------------------------------- */

/** \name Draw Call (DRW_calls)
 * \{ */

static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
        ID *ob_data = (ob) ? ob->data : NULL;
        float *texcoloc = NULL;
        float *texcosize = NULL;
        if (ob_data != NULL) {
                switch (GS(ob_data->name)) {
                        case ID_ME:
                                BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
                                break;
                        case ID_CU:
                        {
                                Curve *cu = (Curve *)ob_data;
                                if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
                                        BKE_curve_texspace_calc(cu);
                                }
                                texcoloc = cu->loc;
                                texcosize = cu->size;
                                break;
                        }
                        case ID_MB:
                        {
                                MetaBall *mb = (MetaBall *)ob_data;
                                texcoloc = mb->loc;
                                texcosize = mb->size;
                                break;
                        }
                        default:
                                break;
                }
        }

        if ((texcoloc != NULL) && (texcosize != NULL)) {
                mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
                invert_v3(r_orcofacs[1]);
                sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
                negate_v3(r_orcofacs[0]);
                mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* Results in a nice MADD in the shader. */
        }
        else {
                copy_v3_fl(r_orcofacs[0], 0.0f);
                copy_v3_fl(r_orcofacs[1], 1.0f);
        }
}
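
/* Derivation of the factors computed above: generated texture coordinates
 * map positions into the texture space box, i.e.
 *   orco = (pos - (loc - size)) / (2 * size)
 * Splitting this into a multiply-add gives
 *   orcofacs[1] = 1 / (2 * size)                 (scale)
 *   orcofacs[0] = -(loc - size) * orcofacs[1]    (offset)
 * so the shader only has to evaluate pos * orcofacs[1] + orcofacs[0]. */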

static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
        state->flag = 0;
        state->cache_id = 0;
        state->visibility_cb = NULL;
        state->matflag = shgroup->matflag;

        /* Matrices */
        if (obmat != NULL) {
                copy_m4_m4(state->model, obmat);

                if (is_negative_m4(state->model)) {
                        state->flag |= DRW_CALL_NEGSCALE;
                }
        }
        else {
                unit_m4(state->model);
        }

        if (ob != NULL) {
                float corner[3];
                BoundBox *bbox = BKE_object_boundbox_get(ob);
                /* Get BoundSphere center and radius from the BoundBox. */
                mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
                mul_v3_m4v3(corner, obmat, bbox->vec[0]);
                mul_m4_v3(obmat, state->bsphere.center);
                state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
        }
        else {
                /* Bypass test. */
                state->bsphere.radius = -1.0f;
        }

        /* Orco factors: computed at creation time so we don't have to store the object's data pointer. */
        if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
                drw_call_calc_orco(ob, state->orcotexfac);
                state->matflag &= ~DRW_CALL_ORCOTEXFAC;
        }

        if ((state->matflag & DRW_CALL_OBJECTINFO) != 0) {
                state->objectinfo[0] = ob ? ob->index : 0;
                uint random;
#if 0 /* TODO(fclem) handle dupli objects */
                if (GMS.dob) {
                        random = GMS.dob->random_id;
                }
                else
#endif
                {
                        random = BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
                }
                state->objectinfo[1] = random * (1.0f / (float)0xFFFFFFFF);
                state->matflag &= ~DRW_CALL_OBJECTINFO;
        }

        return state;
}

static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        if (DST.ob_state == NULL) {
                DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
        }
        else {
                /* If the DRWCallState is reused, add necessary matrices. */
                DST.ob_state->matflag |= shgroup->matflag;
        }

        return DST.ob_state;
}

void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}
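
/* Illustrative usage (a minimal sketch; shader, pass, geom and ob are
 * hypothetical engine-side variables):
 *
 *   DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
 *   DRW_shgroup_call_add(grp, geom, ob->obmat);  // one batch, one matrix
 *
 * Passing NULL as obmat draws with the identity matrix (see
 * drw_call_state_create above). */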

void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
        BLI_assert(v_count);

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_RANGE;
        call->range.geometry = geom;
        call->range.start = v_sta;
        call->range.count = v_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void drw_shgroup_call_procedural_add_ex(
        DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
{
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        if (ob) {
                call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        }
        else {
                call->state = drw_call_state_create(shgroup, obmat, NULL);
        }
        call->type = DRW_CALL_PROCEDURAL;
        call->procedural.prim_type = prim_type;
        call->procedural.vert_count = vert_count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
}

void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
}

void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
}
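
/* Note: the procedural helpers above expand the primitive count into a raw
 * vertex count (points * 1, lines * 2, triangles * 3). No VBO attributes are
 * bound for these calls, so the vertex shader is expected to generate
 * positions itself (typically from gl_VertexID). */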

/* TODO (fclem): this is a sign that the API is starting to become limiting.
 * Maybe add a general-purpose function to cover these special cases. */
void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
{
        drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma, bool bypass_culling)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
        call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        /* NOTE this will disable culling for the whole object. */
        call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_object_add_with_callback(
        DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma,
        DRWCallVisibilityFn *callback, void *user_data)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->state->visibility_cb = callback;
        call->state->user_data = user_data;
        call->type = DRW_CALL_SINGLE;
        call->single.geometry = geom;
        call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

/* These calls can be culled and are optimized for redraw */
void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
{
        BLI_assert(geom != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_object(shgroup, ob->obmat, ob);
        call->type = DRW_CALL_INSTANCES;
        call->instances.geometry = geom;
        call->instances.count = count;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
        BLI_assert(geometry_fn != NULL);
        BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));

        DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
        call->state = drw_call_state_create(shgroup, obmat, NULL);
        call->type = DRW_CALL_GENERATE;
        call->generate.geometry_fn = geometry_fn;
        call->generate.user_data = user_data;
#ifdef USE_GPU_SELECT
        call->select_id = DST.select_id;
#endif

        BLI_LINKS_APPEND(&shgroup->calls, call);
}

static void sculpt_draw_cb(
        DRWShadingGroup *shgroup,
        void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
        void *user_data)
{
        Object *ob = user_data;

        /* XXX should be ensured before, but sometimes it's not... go figure (see T57040). */
        PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);

        const DRWContextState *drwctx = DRW_context_state_get();
        int fast_mode = 0;

        if (drwctx->evil_C != NULL) {
                Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
                if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
                        fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
                }
        }

        if (pbvh) {
                BKE_pbvh_draw_cb(
                        pbvh, NULL, NULL, fast_mode, false,
                        (void (*)(void *, GPUBatch *))draw_fn, shgroup);
        }
}

void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
        DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}

void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
{
#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
        }
#endif

        BLI_assert(attr_len == shgroup->attribs_count);
        UNUSED_VARS_NDEBUG(attr_len);

        for (int i = 0; i < attr_len; ++i) {
                if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
                        GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
                }
                GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
        }

        shgroup->instance_count += 1;
}
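
/* Illustrative usage (a minimal sketch; the attribute order and count must
 * match the GPUVertFormat the batching/instancing shading group was created
 * with, and grp is a hypothetical engine-side variable):
 *
 *   float pos[3], color[4];
 *   // ... fill pos and color ...
 *   const void *attr[2] = {pos, color};
 *   DRW_shgroup_call_dynamic_add_array(grp, attr, 2);  // appends one element
 */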

/** \} */

/* -------------------------------------------------------------------- */

/** \name Shading Groups (DRW_shgroup)
 * \{ */

static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
        shgroup->instance_count = 0;
        shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
        shgroup->inst_selectid = NULL;
        shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
        shgroup->attribs_count = 0;
#endif

        int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");

        if (view_ubo_location != -1) {
                drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
        }
        else {
                /* Only here to support builtin shaders. This should not be used by engines. */
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
        }

        shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
        shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
        shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
        shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
        shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
        shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
        shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
        shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
        shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
        shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
        shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);

        shgroup->matflag = 0;
        if (shgroup->modelinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELINVERSE;
        if (shgroup->modelview > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEW;
        if (shgroup->modelviewinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
        if (shgroup->modelviewprojection > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
        if (shgroup->normalview > -1)
                shgroup->matflag |= DRW_CALL_NORMALVIEW;
        if (shgroup->normalworld > -1)
                shgroup->matflag |= DRW_CALL_NORMALWORLD;
        if (shgroup->orcotexfac > -1)
                shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
        if (shgroup->objectinfo > -1)
                shgroup->matflag |= DRW_CALL_OBJECTINFO;
        if (shgroup->eye > -1)
                shgroup->matflag |= DRW_CALL_EYEVEC;
}

static void drw_shgroup_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(batch != NULL);
        BLI_assert(format != NULL);

        drw_shgroup_init(shgroup, shader);

        shgroup->instance_geom = batch;
#ifndef NDEBUG
        shgroup->attribs_count = format->attr_len;
#endif

        DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                      &shgroup->instance_geom, &shgroup->instance_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk.
                 * Plus we don't have to care about ownership. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch_dummy; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch_dummy, &shgroup->inst_selectid);
        }
#endif
}

static void drw_shgroup_batching_init(
        DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
{
        drw_shgroup_init(shgroup, shader);

#ifndef NDEBUG
        shgroup->attribs_count = (format != NULL) ? format->attr_len : 0;
#endif
        BLI_assert(format != NULL);

        GPUPrimType type;
        switch (shgroup->type) {
                case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
                case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
                case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
                default: type = GPU_PRIM_NONE; BLI_assert(0); break;
        }

        DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
                                    &shgroup->batch_geom, &shgroup->batch_vbo);

#ifdef USE_GPU_SELECT
        if (G.f & G_PICKSEL) {
                /* Not actually used for rendering but allocated in one chunk. */
                static GPUVertFormat inst_select_format = {0};
                if (inst_select_format.attr_len == 0) {
                        GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
                }
                GPUBatch *batch; /* Not used */
                DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
                                            GPU_PRIM_POINTS, shgroup,
                                            &batch, &shgroup->inst_selectid);
        }
#endif
}

static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

        BLI_LINKS_APPEND(&pass->shgroups, shgroup);

        shgroup->type = DRW_SHG_NORMAL;
        shgroup->shader = shader;
        shgroup->state_extra = 0;
        shgroup->state_extra_disable = ~0x0;
        shgroup->stencil_mask = 0;
        shgroup->calls.first = NULL;
        shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
        shgroup->batch_geom = NULL;
        shgroup->batch_vbo = NULL;

        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
#endif
        shgroup->pass_parent = pass;

        return shgroup;
}

static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
        if (!gpupass) {
                /* Shader compilation error */
                return NULL;
        }

        GPUShader *sh = GPU_pass_shader_get(gpupass);

        if (!sh) {
                /* Shader not yet compiled */
                return NULL;
        }

        DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
        return grp;
}

static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
{
        ListBase *inputs = GPU_material_get_inputs(material);

        /* Converting dynamic GPUInput to DRWUniform */
        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->source == GPU_SOURCE_TEX) {
                        GPUTexture *tex = NULL;

                        if (input->ima) {
                                double time = 0.0; /* TODO make time variable */
                                tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata, time);
                        }
                        else {
                                /* Color Ramps */
                                tex = *input->coba;
                        }

                        if (input->bindtex) {
                                drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
                        }
                }
        }

        GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}

GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
{
        GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");

        for (int i = 0; i < arraysize; ++i) {
                GPU_vertformat_attr_add(format, attribs[i].name,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
                                        attribs[i].components,
                                        (attribs[i].type == DRW_ATTRIB_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
        }
        return format;
}
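
/* Illustrative usage: build a per-instance vertex format once and reuse it
 * (the DRW_shgroup_instance_format() wrapper used further below builds its
 * format this way on first use):
 *
 *   GPUVertFormat *format = DRW_shgroup_instance_format_array(
 *           (DRWInstanceAttribFormat[]){
 *               {"pos",   DRW_ATTRIB_FLOAT, 3},
 *               {"color", DRW_ATTRIB_FLOAT, 4},
 *           }, 2);
 */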

DRWShadingGroup *DRW_shgroup_material_create(
        struct GPUMaterial *material, DRWPass *pass)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                shgroup->type = DRW_SHG_INSTANCE;
                shgroup->instance_geom = geom;
                drw_call_calc_orco(ob, shgroup->instance_orcofac);
                drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
                drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                shgroup->instance_count = tri_count * 3;
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        drw_shgroup_init(shgroup, shader);
        return shgroup;
}

DRWShadingGroup *DRW_shgroup_instance_create(
        struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_INSTANCE;
        shgroup->instance_geom = geom;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        drw_shgroup_instance_init(shgroup, shader, geom, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_POINT_BATCH;

        drw_shgroup_batching_init(shgroup, shader, g_pos_format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
        struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
{
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_LINE_BATCH;

        drw_shgroup_batching_init(shgroup, shader, format);

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
        DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

        return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
}

/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attributes. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

        /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
        drw_shgroup_init(shgroup, shader);

        shgroup->type = DRW_SHG_TRIANGLE_BATCH;
        shgroup->instance_count = tri_count * 3;

        return shgroup;
}

DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
{
        BLI_assert(tf_target != NULL);
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
        shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;

        drw_shgroup_init(shgroup, shader);

        shgroup->tfeedback_target = tf_target;

        return shgroup;
}

/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_count == 0);
        /* You cannot use an external instancing batch without a dummy format. */
        BLI_assert(shgroup->attribs_count != 0);

        shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        /* PERF: This destroys the VAO cache, so check whether it's really necessary. */
        /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
         * at the same address. Bindings/VAOs would remain obsolete. */
        //if (shgroup->instancing_geom->inst != batch->verts[0])
        GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
        shgroup->override_selectid = DST.select_id;
#endif
}

uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
        return shgroup->instance_count;
}

/**
 * State is added to #Pass.state while drawing.
 * Use to temporarily enable draw options.
 */
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra |= state;
}

void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
        shgroup->state_extra_disable &= ~state;
}

void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
        BLI_assert(mask <= 255);
        shgroup->stencil_mask = mask;
}

bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
        switch (shgroup->type) {
                case DRW_SHG_NORMAL:
                case DRW_SHG_FEEDBACK_TRANSFORM:
                        return shgroup->calls.first == NULL;
                case DRW_SHG_POINT_BATCH:
                case DRW_SHG_LINE_BATCH:
                case DRW_SHG_TRIANGLE_BATCH:
                case DRW_SHG_INSTANCE:
                case DRW_SHG_INSTANCE_EXTERNAL:
                        return shgroup->instance_count == 0;
        }
        BLI_assert(!"Shading Group type not supported");
        return true;
}

DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
        /* Remove this assertion if needed but implement the other cases first! */
        BLI_assert(shgroup->type == DRW_SHG_NORMAL);

        DRWShadingGroup *shgroup_new = BLI_mempool_alloc(DST.vmempool->shgroups);

        *shgroup_new = *shgroup;
        shgroup_new->uniforms = NULL;
        shgroup_new->calls.first = NULL;
        shgroup_new->calls.last = NULL;

        BLI_LINKS_INSERT_AFTER(&shgroup->pass_parent->shgroups, shgroup, shgroup_new);

        return shgroup_new;
}

/** \} */

/* -------------------------------------------------------------------- */

/** \name Passes (DRW_pass)
 * \{ */

DRWPass *DRW_pass_create(const char *name, DRWState state)
{
        DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
        pass->state = state;
        if (((G.debug_value > 20) && (G.debug_value < 30)) ||
             (G.debug & G_DEBUG))
        {
                BLI_strncpy(pass->name, name, MAX_PASS_NAME);
        }

        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;

        return pass;
}
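
/* Illustrative usage (a minimal sketch; the state flags shown are examples
 * and shader is a hypothetical engine-side variable):
 *
 *   DRWPass *pass = DRW_pass_create(
 *           "My Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH);
 *   DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
 */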

bool DRW_pass_is_empty(DRWPass *pass)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                if (!DRW_shgroup_is_empty(shgroup)) {
                        return false;
                }
        }
        return true;
}

void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
        pass->state = state;
}

void DRW_pass_state_add(DRWPass *pass, DRWState state)
{
        pass->state |= state;
}

void DRW_pass_state_remove(DRWPass *pass, DRWState state)
{
        pass->state &= ~state;
}

void DRW_pass_free(DRWPass *pass)
{
        pass->shgroups.first = NULL;
        pass->shgroups.last = NULL;
}

void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
        for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
                callback(userData, shgroup);
        }
}

typedef struct ZSortData {
        float *axis;
        float *origin;
} ZSortData;

static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
        const ZSortData *zsortdata = (ZSortData *)thunk;
        const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
        const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

        const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
        const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;

        if (call_a == NULL) return -1;
        if (call_b == NULL) return -1;

        float tmp[3];
        sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
        const float a_sq = dot_v3v3(zsortdata->axis, tmp);
        sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
        const float b_sq = dot_v3v3(zsortdata->axis, tmp);

        if      (a_sq < b_sq) return  1;
        else if (a_sq > b_sq) return -1;
        else {
                /* If there is a depth prepass put it before */
                if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return -1;
                }
                else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
                        return  1;
                }
                else return  0;
        }
}
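
/* Note: despite the _sq suffix, the keys above are not squared lengths but
 * signed projections of the object-to-camera vector onto the view axis.
 * Returning 1 when a_sq < b_sq sorts groups in decreasing order, which should
 * give the back-to-front order wanted for transparency. */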

/* ------------------ Shading group sorting --------------------- */

#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE

/**
 * Sort Shading groups by decreasing Z of their first draw call.
 * This is useful for order-dependent effects such as transparency.
 */
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
        float (*viewinv)[4];
        viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];

        ZSortData zsortdata = {viewinv[2], viewinv[3]};

        if (pass->shgroups.first && pass->shgroups.first->next) {
                pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);

                /* Find the new last. */
                DRWShadingGroup *last = pass->shgroups.first;
                while ((last = last->next)) {
                        /* Do nothing */
                }
                pass->shgroups.last = last;
        }
}

/** \} */