Code cleanup: fix a few harmless warnings.
[blender.git] / source / blender / draw / intern / draw_manager_data.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager_data.c
23  *  \ingroup draw
24  */
25
26 #include "draw_manager.h"
27
28 #include "BKE_curve.h"
29 #include "BKE_global.h"
30 #include "BKE_mesh.h"
31 #include "BKE_paint.h"
32 #include "BKE_pbvh.h"
33
34 #include "DNA_curve_types.h"
35 #include "DNA_mesh_types.h"
36 #include "DNA_meta_types.h"
37
38 #include "BLI_link_utils.h"
39 #include "BLI_mempool.h"
40
41 #include "intern/gpu_codegen.h"
42
43 struct Gwn_VertFormat *g_pos_format = NULL;
44
45 /* -------------------------------------------------------------------- */
46
47 /** \name Uniform Buffer Object (DRW_uniformbuffer)
48  * \{ */
49
/* Create a UBO of 'size' bytes, optionally filled with 'data' (may be NULL). */
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
	return GPU_uniformbuffer_create(size, data, NULL);
}
54
/* Re-upload the whole UBO contents from 'data'. */
void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
	GPU_uniformbuffer_update(ubo, data);
}
59
/* Free the UBO and its GPU resource. */
void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
	GPU_uniformbuffer_free(ubo);
}
64
65 /** \} */
66
67 /* -------------------------------------------------------------------- */
68
69 /** \name Uniforms (DRW_shgroup_uniform)
70  * \{ */
71
/* Register a builtin uniform (view/projection matrices, clip planes, ...)
 * on the shading group. Silently ignored when the shader does not use it.
 * All builtins registered through here are float data, hence the fixed
 * DRW_UNIFORM_FLOAT type. */
static void drw_interface_builtin_uniform(
        DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
	int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);

	if (loc == -1)
		return;

	/* Note: 'value' is stored by pointer, not copied. It must stay valid
	 * until the pass is drawn. */
	DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
	uni->location = loc;
	uni->type = DRW_UNIFORM_FLOAT;
	uni->value = value;
	uni->length = length;
	uni->arraysize = arraysize;

	BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}
89
90 static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name,
91                                   DRWUniformType type, const void *value, int length, int arraysize)
92 {
93         int location;
94         if (type == DRW_UNIFORM_BLOCK) {
95                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
96         }
97         else {
98                 location = GPU_shader_get_uniform(shgroup->shader, name);
99         }
100
101         if (location == -1) {
102                 if (G.debug & G_DEBUG)
103                         fprintf(stderr, "Uniform '%s' not found!\n", name);
104                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
105                 // BLI_assert(0);
106                 return;
107         }
108
109         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
110
111         BLI_assert(arraysize > 0 && arraysize <= 16);
112         BLI_assert(length >= 0 && length <= 16);
113
114         uni->location = location;
115         uni->type = type;
116         uni->value = value;
117         uni->length = length;
118         uni->arraysize = arraysize;
119
120         BLI_LINKS_PREPEND(shgroup->uniforms, uni);
121 }
122
/* Bind a texture sampler uniform (stored by pointer, not copied). */
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}
127
/* Bind a uniform buffer object to a named shader block. */
void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}
132
/* Bind a texture through a double pointer, so the texture can be
 * (re)assigned after this call, up until drawing. */
void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
}
137
/* Boolean uniform, passed as int storage. */
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}
142
/* Scalar float uniform (or array of 'arraysize' floats). */
void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}
147
/* vec2 uniform (length 2). */
void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}
152
/* vec3 uniform (length 3). */
void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}
157
/* vec4 uniform (length 4). */
void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}
162
/* short storage, uploaded as an int uniform (conversion handled at bind
 * time by the draw manager, per the DRW_UNIFORM_SHORT_TO_INT type). */
void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}
167
/* short storage, uploaded as a float uniform. */
void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}
172
/* Scalar int uniform. */
void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}
177
/* ivec2 uniform (length 2). */
void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}
182
/* ivec3 uniform (length 3). */
void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}
187
/* mat3 uniform (9 floats, single matrix). */
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 9, 1);
}
192
/* mat4 uniform (16 floats, single matrix). */
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 16, 1);
}
197
198 /** \} */
199
200 /* -------------------------------------------------------------------- */
201
202 /** \name Draw Call (DRW_calls)
203  * \{ */
204
/* Compute the orco (original/generated texture coordinate) factors from the
 * texture space of the object data (mesh, curve or metaball).
 * r_orcofacs[0] is the offset and r_orcofacs[1] the scale, laid out so the
 * shader can apply them with a single MADD.
 * Falls back to the identity mapping (offset 0, scale 1) when no texture
 * space is available. */
static void drw_call_calc_orco(ID *ob_data, float (*r_orcofacs)[3])
{
	float *texcoloc = NULL;
	float *texcosize = NULL;
	if (ob_data != NULL) {
		switch (GS(ob_data->name)) {
			case ID_ME:
				BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
				break;
			case ID_CU:
			{
				Curve *cu = (Curve *)ob_data;
				/* Curve texture space is lazily evaluated: refresh if dirty. */
				if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
					BKE_curve_texspace_calc(cu);
				}
				texcoloc = cu->loc;
				texcosize = cu->size;
				break;
			}
			case ID_MB:
			{
				MetaBall *mb = (MetaBall *)ob_data;
				texcoloc = mb->loc;
				texcosize = mb->size;
				break;
			}
			default:
				/* Other ID types have no texture space: identity fallback below. */
				break;
		}
	}

	if ((texcoloc != NULL) && (texcosize != NULL)) {
		mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
		invert_v3(r_orcofacs[1]);
		sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
		negate_v3(r_orcofacs[0]);
		mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
	}
	else {
		copy_v3_fl(r_orcofacs[0], 0.0f);
		copy_v3_fl(r_orcofacs[1], 1.0f);
	}
}
248
/* Initialize the per-call state: model matrix (identity when 'obmat' is
 * NULL), negative-scale flag, and orco factors when the shader needs them. */
static void drw_call_set_matrices(DRWCallState *state, float (*obmat)[4], ID *ob_data)
{
	/* Matrices */
	if (obmat != NULL) {
		copy_m4_m4(state->model, obmat);

		/* Negative scale flips winding; drawing code needs to know. */
		if (is_negative_m4(state->model)) {
			state->flag |= DRW_CALL_NEGSCALE;
		}
	}
	else {
		unit_m4(state->model);
	}

	/* Orco factors */
	if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
		drw_call_calc_orco(ob_data, state->orcotexfac);
		/* Computed once here; clear the flag so draw time skips it. */
		state->matflag &= ~DRW_CALL_ORCOTEXFAC;
	}

	/* TODO Set culling bsphere IF needed by the DRWPass */
	/* Negative radius means "no culling sphere computed". */
	state->bsphere.rad = -1.0f;
}
272
/* Add a single draw call for 'geom' with the given object matrix
 * (NULL means identity). Only valid on normal (non instancing) groups. */
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
{
	BLI_assert(geom != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
	call->head.type = DRW_CALL_SINGLE;
	call->state.flag = 0;
	call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
	call->head.select_id = DST.select_id;
#endif
	call->geometry = geom;
	/* No ID data: orco factors (if requested) fall back to identity. */
	drw_call_set_matrices(&call->state, obmat, NULL);
	BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
289
/* Same as DRW_shgroup_call_add but takes the matrix and data from an
 * Object, so texture-space (orco) factors can be computed. */
void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
{
	BLI_assert(geom != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
	call->head.type = DRW_CALL_SINGLE;
	call->state.flag = 0;
	call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
	call->head.select_id = DST.select_id;
#endif
	call->geometry = geom;
	drw_call_set_matrices(&call->state, ob->obmat, ob->data);
	BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
306
/* Add a call whose geometry is produced at draw time by 'geometry_fn'
 * (called with 'user_data'), instead of a prebuilt batch. */
void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
	BLI_assert(geometry_fn != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);
	call->head.type = DRW_CALL_GENERATE;
	call->state.flag = 0;
	call->state.matflag = shgroup->matflag;
#ifdef USE_GPU_SELECT
	call->head.select_id = DST.select_id;
#endif
	call->geometry_fn = geometry_fn;
	call->user_data = user_data;
	drw_call_set_matrices(&call->state, obmat, NULL);
	BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call);
}
327
/* DRWCallGenerateFn callback: walks the sculpt PBVH of the object passed
 * as 'user_data' and emits one batch per node through 'draw_fn'. */
static void sculpt_draw_cb(
        DRWShadingGroup *shgroup,
        void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
        void *user_data)
{
	Object *ob = user_data;
	PBVH *pbvh = ob->sculpt->pbvh;

	if (pbvh) {
		BKE_pbvh_draw_cb(
		        pbvh, NULL, NULL, false,
		        (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
	}
}
342
/* Add a generated call that draws the sculpt PBVH of 'ob'. */
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
	DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
347
348 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
349 {
350 #ifdef USE_GPU_SELECT
351         if (G.f & G_PICKSEL) {
352                 if (shgroup->inst_selectid == NULL) {
353                         shgroup->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
354                 }
355
356                 int *select_id = DRW_instance_data_next(shgroup->inst_selectid);
357                 *select_id = DST.select_id;
358         }
359 #endif
360
361         BLI_assert(attr_len == shgroup->attribs_count);
362         UNUSED_VARS_NDEBUG(attr_len);
363
364         for (int i = 0; i < attr_len; ++i) {
365                 if (shgroup->instance_count == shgroup->instance_vbo->vertex_ct) {
366                         GWN_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
367                 }
368                 GWN_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
369         }
370
371         shgroup->instance_count += 1;
372 }
373
374 /** \} */
375
376 /* -------------------------------------------------------------------- */
377
378 /** \name Shading Groups (DRW_shgroup)
379  * \{ */
380
/* Common setup for every shading group type: reset per-instance data,
 * register the per-view builtin uniforms, and cache the per-object builtin
 * uniform locations that are set at draw time. */
static void drw_interface_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
	shgroup->instance_geom = NULL;
	shgroup->instance_vbo = NULL;
	shgroup->instance_count = 0;
	shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
	shgroup->inst_selectid = NULL;
	shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
	shgroup->attribs_count = 0;
#endif

	/* Per-view data, identical for every call of the group. */
	/* TODO : They should be grouped inside a UBO updated once per redraw. */
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, DST.view_data.mat[DRW_MAT_VIEW], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, DST.view_data.mat[DRW_MAT_VIEWINV], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, DST.view_data.mat[DRW_MAT_PERS], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, DST.view_data.mat[DRW_MAT_PERSINV], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, DST.view_data.mat[DRW_MAT_WIN], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, DST.view_data.mat[DRW_MAT_WININV], 16, 1);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
	drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CLIPPLANES, DST.view_data.clip_planes_eq, 4, DST.num_clip_planes); /* TO REMOVE */

	/* Per-object matrices change between calls: only cache their locations
	 * here (-1 when the shader does not use them). */
	shgroup->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
	shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
	shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
	shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
	shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
	shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
	shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
	shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
	shgroup->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);

	/* Record which matrices the shader actually needs, so draw code can
	 * skip computing the others. */
	shgroup->matflag = 0;
	if (shgroup->modelinverse > -1)
		shgroup->matflag |= DRW_CALL_MODELINVERSE;
	if (shgroup->modelview > -1)
		shgroup->matflag |= DRW_CALL_MODELVIEW;
	if (shgroup->modelviewinverse > -1)
		shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
	if (shgroup->modelviewprojection > -1)
		shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
	if (shgroup->normalview > -1)
		shgroup->matflag |= DRW_CALL_NORMALVIEW;
	if (shgroup->normalworld > -1)
		shgroup->matflag |= DRW_CALL_NORMALWORLD;
	if (shgroup->orcotexfac > -1)
		shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
	if (shgroup->eye > -1)
		shgroup->matflag |= DRW_CALL_EYEVEC;
}
433
/* Interface setup for instancing groups: 'batch' is the geometry repeated
 * per instance; 'format' (may be NULL) describes the per-instance
 * attributes and triggers allocation of the instancing buffer. */
static void drw_interface_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	BLI_assert(batch != NULL);

	drw_interface_init(shgroup, shader);

	shgroup->instance_geom = batch;
#ifndef NDEBUG
	shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
#endif

	if (format != NULL) {
		/* May replace instance_geom with a wrapper batch owning the VBO. */
		DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
		                              &shgroup->instance_geom, &shgroup->instance_vbo);
	}
}
452
/* Interface setup for batching groups (point/line/triangle): requests a
 * batch buffer with the primitive type matching the group type.
 * 'format' is required (asserted). */
static void drw_interface_batching_init(
        DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format)
{
	drw_interface_init(shgroup, shader);

#ifndef NDEBUG
	shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
#endif
	BLI_assert(format != NULL);

	Gwn_PrimType type;
	switch (shgroup->type) {
		case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break;
		case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break;
		case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break;
		default: type = GWN_PRIM_NONE; BLI_assert(0); break;
	}

	DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
	                            &shgroup->batch_geom, &shgroup->batch_vbo);
}
474
/* Allocate a shading group from the view mempool, append it to 'pass' and
 * initialize common members. Interface setup is done by the callers. */
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
	DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

	BLI_LINKS_APPEND(&pass->shgroups, shgroup);

	shgroup->type = DRW_SHG_NORMAL;
	shgroup->shader = shader;
	shgroup->state_extra = 0;
	/* All bits set: nothing disabled by default. */
	shgroup->state_extra_disable = ~0x0;
	shgroup->stencil_mask = 0;
	shgroup->calls.first = NULL;
	shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
	shgroup->batch_geom = NULL;
	shgroup->batch_vbo = NULL;

	shgroup->instance_geom = NULL;
	shgroup->instance_vbo = NULL;
#endif

#ifdef USE_GPU_SELECT
	shgroup->pass_parent = pass;
#endif

	return shgroup;
}
502
503 static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
504 {
505         if (!gpupass) {
506                 /* Shader compilation error */
507                 return NULL;
508         }
509
510         DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
511         return grp;
512 }
513
/* Convert the dynamic inputs of a GPUPass (textures, color ramps, runtime
 * matrices) into DRWUniforms on 'grp', and bind the material UBO if one
 * exists. Scalar/vector constants are expected to already live in that UBO.
 * Returns 'grp' for convenience. */
static DRWShadingGroup *drw_shgroup_material_inputs(
         DRWShadingGroup *grp, struct GPUMaterial *material, GPUPass *gpupass)
{
	/* TODO : Ideally we should not convert. But since the whole codegen
	 * is relying on GPUPass we keep it as is for now. */

	/* Converting dynamic GPUInput to DRWUniform */
	ListBase *inputs = &gpupass->inputs;

	for (GPUInput *input = inputs->first; input; input = input->next) {
		/* Textures */
		if (input->ima) {
			double time = 0.0; /* TODO make time variable */
			GPUTexture *tex = GPU_texture_from_blender(
			        input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);

			if (input->bindtex) {
				DRW_shgroup_uniform_texture(grp, input->shadername, tex);
			}
		}
		/* Color Ramps */
		else if (input->tex) {
			DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
		}
		/* Floats */
		else {
			switch (input->type) {
				case GPU_FLOAT:
				case GPU_VEC2:
				case GPU_VEC3:
				case GPU_VEC4:
					/* Should already be in the material ubo. */
					break;
				case GPU_MAT3:
					DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
					break;
				case GPU_MAT4:
					DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
					break;
				default:
					break;
			}
		}
	}

	GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
	if (ubo != NULL) {
		DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
	}

	return grp;
}
566
567 Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
568 {
569         Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat");
570
571         for (int i = 0; i < arraysize; ++i) {
572                 GWN_vertformat_attr_add(format, attribs[i].name,
573                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32,
574                                         attribs[i].components,
575                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT);
576         }
577         return format;
578 }
579
/* Create a normal shading group for 'material' in 'pass'.
 * Returns NULL if the material shader failed to compile. */
DRWShadingGroup *DRW_shgroup_material_create(
        struct GPUMaterial *material, DRWPass *pass)
{
	GPUPass *gpupass = GPU_material_get_pass(material);
	DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

	if (shgroup) {
		drw_interface_init(shgroup, GPU_pass_shader(gpupass));
		drw_shgroup_material_inputs(shgroup, material, gpupass);
	}

	return shgroup;
}
593
/* Create an instancing shading group for 'material', instancing 'geom'.
 * Orco factors are taken from 'ob' data. Returns NULL on shader
 * compilation failure. */
DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format)
{
	GPUPass *gpupass = GPU_material_get_pass(material);
	DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

	if (shgroup) {
		shgroup->type = DRW_SHG_INSTANCE;
		shgroup->instance_geom = geom;
		drw_call_calc_orco(ob->data, shgroup->instance_orcofac);
		drw_interface_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
		drw_shgroup_material_inputs(shgroup, material, gpupass);
	}

	return shgroup;
}
610
/* Create a material group that draws 'tri_count' attribute-less triangles;
 * vertices are expected to be generated in the vertex shader.
 * Not compatible with GPU selection (asserted). */
DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
	BLI_assert((G.f & G_PICKSEL) == 0);
#endif
	GPUPass *gpupass = GPU_material_get_pass(material);
	DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

	if (shgroup) {
		/* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */
		drw_interface_init(shgroup, GPU_pass_shader(gpupass));
		shgroup->type = DRW_SHG_TRIANGLE_BATCH;
		shgroup->instance_count = tri_count * 3;
		drw_shgroup_material_inputs(shgroup, material, gpupass);
	}

	return shgroup;
}
630
/* Create a normal shading group for 'shader' in 'pass'. */
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
	DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
	drw_interface_init(shgroup, shader);
	return shgroup;
}
637
/* Create an instancing group that repeats 'geom' with per-instance
 * attributes described by 'format' (may be NULL for no attributes).
 * Orco factors default to identity (no object data). */
DRWShadingGroup *DRW_shgroup_instance_create(
        struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format)
{
	DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
	shgroup->type = DRW_SHG_INSTANCE;
	shgroup->instance_geom = geom;
	drw_call_calc_orco(NULL, shgroup->instance_orcofac);
	drw_interface_instance_init(shgroup, shader, geom, format);

	return shgroup;
}
649
/* Create a batching group that draws one point per added "pos" attribute. */
DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
	/* Lazily initializes the shared g_pos_format ("pos", 3 floats). */
	DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

	DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
	shgroup->type = DRW_SHG_POINT_BATCH;

	drw_interface_batching_init(shgroup, shader, g_pos_format);

	return shgroup;
}
661
/* Create a batching group that draws lines from pairs of "pos" attributes. */
DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
	/* Lazily initializes the shared g_pos_format ("pos", 3 floats). */
	DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});

	DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
	shgroup->type = DRW_SHG_LINE_BATCH;

	drw_interface_batching_init(shgroup, shader, g_pos_format);

	return shgroup;
}
673
/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and don't need any VBO attribute. */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
	/* Not compatible with GPU selection. */
	BLI_assert((G.f & G_PICKSEL) == 0);
#endif
	DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

	/* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */
	drw_interface_init(shgroup, shader);

	shgroup->type = DRW_SHG_TRIANGLE_BATCH;
	/* 3 vertices per triangle; there is no VBO, the vertex shader
	 * positions them. */
	shgroup->instance_count = tri_count * 3;

	return shgroup;
}
692
693 /* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	/* Must be called before any per-instance attribute is added. */
	BLI_assert(shgroup->instance_count == 0);
	/* You cannot use external instancing batch without a dummy format. */
	BLI_assert(shgroup->attribs_count != 0);

	shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
	drw_call_calc_orco(NULL, shgroup->instance_orcofac);
	/* PERF : This destroys the vaos cache so better check if it's necessary. */
	/* Note: This WILL break if batch->verts[0] is destroyed and reallocated
	 * at the same address. Bindings/VAOs would remain obsolete. */
	//if (shgroup->instancing_geom->inst != batch->verts[0])
	GWN_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
	/* One select id for the whole batch (no per-instance ids). */
	shgroup->override_selectid = DST.select_id;
#endif
}
713
714 /* Used for instancing with no attributes */
void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, unsigned int count)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	/* Count must not have been set or accumulated before. */
	BLI_assert(shgroup->instance_count == 0);
	/* Only valid for attribute-less instancing. */
	BLI_assert(shgroup->attribs_count == 0);

#ifdef USE_GPU_SELECT
	if (G.f & G_PICKSEL) {
		shgroup->override_selectid = DST.select_id;
	}
#endif

	shgroup->instance_count = count;
}
729
/* Return the number of instances currently queued on the group. */
unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
	return shgroup->instance_count;
}
734
735 /**
736  * State is added to #Pass.state while drawing.
737  * Use to temporarily enable draw options.
738  */
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra |= state;
}
743
/* Opposite of DRW_shgroup_state_enable: clear 'state' bits from the
 * disable mask so they are removed from the pass state while drawing. */
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra_disable &= ~state;
}
748
/* Set the stencil reference/mask for this group (8-bit value, asserted). */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
	BLI_assert(mask <= 255);
	shgroup->stencil_mask = mask;
}
754
755 /** \} */
756
757 /* -------------------------------------------------------------------- */
758
759 /** \name Passes (DRW_pass)
760  * \{ */
761
/* Allocate a pass from the view mempool with the given draw state.
 * The name is only kept in debug runs (debug_value > 20). */
DRWPass *DRW_pass_create(const char *name, DRWState state)
{
	DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
	pass->state = state;
	if (G.debug_value > 20) {
		BLI_strncpy(pass->name, name, MAX_PASS_NAME);
	}

	pass->shgroups.first = NULL;
	pass->shgroups.last = NULL;

	return pass;
}
775
/* Replace the pass draw state. */
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
	pass->state = state;
}
780
/* Reset the pass shgroup list. Memory itself is owned by the view
 * mempools, so nothing is freed here. */
void DRW_pass_free(DRWPass *pass)
{
	pass->shgroups.first = NULL;
	pass->shgroups.last = NULL;
}
786
/* Invoke 'callback' on every shading group of the pass, in list order. */
void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
{
	for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
		callback(userData, shgroup);
	}
}
793
/* Thunk data for pass_shgroup_dist_sort: view axis and origin used to
 * measure distance of each group's first call. */
typedef struct ZSortData {
	float *axis;
	float *origin;
} ZSortData;
798
799 static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
800 {
801         const ZSortData *zsortdata = (ZSortData *)thunk;
802         const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
803         const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
804
805         const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
806         const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
807
808         if (call_a == NULL) return -1;
809         if (call_b == NULL) return -1;
810
811         float tmp[3];
812         sub_v3_v3v3(tmp, zsortdata->origin, call_a->state.model[3]);
813         const float a_sq = dot_v3v3(zsortdata->axis, tmp);
814         sub_v3_v3v3(tmp, zsortdata->origin, call_b->state.model[3]);
815         const float b_sq = dot_v3v3(zsortdata->axis, tmp);
816
817         if      (a_sq < b_sq) return  1;
818         else if (a_sq > b_sq) return -1;
819         else {
820                 /* If there is a depth prepass put it before */
821                 if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
822                         return -1;
823                 }
824                 else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
825                         return  1;
826                 }
827                 else return  0;
828         }
829 }
830
831 /* ------------------ Shading group sorting --------------------- */
832
833 #define SORT_IMPL_LINKTYPE DRWShadingGroup
834
835 #define SORT_IMPL_USE_THUNK
836 #define SORT_IMPL_FUNC shgroup_sort_fn_r
837 #include "../../blenlib/intern/list_sort_impl.h"
838 #undef SORT_IMPL_FUNC
839 #undef SORT_IMPL_USE_THUNK
840
841 #undef SORT_IMPL_LINKTYPE
842
/**
 * Sort Shading groups by decreasing Z of their first draw call.
 * This is useful for order-dependent effects such as transparency.
 **/
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
	float (*viewinv)[4];
	viewinv = DST.view_data.mat[DRW_MAT_VIEWINV];

	/* viewinv[2] = view Z axis, viewinv[3] = view origin (world space). */
	ZSortData zsortdata = {viewinv[2], viewinv[3]};

	/* Only sort when there are at least two groups. */
	if (pass->shgroups.first && pass->shgroups.first->next) {
		pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);

		/* The sort relinked the list; walk it to restore the tail pointer. */
		/* Find the next last */
		DRWShadingGroup *last = pass->shgroups.first;
		while ((last = last->next)) {
			/* Do nothing */
		}
		pass->shgroups.last = last;
	}
}
865
866 /** \} */