DRW: Cleanup: Make DRW_shgroup_uniform_mat4 uniform expect float (*)[4]
[blender.git] / source / blender / draw / intern / draw_manager_data.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager_data.c
23  *  \ingroup draw
24  */
25
26 #include "draw_manager.h"
27
28 #include "BKE_curve.h"
29 #include "BKE_global.h"
30 #include "BKE_mesh.h"
31 #include "BKE_object.h"
32 #include "BKE_paint.h"
33 #include "BKE_pbvh.h"
34
35 #include "DNA_curve_types.h"
36 #include "DNA_mesh_types.h"
37 #include "DNA_meta_types.h"
38
39 #include "BLI_link_utils.h"
40 #include "BLI_mempool.h"
41
42 #include "intern/gpu_codegen.h"
43
44 struct Gwn_VertFormat *g_pos_format = NULL;
45
46 extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */
47
48 /* -------------------------------------------------------------------- */
49
50 /** \name Uniform Buffer Object (DRW_uniformbuffer)
51  * \{ */
52
53 GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
54 {
55         return GPU_uniformbuffer_create(size, data, NULL);
56 }
57
58 void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
59 {
60         GPU_uniformbuffer_update(ubo, data);
61 }
62
63 void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
64 {
65         GPU_uniformbuffer_free(ubo);
66 }
67
68 /** \} */
69
70 /* -------------------------------------------------------------------- */
71
72 /** \name Uniforms (DRW_shgroup_uniform)
73  * \{ */
74
75 static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
76                                             DRWUniformType type, const void *value, int length, int arraysize)
77 {
78         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
79         uni->location = loc;
80         uni->type = type;
81         uni->value = value;
82         uni->length = length;
83         uni->arraysize = arraysize;
84
85         BLI_LINKS_PREPEND(shgroup->uniforms, uni);
86 }
87
88 static void drw_shgroup_builtin_uniform(
89         DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
90 {
91         int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
92
93         if (loc != -1) {
94                 drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
95         }
96 }
97
98 static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
99                                   DRWUniformType type, const void *value, int length, int arraysize)
100 {
101         int location;
102         if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
103                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
104         }
105         else {
106                 location = GPU_shader_get_uniform(shgroup->shader, name);
107         }
108
109         if (location == -1) {
110                 if (G.debug & G_DEBUG)
111                         fprintf(stderr, "Uniform '%s' not found!\n", name);
112                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
113                 // BLI_assert(0);
114                 return;
115         }
116
117         BLI_assert(arraysize > 0 && arraysize <= 16);
118         BLI_assert(length >= 0 && length <= 16);
119
120         drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
121 }
122
123 void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
124 {
125         BLI_assert(tex != NULL);
126         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
127 }
128
129 /* Same as DRW_shgroup_uniform_texture but is garanteed to be bound if shader does not change between shgrp. */
130 void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
131 {
132         BLI_assert(tex != NULL);
133         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
134 }
135
136 void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
137 {
138         BLI_assert(ubo != NULL);
139         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
140 }
141
142 /* Same as DRW_shgroup_uniform_block but is garanteed to be bound if shader does not change between shgrp. */
143 void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
144 {
145         BLI_assert(ubo != NULL);
146         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
147 }
148
149 void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
150 {
151         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
152 }
153
154 void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
155 {
156         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
157 }
158
159 void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
160 {
161         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
162 }
163
164 void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
165 {
166         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
167 }
168
169 void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
170 {
171         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
172 }
173
174 void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
175 {
176         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
177 }
178
179 void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
180 {
181         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
182 }
183
184 void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
185 {
186         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
187 }
188
189 void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
190 {
191         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
192 }
193
194 void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
195 {
196         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
197 }
198
199 void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
200 {
201         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
202 }
203
204 void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
205 {
206         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
207 }
208
209 void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
210 {
211         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
212 }
213
214 /* Stores the int instead of a pointer. */
215 void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
216 {
217         drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, SET_INT_IN_POINTER(value), 1, 1);
218 }
219
220 /** \} */
221
222 /* -------------------------------------------------------------------- */
223
224 /** \name Draw Call (DRW_calls)
225  * \{ */
226
/* Compute the "orco" (original/undeformed texture coordinate) factors for an
 * object's data so the shader can reconstruct orcos with a single MADD:
 * r_orcofacs[0] is the offset, r_orcofacs[1] the scale. Objects without a
 * texture space (or ob == NULL) get identity factors. */
static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
        ID *ob_data = (ob) ? ob->data : NULL;
        float *texcoloc = NULL;
        float *texcosize = NULL;
        if (ob_data != NULL) {
                switch (GS(ob_data->name)) {
                        case ID_ME:
                                /* Only the location & size of the mesh texspace are needed. */
                                BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
                                break;
                        case ID_CU:
                        {
                                Curve *cu = (Curve *)ob_data;
                                /* Curve texspace may be stale; recompute on demand. */
                                if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
                                        BKE_curve_texspace_calc(cu);
                                }
                                texcoloc = cu->loc;
                                texcosize = cu->size;
                                break;
                        }
                        case ID_MB:
                        {
                                MetaBall *mb = (MetaBall *)ob_data;
                                texcoloc = mb->loc;
                                texcosize = mb->size;
                                break;
                        }
                        default:
                                /* Other data types have no texture space. */
                                break;
                }
        }

        if ((texcoloc != NULL) && (texcosize != NULL)) {
                /* scale = 1 / (2 * size); offset = -(loc - size) * scale */
                mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
                invert_v3(r_orcofacs[1]);
                sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
                negate_v3(r_orcofacs[0]);
                mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
        }
        else {
                /* Identity mapping: offset 0, scale 1. */
                copy_v3_fl(r_orcofacs[0], 0.0f);
                copy_v3_fl(r_orcofacs[1], 1.0f);
        }
}
271
/* Allocate and initialize the per-call state: model matrix, culling bound
 * sphere (derived from the object's bound box, when available) and the
 * matrix flags requested by the shading group. `obmat` may be NULL (identity),
 * `ob` may be NULL (culling disabled for this call). */
static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
        DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
        state->flag = 0;
        state->cache_id = 0;
        state->matflag = shgroup->matflag;

        /* Matrices */
        if (obmat != NULL) {
                copy_m4_m4(state->model, obmat);

                /* Negative scale needs front/back face winding flipped at draw time. */
                if (is_negative_m4(state->model)) {
                        state->flag |= DRW_CALL_NEGSCALE;
                }
        }
        else {
                unit_m4(state->model);
        }

        if (ob != NULL) {
                float corner[3];
                BoundBox *bbox = BKE_object_boundbox_get(ob);
                /* Get BoundSphere center and radius from the BoundBox. */
                mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
                /* Transform center and one corner to world space; radius is their distance. */
                mul_v3_m4v3(corner, obmat, bbox->vec[0]);
                mul_m4_v3(obmat, state->bsphere.center);
                state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
        }
        else {
                /* Bypass test. */
                state->bsphere.radius = -1.0f;
        }

        /* Orco factors: We compute this at creation to not have to save the *ob_data */
        if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
                drw_call_calc_orco(ob, state->orcotexfac);
                /* Cleared so the factors are not recomputed each redraw. */
                state->matflag &= ~DRW_CALL_ORCOTEXFAC;
        }

        return state;
}
313
314 static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
315 {
316         if (DST.ob_state == NULL) {
317                 DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
318         }
319         else {
320                 /* If the DRWCallState is reused, add necessary matrices. */
321                 DST.ob_state->matflag |= shgroup->matflag;
322         }
323
324         return DST.ob_state;
325 }
326
327 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
328 {
329         BLI_assert(geom != NULL);
330         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
331
332         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
333         call->state = drw_call_state_create(shgroup, obmat, NULL);
334         call->type = DRW_CALL_SINGLE;
335         call->single.geometry = geom;
336 #ifdef USE_GPU_SELECT
337         call->select_id = DST.select_id;
338 #endif
339
340         BLI_LINKS_APPEND(&shgroup->calls, call);
341 }
342
343 /* These calls can be culled and are optimized for redraw */
344 void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
345 {
346         BLI_assert(geom != NULL);
347         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
348
349         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
350         call->state = drw_call_state_object(shgroup, ob->obmat, ob);
351         call->type = DRW_CALL_SINGLE;
352         call->single.geometry = geom;
353 #ifdef USE_GPU_SELECT
354         call->select_id = DST.select_id;
355 #endif
356
357         BLI_LINKS_APPEND(&shgroup->calls, call);
358 }
359
360 void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4], unsigned int *count)
361 {
362         BLI_assert(geom != NULL);
363         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
364
365         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
366         call->state = drw_call_state_create(shgroup, obmat, NULL);
367         call->type = DRW_CALL_INSTANCES;
368         call->instances.geometry = geom;
369         call->instances.count = count;
370 #ifdef USE_GPU_SELECT
371         call->select_id = DST.select_id;
372 #endif
373
374         BLI_LINKS_APPEND(&shgroup->calls, call);
375 }
376
377 /* These calls can be culled and are optimized for redraw */
378 void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob, unsigned int *count)
379 {
380         BLI_assert(geom != NULL);
381         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
382
383         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
384         call->state = drw_call_state_object(shgroup, ob->obmat, ob);
385         call->type = DRW_CALL_INSTANCES;
386         call->instances.geometry = geom;
387         call->instances.count = count;
388 #ifdef USE_GPU_SELECT
389         call->select_id = DST.select_id;
390 #endif
391
392         BLI_LINKS_APPEND(&shgroup->calls, call);
393 }
394
395 void DRW_shgroup_call_generate_add(
396         DRWShadingGroup *shgroup,
397         DRWCallGenerateFn *geometry_fn, void *user_data,
398         float (*obmat)[4])
399 {
400         BLI_assert(geometry_fn != NULL);
401         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
402
403         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
404         call->state = drw_call_state_create(shgroup, obmat, NULL);
405         call->type = DRW_CALL_GENERATE;
406         call->generate.geometry_fn = geometry_fn;
407         call->generate.user_data = user_data;
408 #ifdef USE_GPU_SELECT
409         call->select_id = DST.select_id;
410 #endif
411
412         BLI_LINKS_APPEND(&shgroup->calls, call);
413 }
414
415 static void sculpt_draw_cb(
416         DRWShadingGroup *shgroup,
417         void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
418         void *user_data)
419 {
420         Object *ob = user_data;
421         PBVH *pbvh = ob->sculpt->pbvh;
422
423         if (pbvh) {
424                 BKE_pbvh_draw_cb(
425                         pbvh, NULL, NULL, false,
426                         (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
427         }
428 }
429
430 void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
431 {
432         DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
433 }
434
435 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
436 {
437 #ifdef USE_GPU_SELECT
438         if (G.f & G_PICKSEL) {
439                 if (shgroup->inst_selectid == NULL) {
440                         shgroup->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
441                 }
442
443                 int *select_id = DRW_instance_data_next(shgroup->inst_selectid);
444                 *select_id = DST.select_id;
445         }
446 #endif
447
448         BLI_assert(attr_len == shgroup->attribs_count);
449         UNUSED_VARS_NDEBUG(attr_len);
450
451         for (int i = 0; i < attr_len; ++i) {
452                 if (shgroup->instance_count == shgroup->instance_vbo->vertex_ct) {
453                         GWN_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
454                 }
455                 GWN_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
456         }
457
458         shgroup->instance_count += 1;
459 }
460
461 /** \} */
462
463 /* -------------------------------------------------------------------- */
464
465 /** \name Shading Groups (DRW_shgroup)
466  * \{ */
467
/* Common shading-group initialization: reset per-group state, wire up the
 * view matrices (via the shared view UBO when the shader declares
 * "viewBlock", else via individual builtin uniforms), cache the per-call
 * builtin uniform locations, and derive `matflag` — the set of matrices that
 * must be (re)computed for every draw call of this group. */
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
        shgroup->instance_count = 0;
        shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
        shgroup->inst_selectid = NULL;
        shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
        shgroup->attribs_count = 0;
#endif

        int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");

        if (view_ubo_location != -1) {
                /* Preferred path: one persistent UBO shared by all engine shaders. */
                drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
        }
        else {
                /* Only here to support builtin shaders. This should not be used by engines. */
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
                drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
        }

        /* Cache locations of the per-call (model-dependent) builtins; -1 when unused. */
        shgroup->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
        shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
        shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
        shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
        shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
        shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
        shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
        shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
        shgroup->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
        shgroup->callid = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CALLID);

        /* Only matrices whose uniform actually exists need per-call computation. */
        shgroup->matflag = 0;
        if (shgroup->modelinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELINVERSE;
        if (shgroup->modelview > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEW;
        if (shgroup->modelviewinverse > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
        if (shgroup->modelviewprojection > -1)
                shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
        if (shgroup->normalview > -1)
                shgroup->matflag |= DRW_CALL_NORMALVIEW;
        if (shgroup->normalworld > -1)
                shgroup->matflag |= DRW_CALL_NORMALWORLD;
        if (shgroup->orcotexfac > -1)
                shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
        if (shgroup->eye > -1)
                shgroup->matflag |= DRW_CALL_EYEVEC;
}
527
/* Initialize an instancing shading group: `batch` is the geometry drawn per
 * instance; `format` describes the per-instance attributes. The instancing
 * buffer request may substitute a shared instance_geom/instance_vbo pair. */
static void drw_shgroup_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(batch != NULL);
        BLI_assert(format != NULL);

        drw_shgroup_init(shgroup, shader);

        shgroup->instance_geom = batch;
#ifndef NDEBUG
        /* Debug-only: lets DRW_shgroup_call_dynamic_add_array validate attr counts. */
        shgroup->attribs_count = format->attrib_ct;
#endif

        DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                      &shgroup->instance_geom, &shgroup->instance_vbo);
}
545
546 static void drw_shgroup_batching_init(
547         DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format)
548 {
549         drw_shgroup_init(shgroup, shader);
550
551 #ifndef NDEBUG
552         shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
553 #endif
554         BLI_assert(format != NULL);
555
556         Gwn_PrimType type;
557         switch (shgroup->type) {
558                 case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break;
559                 case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break;
560                 case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break;
561                 default: type = GWN_PRIM_NONE; BLI_assert(0); break;
562         }
563
564         DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
565                                     &shgroup->batch_geom, &shgroup->batch_vbo);
566 }
567
/* Allocate a shading group from the viewport mempool, append it to `pass`
 * and set its default state. Does NOT call drw_shgroup_init — callers do
 * that once the group type is known. */
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
        DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

        BLI_LINKS_APPEND(&pass->shgroups, shgroup);

        shgroup->type = DRW_SHG_NORMAL;
        shgroup->shader = shader;
        shgroup->state_extra = 0;
        /* All state bits enabled by default (disable mask is all-ones). */
        shgroup->state_extra_disable = ~0x0;
        shgroup->stencil_mask = 0;
        shgroup->calls.first = NULL;
        shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
        shgroup->batch_geom = NULL;
        shgroup->batch_vbo = NULL;

        shgroup->instance_geom = NULL;
        shgroup->instance_vbo = NULL;
#endif

#ifdef USE_GPU_SELECT
        shgroup->pass_parent = pass;
#endif

        return shgroup;
}
595
596 static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
597 {
598         if (!gpupass) {
599                 /* Shader compilation error */
600                 return NULL;
601         }
602
603         DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
604         return grp;
605 }
606
/* Translate a GPUMaterial's dynamic inputs (textures, color ramps, dynamic
 * matrices) into DRW uniforms on `grp`, and bind the material's UBO if it
 * has one. Returns `grp` for chaining. */
static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
{
        /* TODO : Ideally we should not convert. But since the whole codegen
         * is relying on GPUPass we keep it as is for now. */

        ListBase *inputs = GPU_material_get_inputs(material);

        /* Converting dynamic GPUInput to DRWUniform */
        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->ima) {
                        double time = 0.0; /* TODO make time variable */
                        GPUTexture *tex = GPU_texture_from_blender(
                                input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);

                        if (input->bindtex) {
                                DRW_shgroup_uniform_texture(grp, input->shadername, tex);
                        }
                }
                /* Color Ramps */
                else if (input->tex) {
                        DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
                }
                /* Floats */
                else {
                        switch (input->type) {
                                case GPU_FLOAT:
                                case GPU_VEC2:
                                case GPU_VEC3:
                                case GPU_VEC4:
                                        /* Should already be in the material ubo. */
                                        break;
                                case GPU_MAT3:
                                        DRW_shgroup_uniform_mat3(grp, input->shadername, (float (*)[3])input->dynamicvec);
                                        break;
                                case GPU_MAT4:
                                        DRW_shgroup_uniform_mat4(grp, input->shadername, (float (*)[4])input->dynamicvec);
                                        break;
                                default:
                                        break;
                        }
                }
        }

        /* Bind the material's value UBO (holds the scalar/vector inputs above). */
        GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}
658
659 Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
660 {
661         Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat");
662
663         for (int i = 0; i < arraysize; ++i) {
664                 GWN_vertformat_attr_add(format, attribs[i].name,
665                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32,
666                                         attribs[i].components,
667                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT);
668         }
669         return format;
670 }
671
672 DRWShadingGroup *DRW_shgroup_material_create(
673         struct GPUMaterial *material, DRWPass *pass)
674 {
675         GPUPass *gpupass = GPU_material_get_pass(material);
676         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
677
678         if (shgroup) {
679                 drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
680                 drw_shgroup_material_inputs(shgroup, material);
681         }
682
683         return shgroup;
684 }
685
/* Create an instancing shading group from a GPUMaterial. `geom` is drawn once
 * per instance with attributes described by `format`; `ob` supplies the orco
 * texture-space factors. Returns NULL on shader compilation error. */
DRWShadingGroup *DRW_shgroup_material_instance_create(
        struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format)
{
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                shgroup->type = DRW_SHG_INSTANCE;
                shgroup->instance_geom = geom;
                /* Orco factors computed once here; instance_init may swap instance_geom. */
                drw_call_calc_orco(ob, shgroup->instance_orcofac);
                drw_shgroup_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}
702
/* Create a material shading group that draws `tri_count` attribute-less
 * triangles — vertices are expected to be positioned entirely in the vertex
 * shader. Not compatible with GPU selection. Returns NULL on shader error. */
DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_shgroup_init will cause it to call GWN_draw_primitive(). */
                drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                /* 3 vertices per triangle; instance_count doubles as vertex count here. */
                shgroup->instance_count = tri_count * 3;
                drw_shgroup_material_inputs(shgroup, material);
        }

        return shgroup;
}
722
723 DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
724 {
725         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
726         drw_shgroup_init(shgroup, shader);
727         return shgroup;
728 }
729
730 DRWShadingGroup *DRW_shgroup_instance_create(
731         struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format)
732 {
733         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
734         shgroup->type = DRW_SHG_INSTANCE;
735         shgroup->instance_geom = geom;
736         drw_call_calc_orco(NULL, shgroup->instance_orcofac);
737         drw_shgroup_instance_init(shgroup, shader, geom, format);
738
739         return shgroup;
740 }
741
742 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
743 {
744         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
745
746         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
747         shgroup->type = DRW_SHG_POINT_BATCH;
748
749         drw_shgroup_batching_init(shgroup, shader, g_pos_format);
750
751         return shgroup;
752 }
753
754 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
755 {
756         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
757
758         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
759         shgroup->type = DRW_SHG_LINE_BATCH;
760
761         drw_shgroup_batching_init(shgroup, shader, g_pos_format);
762
763         return shgroup;
764 }
765
/* Very special batch. Use this if you position
 * your vertices with the vertex shader
 * and dont need any VBO attrib */
DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        /* Attribute-less drawing is incompatible with GPU selection. */
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);

        /* Calling drw_shgroup_init will cause it to call GWN_draw_primitive(). */
        drw_shgroup_init(shgroup, shader);

        shgroup->type = DRW_SHG_TRIANGLE_BATCH;
        /* 3 vertices per triangle; instance_count doubles as vertex count here. */
        shgroup->instance_count = tri_count * 3;

        return shgroup;
}
784
/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch)
{
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_count == 0);
        /* You cannot use external instancing batch without a dummy format. */
        BLI_assert(shgroup->attribs_count != 0);

        shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
        drw_call_calc_orco(NULL, shgroup->instance_orcofac);
        /* PERF : This destroys the vaos cache so better check if it's necessary. */
        /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
         * at the same address. Bindings/VAOs would remain obsolete. */
        //if (shgroup->instancing_geom->inst != batch->verts[0])
        GWN_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
        /* One select id for the whole external batch (per-instance ids unavailable). */
        shgroup->override_selectid = DST.select_id;
#endif
}
805
/* Return the number of instances (or batched vertices) queued in this group. */
unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
	return shgroup->instance_count;
}
810
811 /**
812  * State is added to #Pass.state while drawing.
813  * Use to temporarily enable draw options.
814  */
815 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
816 {
817         shgroup->state_extra |= state;
818 }
819
/**
 * Remove `state` bits from #Pass.state while drawing this group.
 * Clears the bits from the group's keep-enabled mask.
 * NOTE(review): assumes `state_extra_disable` starts with all bits set at
 * group creation so that `&= ~state` disables only the requested states —
 * confirm in drw_shgroup_create_ex.
 */
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra_disable &= ~state;
}
824
/* Set the stencil mask used while drawing this group.
 * Only 8-bit masks are supported (stencil buffer depth). */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
	BLI_assert(mask <= 255);
	shgroup->stencil_mask = mask;
}
830
831 /** \} */
832
833 /* -------------------------------------------------------------------- */
834
835 /** \name Passes (DRW_pass)
836  * \{ */
837
838 DRWPass *DRW_pass_create(const char *name, DRWState state)
839 {
840         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
841         pass->state = state;
842         if (G.debug_value > 20) {
843                 BLI_strncpy(pass->name, name, MAX_PASS_NAME);
844         }
845
846         pass->shgroups.first = NULL;
847         pass->shgroups.last = NULL;
848
849         return pass;
850 }
851
/* Replace the pass draw state wholesale. */
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
	pass->state = state;
}
856
/* "Free" a pass: only the shading group list heads are reset.
 * NOTE(review): passes and (presumably) shading groups are allocated from
 * DST.vmempool (see #DRW_pass_create), so the memory itself is reclaimed
 * when the mempool is cleared elsewhere — confirm against the mempool
 * lifecycle in draw_manager.c. */
void DRW_pass_free(DRWPass *pass)
{
	pass->shgroups.first = NULL;
	pass->shgroups.last = NULL;
}
862
863 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
864 {
865         for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
866                 callback(userData, shgroup);
867         }
868 }
869
/* Thunk data for pass_shgroup_dist_sort(). */
typedef struct ZSortData {
	float *axis;   /* View Z axis (viewinv[2]) — depth projection direction. */
	float *origin; /* View origin (viewinv[3]). */
} ZSortData;
874
875 static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
876 {
877         const ZSortData *zsortdata = (ZSortData *)thunk;
878         const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
879         const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
880
881         const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
882         const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
883
884         if (call_a == NULL) return -1;
885         if (call_b == NULL) return -1;
886
887         float tmp[3];
888         sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
889         const float a_sq = dot_v3v3(zsortdata->axis, tmp);
890         sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
891         const float b_sq = dot_v3v3(zsortdata->axis, tmp);
892
893         if      (a_sq < b_sq) return  1;
894         else if (a_sq > b_sq) return -1;
895         else {
896                 /* If there is a depth prepass put it before */
897                 if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
898                         return -1;
899                 }
900                 else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
901                         return  1;
902                 }
903                 else return  0;
904         }
905 }
906
/* ------------------ Shading group sorting --------------------- */

/* Instantiate BLI's generic linked-list merge sort for DRWShadingGroup
 * nodes. SORT_IMPL_USE_THUNK makes the generated function accept a user
 * pointer that is forwarded to the comparator; the generated symbol is
 * shgroup_sort_fn_r(). */
#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE
918
919 /**
920  * Sort Shading groups by decreasing Z of their first draw call.
921  * This is usefull for order dependant effect such as transparency.
922  **/
923 void DRW_pass_sort_shgroup_z(DRWPass *pass)
924 {
925         float (*viewinv)[4];
926         viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
927
928         ZSortData zsortdata = {viewinv[2], viewinv[3]};
929
930         if (pass->shgroups.first && pass->shgroups.first->next) {
931                 pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
932
933                 /* Find the next last */
934                 DRWShadingGroup *last = pass->shgroups.first;
935                 while ((last = last->next)) {
936                         /* Do nothing */
937                 }
938                 pass->shgroups.last = last;
939         }
940 }
941
942 /** \} */