Cleanup: use shorter name for shader config
[blender.git] / source / blender / gpu / intern / gpu_batch.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"

#include "gpu_batch_private.h"
#include "gpu_context_private.h"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"

#include <stdlib.h>
#include <string.h>
static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

void GPU_batch_vao_cache_clear(GPUBatch *batch)
{
        if (batch->context == NULL) {
                return;
        }
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
                        if (batch->dynamic_vaos.vao_ids[i]) {
                                GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
                        }
                        if (batch->dynamic_vaos.interfaces[i]) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
                        }
                }
                MEM_freeN(batch->dynamic_vaos.interfaces);
                MEM_freeN(batch->dynamic_vaos.vao_ids);
        }
        else {
                for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                        if (batch->static_vaos.vao_ids[i]) {
                                GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
                        }
                        if (batch->static_vaos.interfaces[i]) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
                        }
                }
        }
        batch->is_dynamic_vao_count = false;
        for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                batch->static_vaos.vao_ids[i] = 0;
                batch->static_vaos.interfaces[i] = NULL;
        }
        gpu_context_remove_batch(batch->context, batch);
        batch->context = NULL;
}

GPUBatch *GPU_batch_create_ex(
        GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
        GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
        GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
        return batch;
}

void GPU_batch_init_ex(
        GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
#if TRUST_NO_ONE
        assert(verts != NULL);
#endif

        batch->verts[0] = verts;
        for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                batch->verts[v] = NULL;
        }
        batch->inst = NULL;
        batch->elem = elem;
        batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
        batch->phase = GPU_BATCH_READY_TO_DRAW;
        batch->is_dynamic_vao_count = false;
        batch->owns_flag = owns_flag;
        batch->free_callback = NULL;
}
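
/* Example (sketch): a minimal create/draw cycle with this API.
 * Assumes `vbo` is an already-filled GPUVertBuf; the builtin shader id and
 * its "color" uniform name are illustrative, not prescribed by this file.
 *
 *   GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
 *   GPU_batch_draw(batch);
 *   GPU_batch_discard(batch);  (also frees `vbo` because of GPU_BATCH_OWNS_VBO)
 */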

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
        GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

        batch_dst->gl_prim_type = batch_src->gl_prim_type;
        for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                batch_dst->verts[v] = batch_src->verts[v];
        }
}

void GPU_batch_clear(GPUBatch *batch)
{
        if (batch->free_callback) {
                batch->free_callback(batch, batch->callback_data);
        }

        if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
                GPU_indexbuf_discard(batch->elem);
        }
        if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
                GPU_vertbuf_discard(batch->inst);
        }
        if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
                for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                        if (batch->verts[v] == NULL) {
                                break;
                        }
                        if (batch->owns_flag & (1 << v)) {
                                GPU_vertbuf_discard(batch->verts[v]);
                        }
                }
        }
        GPU_batch_vao_cache_clear(batch);
}

void GPU_batch_discard(GPUBatch *batch)
{
        GPU_batch_clear(batch);
        MEM_freeN(batch);
}

void GPU_batch_callback_free_set(GPUBatch *batch, void (*callback)(GPUBatch *, void *), void *user_data)
{
        batch->free_callback = callback;
        batch->callback_data = user_data;
}
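
/* Example (sketch): a free callback matching the signature above. The callback
 * runs at the start of GPU_batch_clear(), before any owned buffers are
 * discarded. `MyData` is a hypothetical user struct.
 *
 *   static void my_batch_free_cb(GPUBatch *batch, void *user_data)
 *   {
 *           MyData *data = user_data;
 *           MEM_freeN(data);
 *   }
 *
 *   GPU_batch_callback_free_set(batch, my_batch_free_cb, data);
 */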

void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
#if TRUST_NO_ONE
        assert(inst != NULL);
#endif
        /* redo the bindings */
        GPU_batch_vao_cache_clear(batch);

        if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
                GPU_vertbuf_discard(batch->inst);
        }
        batch->inst = inst;

        if (own_vbo) {
                batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
        }
        else {
                batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
        }
}
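
/* Example (sketch): instanced drawing. `inst_vbo` is assumed to hold one
 * vertex per instance (e.g. a per-instance offset attribute), and the shader
 * it is drawn with must declare a matching attribute.
 *
 *   GPU_batch_instbuf_set(batch, inst_vbo, true);
 *   GPU_batch_program_set_builtin(batch, shader_id);
 *   GPU_batch_draw(batch);  (draws batch->verts once per instance)
 */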

/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(
        GPUBatch *batch, GPUVertBuf *verts,
        bool own_vbo)
{
        /* redo the bindings */
        GPU_batch_vao_cache_clear(batch);

        for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
                        /* for now all VertexBuffers must have the same vertex_len */
                        assert(verts->vertex_len == batch->verts[0]->vertex_len);
#endif
                        batch->verts[v] = verts;
                        /* TODO: mark dirty so we can keep attribute bindings up-to-date */
                        if (own_vbo)
                                batch->owns_flag |= (1 << v);
                        return v;
                }
        }

        /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
        assert(false);
#endif
        return -1;
}
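
/* Look up (or create) a VAO for the batch's current shader interface.
 * The cache starts as a small fixed-size array (GPU_BATCH_VAO_STATIC_LEN
 * entries) and is converted to heap-allocated, growable arrays once more
 * interfaces than that have been used with the same batch. */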
static GLuint batch_vao_get(GPUBatch *batch)
{
        /* Search through cache */
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i)
                        if (batch->dynamic_vaos.interfaces[i] == batch->interface)
                                return batch->dynamic_vaos.vao_ids[i];
        }
        else {
                for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
                        if (batch->static_vaos.interfaces[i] == batch->interface)
                                return batch->static_vaos.vao_ids[i];
        }

        /* Set context of this batch.
         * It will be bound to it until GPU_batch_vao_cache_clear is called.
         * Until then it can only be drawn with this context. */
        if (batch->context == NULL) {
                batch->context = GPU_context_active_get();
                gpu_context_add_batch(batch->context, batch);
        }
#if TRUST_NO_ONE
        else {
                /* Make sure you are not trying to draw this batch in another context. */
                assert(batch->context == GPU_context_active_get());
        }
#endif

        /* Cache miss, time to add a new entry! */
        GLuint new_vao = 0;
        if (!batch->is_dynamic_vao_count) {
                int i; /* find first unused slot */
                for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
                        if (batch->static_vaos.vao_ids[i] == 0)
                                break;

                if (i < GPU_BATCH_VAO_STATIC_LEN) {
                        batch->static_vaos.interfaces[i] = batch->interface;
                        batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
                }
                else {
                        /* Not enough space, switch to dynamic. */
                        batch->is_dynamic_vao_count = true;
                        /* Erase previous entries, they will be added back if drawn again. */
                        for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
                                GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
                        }
                        /* Init dynamic arrays and let the branch below set the values. */
                        batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
                        batch->dynamic_vaos.interfaces = MEM_callocN(batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
                        batch->dynamic_vaos.vao_ids = MEM_callocN(batch->dynamic_vaos.count * sizeof(GLuint), "dyn vaos ids");
                }
        }

        if (batch->is_dynamic_vao_count) {
                int i; /* find first unused slot */
                for (i = 0; i < batch->dynamic_vaos.count; ++i)
                        if (batch->dynamic_vaos.vao_ids[i] == 0)
                                break;

                if (i == batch->dynamic_vaos.count) {
                        /* Not enough space, realloc the array. */
                        i = batch->dynamic_vaos.count;
                        batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
                        batch->dynamic_vaos.interfaces = MEM_recallocN(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface *) * batch->dynamic_vaos.count);
                        batch->dynamic_vaos.vao_ids = MEM_recallocN(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
                }
                batch->dynamic_vaos.interfaces[i] = batch->interface;
                batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
        }

        GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

#if TRUST_NO_ONE
        assert(new_vao != 0);
#endif

        /* We just got a fresh VAO, we need to initialize it. */
        glBindVertexArray(new_vao);
        batch_update_program_bindings(batch, 0);
        glBindVertexArray(0);

        return new_vao;
}

void GPU_batch_program_set_no_use(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
        assert(glIsProgram(shaderface->program));
        assert(batch->program_in_use == 0);
#endif
        batch->interface = shaderface;
        batch->program = program;
        batch->vao_id = batch_vao_get(batch);
}

void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
        GPU_batch_program_set_no_use(batch, program, shaderface);
        GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}
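
/* Example (sketch): GPU_batch_program_set() binds the program immediately
 * (see the "hack" note above), so uniforms can be set right after it. Use the
 * _no_use variant when the program should not stay bound. The uniform name
 * below is illustrative.
 *
 *   GPU_batch_program_set(batch, shader->program, shader->interface);
 *   GPU_batch_uniform_1f(batch, "some_uniform", 0.5f);
 *   GPU_batch_draw(batch);
 */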

void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
{
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
                        if (batch->dynamic_vaos.interfaces[i] == interface) {
                                GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
                                batch->dynamic_vaos.vao_ids[i] = 0;
                                batch->dynamic_vaos.interfaces[i] = NULL;
                                break; /* cannot have duplicates */
                        }
                }
        }
        else {
                int i;
                for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                        if (batch->static_vaos.interfaces[i] == interface) {
                                GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
                                batch->static_vaos.vao_ids[i] = 0;
                                batch->static_vaos.interfaces[i] = NULL;
                                break; /* cannot have duplicates */
                        }
                }
        }
}

static void create_bindings(
        GPUVertBuf *verts, const GPUShaderInterface *interface,
        uint v_first, const bool use_instancing)
{
        const GPUVertFormat *format = &verts->format;

        const uint attr_len = format->attr_len;
        const uint stride = format->stride;

        GPU_vertbuf_use(verts);

        for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
                const GPUVertAttr *a = &format->attrs[a_idx];
                const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

                for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
                        const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);

                        if (input == NULL) continue;
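
                        /* Multi-location attributes: comp_len 16/12/8 is treated as
                         * 4/3/2 consecutive vec4 columns (e.g. a mat4 attribute), each
                         * bound to its own location with a 16-byte column offset. */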
                        if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
                                assert(a->fetch_mode == GPU_FETCH_FLOAT);
                                assert(a->gl_comp_type == GL_FLOAT);
#endif
                                for (int i = 0; i < a->comp_len / 4; ++i) {
                                        glEnableVertexAttribArray(input->location + i);
                                        glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
                                        glVertexAttribPointer(input->location + i, 4, a->gl_comp_type, GL_FALSE, stride,
                                                              (const GLubyte *)pointer + i * 16);
                                }
                        }
                        else {
                                glEnableVertexAttribArray(input->location);
                                glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);

                                switch (a->fetch_mode) {
                                        case GPU_FETCH_FLOAT:
                                        case GPU_FETCH_INT_TO_FLOAT:
                                                glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
                                                break;
                                        case GPU_FETCH_INT_TO_FLOAT_UNIT:
                                                glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
                                                break;
                                        case GPU_FETCH_INT:
                                                glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
                                                break;
                                }
                        }
                }
        }
}

static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
{
        for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
                create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
        }
        if (batch->inst) {
                create_bindings(batch->inst, batch->interface, v_first, true);
        }
        if (batch->elem) {
                GPU_indexbuf_use(batch->elem);
        }
}

void GPU_batch_program_use_begin(GPUBatch *batch)
{
        /* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
         *       the GL context's active program. use_program doesn't mark other programs as "not used". */
        /* TODO: make not fragile (somehow) */

        if (!batch->program_in_use) {
                glUseProgram(batch->program);
                batch->program_in_use = true;
        }
}

void GPU_batch_program_use_end(GPUBatch *batch)
{
        if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
                glUseProgram(0);
#endif
                batch->program_in_use = false;
        }
}

#if TRUST_NO_ONE
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name); assert(uniform);
#else
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name);
#endif
void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, int value)
{
        GET_UNIFORM
        glUniform1ui(uniform->location, value);
}

void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
        GET_UNIFORM
        glUniform1i(uniform->location, value);
}

void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
        GET_UNIFORM
        glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}

void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
        GET_UNIFORM
        glUniform2f(uniform->location, x, y);
}

void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
        GET_UNIFORM
        glUniform3f(uniform->location, x, y, z);
}

void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
        GET_UNIFORM
        glUniform4f(uniform->location, x, y, z, w);
}

void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
        GET_UNIFORM
        glUniform1f(uniform->location, x);
}

void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
        GET_UNIFORM
        glUniform2fv(uniform->location, 1, data);
}

void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
        GET_UNIFORM
        glUniform3fv(uniform->location, 1, data);
}

void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
        GET_UNIFORM
        glUniform4fv(uniform->location, 1, data);
}

void GPU_batch_uniform_2fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
        GET_UNIFORM
        glUniform2fv(uniform->location, len, data);
}

void GPU_batch_uniform_4fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
        GET_UNIFORM
        glUniform4fv(uniform->location, len, data);
}

void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
        GET_UNIFORM
        glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}
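
/* Example (sketch): the *_array variants upload `len` consecutive elements
 * of a GLSL array uniform. The uniform name "offsets" is illustrative.
 *
 *   float offsets[4][2] = {{0, 0}, {1, 0}, {0, 1}, {1, 1}};
 *   GPU_batch_uniform_2fv_array(batch, "offsets", 4, &offsets[0][0]);
 */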

static void primitive_restart_enable(const GPUIndexBuf *el)
{
        // TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have OpenGL 4.3
        glEnable(GL_PRIMITIVE_RESTART);
        GLuint restart_index = (GLuint)0xFFFFFFFF;

#if GPU_TRACK_INDEX_RANGE
        if (el->index_type == GPU_INDEX_U8)
                restart_index = (GLuint)0xFF;
        else if (el->index_type == GPU_INDEX_U16)
                restart_index = (GLuint)0xFFFF;
#endif

        glPrimitiveRestartIndex(restart_index);
}

static void primitive_restart_disable(void)
{
        glDisable(GL_PRIMITIVE_RESTART);
}

static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
        if (el->index_type == GPU_INDEX_U8)
                return (GLubyte *)0 + v_first;
        else if (el->index_type == GPU_INDEX_U16)
                return (GLushort *)0 + v_first;
        else
#endif
                return (GLuint *)0 + v_first;
}

void GPU_batch_draw(GPUBatch *batch)
{
#if TRUST_NO_ONE
        assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
        assert(batch->verts[0]->vbo_id != 0);
#endif
        GPU_batch_program_use_begin(batch);
        GPU_matrix_bind(batch->interface); // external call.

        GPU_batch_draw_range_ex(batch, 0, 0, false);

        GPU_batch_program_use_end(batch);
}
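
/* Example (sketch): GPU_batch_draw() is the common path; for partial draws
 * the range variant can be called directly once a program is bound.
 * (GPU_batch_draw() additionally binds the matrix state via GPU_matrix_bind().)
 *
 *   GPU_batch_program_use_begin(batch);
 *   GPU_batch_draw_range_ex(batch, v_first, v_count, false);
 *   GPU_batch_program_use_end(batch);
 */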

void GPU_batch_draw_range_ex(GPUBatch *batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
        assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
#endif

        const bool do_instance = (force_instance || batch->inst);

        // If using offset drawing, use the default VAO and redo bindings.
        if (v_first != 0 && do_instance) {
                glBindVertexArray(GPU_vao_default());
                batch_update_program_bindings(batch, v_first);
        }
        else {
                glBindVertexArray(batch->vao_id);
        }

        if (do_instance) {
                /* Infer length if vertex count is not given */
                if (v_count == 0) {
                        v_count = batch->inst->vertex_len;
                }

                if (batch->elem) {
                        const GPUIndexBuf *el = batch->elem;

                        if (el->use_prim_restart) {
                                primitive_restart_enable(el);
                        }
#if GPU_TRACK_INDEX_RANGE
                        glDrawElementsInstancedBaseVertex(batch->gl_prim_type,
                                                          el->index_len,
                                                          el->gl_index_type,
                                                          0,
                                                          v_count,
                                                          el->base_index);
#else
                        glDrawElementsInstanced(batch->gl_prim_type, el->index_len, GL_UNSIGNED_INT, 0, v_count);
#endif
                        if (el->use_prim_restart) {
                                primitive_restart_disable();
                        }
                }
                else {
                        glDrawArraysInstanced(batch->gl_prim_type, 0, batch->verts[0]->vertex_len, v_count);
                }
        }
        else {
                /* Infer length if vertex count is not given */
                if (v_count == 0) {
                        v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
                }

                if (batch->elem) {
                        const GPUIndexBuf *el = batch->elem;

                        if (el->use_prim_restart) {
                                primitive_restart_enable(el);
                        }

                        void *v_first_ofs = elem_offset(el, v_first);

#if GPU_TRACK_INDEX_RANGE
                        if (el->base_index) {
                                glDrawRangeElementsBaseVertex(
                                        batch->gl_prim_type,
                                        el->min_index,
                                        el->max_index,
                                        v_count,
                                        el->gl_index_type,
                                        v_first_ofs,
                                        el->base_index);
                        }
                        else {
                                glDrawRangeElements(batch->gl_prim_type, el->min_index, el->max_index, v_count, el->gl_index_type, v_first_ofs);
                        }
#else
                        glDrawElements(batch->gl_prim_type, v_count, GL_UNSIGNED_INT, v_first_ofs);
#endif
                        if (el->use_prim_restart) {
                                primitive_restart_disable();
                        }
                }
                else {
                        glDrawArrays(batch->gl_prim_type, v_first, v_count);
                }
        }

        /* Performance hog if you are drawing with the same VAO multiple times.
         * Only activate for debugging. */
        // glBindVertexArray(0);
}

/* Just draw some vertices and let the shader place them where we want. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
        /* we cannot draw without a VAO ... annoying ... */
        glBindVertexArray(GPU_vao_default());

        GLenum type = convert_prim_type_to_gl(prim_type);
        glDrawArrays(type, 0, v_count);

        /* Performance hog if you are drawing with the same VAO multiple times.
         * Only activate for debugging. */
        // glBindVertexArray(0);
}
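
/* Example (sketch): shader-generated geometry without any vertex buffer,
 * e.g. a full-screen triangle whose positions come from gl_VertexID.
 * The shader bind/unbind calls shown are illustrative of a typical caller.
 *
 *   GPU_shader_bind(shader);
 *   GPU_draw_primitive(GPU_PRIM_TRIS, 3);
 *   GPU_shader_unbind();
 */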

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_shader(GPUBatch *batch, GPUShader *shader)
{
        GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin_with_config(
        GPUBatch *batch, eGPUBuiltinShader shader_id, eGPUShaderConfig sh_cfg)
{
        GPUShader *shader = GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg);
        GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
        GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}
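
/* Example (sketch): selecting a builtin shader compiled for a specific
 * config, e.g. a clipped variant when clip planes are active. The
 * GPU_SHADER_CFG_CLIPPED value is assumed from eGPUShaderConfig.
 *
 *   GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_CLIPPED);
 */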

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
        gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
        gpu_batch_presets_exit();
}

/** \} */