/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 *
 * Contributor(s): Blender Foundation
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/gpu/intern/gpu_batch.c
 *  \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"

#include "gpu_batch_private.h"
#include "gpu_context_private.h"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"

#include <stdlib.h>
#include <string.h>

static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

void GPU_batch_vao_cache_clear(GPUBatch *batch)
{
        if (batch->context == NULL) {
                return;
        }
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
                        if (batch->dynamic_vaos.vao_ids[i]) {
                                GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
                        }
                        if (batch->dynamic_vaos.interfaces[i]) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
                        }
                }
                MEM_freeN(batch->dynamic_vaos.interfaces);
                MEM_freeN(batch->dynamic_vaos.vao_ids);
        }
        else {
                for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                        if (batch->static_vaos.vao_ids[i]) {
                                GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
                        }
                        if (batch->static_vaos.interfaces[i]) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
                        }
                }
        }
        batch->is_dynamic_vao_count = false;
        for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                batch->static_vaos.vao_ids[i] = 0;
                batch->static_vaos.interfaces[i] = NULL;
        }
        gpu_context_remove_batch(batch->context, batch);
        batch->context = NULL;
}

GPUBatch *GPU_batch_create_ex(
        GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
        GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
        GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
        return batch;
}

void GPU_batch_init_ex(
        GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
#if TRUST_NO_ONE
        assert(verts != NULL);
#endif

        batch->verts[0] = verts;
        for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                batch->verts[v] = NULL;
        }
        batch->inst = NULL;
        batch->elem = elem;
        batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
        batch->phase = GPU_BATCH_READY_TO_DRAW;
        batch->is_dynamic_vao_count = false;
        batch->owns_flag = owns_flag;
        batch->free_callback = NULL;
}
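
/* Example: building a simple batch from one vertex buffer. Illustrative sketch
 * only; the vertex format, the attribute name "pos" and the GPU_vertformat_ /
 * GPU_vertbuf_ helpers come from GPU_vertex_format.h / GPU_vertex_buffer.h,
 * not from this file.
 *
 *   GPUVertFormat format = {0};
 *   uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
 *   GPU_vertbuf_data_alloc(vbo, 3);
 *   GPU_vertbuf_attr_set(vbo, pos, 0, (const float[3]){-1.0f, -1.0f, 0.0f});
 *   GPU_vertbuf_attr_set(vbo, pos, 1, (const float[3]){ 1.0f, -1.0f, 0.0f});
 *   GPU_vertbuf_attr_set(vbo, pos, 2, (const float[3]){ 0.0f,  1.0f, 0.0f});
 *
 *   GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
 *   (the batch now owns the VBO and frees it in GPU_batch_clear)
 */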

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
        GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

        batch_dst->gl_prim_type = batch_src->gl_prim_type;
        for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                batch_dst->verts[v] = batch_src->verts[v];
        }
}
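
/* Example: duplicating a batch so it can be drawn with a different shader while
 * sharing the same vertex/index buffers. Sketch only; allocating the
 * destination with MEM_callocN mirrors what GPU_batch_create_ex() does.
 *
 *   GPUBatch *copy = MEM_callocN(sizeof(GPUBatch), "GPUBatch copy");
 *   GPU_batch_copy(copy, batch);
 *   ...
 *   GPU_batch_discard(copy);   (owns_flag is 0, so the shared buffers stay with 'batch')
 */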

void GPU_batch_clear(GPUBatch *batch)
{
        if (batch->free_callback) {
                batch->free_callback(batch, batch->callback_data);
        }

        if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
                GPU_indexbuf_discard(batch->elem);
        }
        if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
                GPU_vertbuf_discard(batch->inst);
        }
        if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
                for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                        if (batch->verts[v] == NULL) {
                                break;
                        }
                        if (batch->owns_flag & (1 << v)) {
                                GPU_vertbuf_discard(batch->verts[v]);
                        }
                }
        }
        GPU_batch_vao_cache_clear(batch);
}

void GPU_batch_discard(GPUBatch *batch)
{
        GPU_batch_clear(batch);
        MEM_freeN(batch);
}

void GPU_batch_callback_free_set(GPUBatch *batch, void (*callback)(GPUBatch *, void *), void *user_data)
{
        batch->free_callback = callback;
        batch->callback_data = user_data;
}

void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
#if TRUST_NO_ONE
        assert(inst != NULL);
#endif
        /* redo the bindings */
        GPU_batch_vao_cache_clear(batch);

        if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
                GPU_vertbuf_discard(batch->inst);
        }
        batch->inst = inst;

        if (own_vbo) {
                batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
        }
        else {
                batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
        }
}
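
/* Example: attaching an instance buffer to an existing batch. Illustrative
 * sketch; the per-instance attribute name "inst_pos" and the GPU_vertformat_ /
 * GPU_vertbuf_ helpers are assumptions, not part of this file.
 *
 *   GPUVertFormat inst_format = {0};
 *   GPU_vertformat_attr_add(&inst_format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *inst_vbo = GPU_vertbuf_create_with_format(&inst_format);
 *   GPU_vertbuf_data_alloc(inst_vbo, instance_count);
 *   ... fill one entry per instance ...
 *
 *   GPU_batch_instbuf_set(batch, inst_vbo, true);   (batch now owns inst_vbo)
 *   GPU_batch_draw(batch);                          (draws instance_count instances)
 */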

/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(
        GPUBatch *batch, GPUVertBuf *verts,
        bool own_vbo)
{
        /* redo the bindings */
        GPU_batch_vao_cache_clear(batch);

        for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
                if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
                        /* for now all VertexBuffers must have same vertex_len */
                        assert(verts->vertex_len == batch->verts[0]->vertex_len);
#endif
                        batch->verts[v] = verts;
                        /* TODO: mark dirty so we can keep attrib bindings up-to-date */
                        if (own_vbo)
                                batch->owns_flag |= (1 << v);
                        return v;
                }
        }

        /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
        assert(false);
#endif
        return -1;
}
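
/* Example: splitting attributes over a second vertex buffer (positions in one
 * VBO, colors in another with the same vertex_len). Sketch only; the "color"
 * attribute and the GPU_vertformat_ / GPU_vertbuf_ helpers are assumptions.
 *
 *   GPUVertFormat col_format = {0};
 *   GPU_vertformat_attr_add(&col_format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *col_vbo = GPU_vertbuf_create_with_format(&col_format);
 *   GPU_vertbuf_data_alloc(col_vbo, vertex_len);   (must match the first VBO's length)
 *   ... fill colors ...
 *
 *   GPU_batch_vertbuf_add_ex(batch, col_vbo, true);
 */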

static GLuint batch_vao_get(GPUBatch *batch)
{
        /* Search through cache */
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i)
                        if (batch->dynamic_vaos.interfaces[i] == batch->interface)
                                return batch->dynamic_vaos.vao_ids[i];
        }
        else {
                for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
                        if (batch->static_vaos.interfaces[i] == batch->interface)
                                return batch->static_vaos.vao_ids[i];
        }

        /* Set context of this batch.
         * It will be bound to it until GPU_batch_vao_cache_clear is called.
         * Until then it can only be drawn with this context. */
        if (batch->context == NULL) {
                batch->context = GPU_context_active_get();
                gpu_context_add_batch(batch->context, batch);
        }
#if TRUST_NO_ONE
        else {
                /* Make sure you are not trying to draw this batch in another context. */
                assert(batch->context == GPU_context_active_get());
        }
#endif

        /* Cache miss, time to add a new entry! */
        GLuint new_vao = 0;
        if (!batch->is_dynamic_vao_count) {
                int i; /* find first unused slot */
                for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
                        if (batch->static_vaos.vao_ids[i] == 0)
                                break;

                if (i < GPU_BATCH_VAO_STATIC_LEN) {
                        batch->static_vaos.interfaces[i] = batch->interface;
                        batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
                }
                else {
                        /* Not enough space, switch to dynamic. */
                        batch->is_dynamic_vao_count = true;
                        /* Erase previous entries, they will be added back if drawn again. */
                        for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
                                GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
                                GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
                        }
                        /* Init dynamic arrays and let the branch below set the values. */
                        batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
                        batch->dynamic_vaos.interfaces = MEM_callocN(batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
                        batch->dynamic_vaos.vao_ids = MEM_callocN(batch->dynamic_vaos.count * sizeof(GLuint), "dyn vaos ids");
                }
        }

        if (batch->is_dynamic_vao_count) {
                int i; /* find first unused slot */
                for (i = 0; i < batch->dynamic_vaos.count; ++i)
                        if (batch->dynamic_vaos.vao_ids[i] == 0)
                                break;

                if (i == batch->dynamic_vaos.count) {
                        /* Not enough space, reallocate the array. */
                        i = batch->dynamic_vaos.count;
                        batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
                        batch->dynamic_vaos.interfaces = MEM_recallocN(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface *) * batch->dynamic_vaos.count);
                        batch->dynamic_vaos.vao_ids = MEM_recallocN(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
                }
                batch->dynamic_vaos.interfaces[i] = batch->interface;
                batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
        }

        GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

#if TRUST_NO_ONE
        assert(new_vao != 0);
#endif

        /* We just got a fresh VAO, we need to initialize it. */
        glBindVertexArray(new_vao);
        batch_update_program_bindings(batch, 0);
        glBindVertexArray(0);

        return new_vao;
}

void GPU_batch_program_set_no_use(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
        assert(glIsProgram(shaderface->program));
        assert(batch->program_in_use == 0);
#endif
        batch->interface = shaderface;
        batch->program = program;
        batch->vao_id = batch_vao_get(batch);
}

void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
        GPU_batch_program_set_no_use(batch, program, shaderface);
        GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}

void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
{
        if (batch->is_dynamic_vao_count) {
                for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
                        if (batch->dynamic_vaos.interfaces[i] == interface) {
                                GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
                                batch->dynamic_vaos.vao_ids[i] = 0;
                                batch->dynamic_vaos.interfaces[i] = NULL;
                                break; /* cannot have duplicates */
                        }
                }
        }
        else {
                int i;
                for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
                        if (batch->static_vaos.interfaces[i] == interface) {
                                GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
                                batch->static_vaos.vao_ids[i] = 0;
                                batch->static_vaos.interfaces[i] = NULL;
                                break; /* cannot have duplicates */
                        }
                }
        }
}

static void create_bindings(
        GPUVertBuf *verts, const GPUShaderInterface *interface,
        uint v_first, const bool use_instancing)
{
        const GPUVertFormat *format = &verts->format;

        const uint attr_len = format->attr_len;
        const uint stride = format->stride;

        GPU_vertbuf_use(verts);

        for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
                const GPUVertAttr *a = format->attribs + a_idx;
                const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

                for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
                        const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);

                        if (input == NULL) continue;

                        if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
                                assert(a->fetch_mode == GPU_FETCH_FLOAT);
                                assert(a->gl_comp_type == GL_FLOAT);
#endif
                                for (int i = 0; i < a->comp_len / 4; ++i) {
                                        glEnableVertexAttribArray(input->location + i);
                                        glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
                                        glVertexAttribPointer(input->location + i, 4, a->gl_comp_type, GL_FALSE, stride,
                                                              (const GLubyte *)pointer + i * 16);
                                }
                        }
                        else {
                                glEnableVertexAttribArray(input->location);
                                glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);

                                switch (a->fetch_mode) {
                                        case GPU_FETCH_FLOAT:
                                        case GPU_FETCH_INT_TO_FLOAT:
                                                glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
                                                break;
                                        case GPU_FETCH_INT_TO_FLOAT_UNIT:
                                                glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
                                                break;
                                        case GPU_FETCH_INT:
                                                glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
                                                break;
                                }
                        }
                }
        }
}

static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
{
        for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
                create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
        }
        if (batch->inst) {
                create_bindings(batch->inst, batch->interface, v_first, true);
        }
        if (batch->elem) {
                GPU_indexbuf_use(batch->elem);
        }
}

void GPU_batch_program_use_begin(GPUBatch *batch)
{
        /* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
         *       the GL context's active program. use_program doesn't mark other programs as "not used". */
        /* TODO: make not fragile (somehow) */

        if (!batch->program_in_use) {
                glUseProgram(batch->program);
                batch->program_in_use = true;
        }
}

void GPU_batch_program_use_end(GPUBatch *batch)
{
        if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
                glUseProgram(0);
#endif
                batch->program_in_use = false;
        }
}

#if TRUST_NO_ONE
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name); assert(uniform);
#else
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name);
#endif

void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, int value)
{
        GET_UNIFORM
        glUniform1ui(uniform->location, value);
}

void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
        GET_UNIFORM
        glUniform1i(uniform->location, value);
}

void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
        GET_UNIFORM
        glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}

void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
        GET_UNIFORM
        glUniform2f(uniform->location, x, y);
}

void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
        GET_UNIFORM
        glUniform3f(uniform->location, x, y, z);
}

void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
        GET_UNIFORM
        glUniform4f(uniform->location, x, y, z, w);
}

void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
        GET_UNIFORM
        glUniform1f(uniform->location, x);
}

void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
        GET_UNIFORM
        glUniform2fv(uniform->location, 1, data);
}

void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
        GET_UNIFORM
        glUniform3fv(uniform->location, 1, data);
}

void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
        GET_UNIFORM
        glUniform4fv(uniform->location, 1, data);
}

void GPU_batch_uniform_2fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
        GET_UNIFORM
        glUniform2fv(uniform->location, len, data);
}

void GPU_batch_uniform_4fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
        GET_UNIFORM
        glUniform4fv(uniform->location, len, data);
}

void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
        GET_UNIFORM
        glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}

static void primitive_restart_enable(const GPUIndexBuf *el)
{
        // TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have ogl 4.3
        glEnable(GL_PRIMITIVE_RESTART);
        GLuint restart_index = (GLuint)0xFFFFFFFF;

#if GPU_TRACK_INDEX_RANGE
        if (el->index_type == GPU_INDEX_U8)
                restart_index = (GLuint)0xFF;
        else if (el->index_type == GPU_INDEX_U16)
                restart_index = (GLuint)0xFFFF;
#endif

        glPrimitiveRestartIndex(restart_index);
}

static void primitive_restart_disable(void)
{
        glDisable(GL_PRIMITIVE_RESTART);
}

static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
        if (el->index_type == GPU_INDEX_U8)
                return (GLubyte *)0 + v_first;
        else if (el->index_type == GPU_INDEX_U16)
                return (GLushort *)0 + v_first;
        else
#endif
                return (GLuint *)0 + v_first;
}

void GPU_batch_draw(GPUBatch *batch)
{
#if TRUST_NO_ONE
        assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
        assert(batch->verts[0]->vbo_id != 0);
#endif
        GPU_batch_program_use_begin(batch);
        GPU_matrix_bind(batch->interface); // external call.

        GPU_batch_draw_range_ex(batch, 0, 0, false);

        GPU_batch_program_use_end(batch);
}
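
/* Example: a typical draw of a batch with a built-in shader. Sketch only;
 * GPU_SHADER_3D_UNIFORM_COLOR and its "color" uniform come from GPU_shader.h
 * and are assumptions as far as this file is concerned.
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.0f, 0.0f, 1.0f);
 *   GPU_batch_draw(batch);
 *   GPU_batch_discard(batch);   (only if the batch is not reused next frame)
 */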

void GPU_batch_draw_range_ex(GPUBatch *batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
        assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
#endif

        const bool do_instance = (force_instance || batch->inst);

        // If using offset drawing, use the default VAO and redo bindings.
        if (v_first != 0 && do_instance) {
                glBindVertexArray(GPU_vao_default());
                batch_update_program_bindings(batch, v_first);
        }
        else {
                glBindVertexArray(batch->vao_id);
        }

        if (do_instance) {
                /* Infer length if vertex count is not given */
                if (v_count == 0) {
                        v_count = batch->inst->vertex_len;
                }

                if (batch->elem) {
                        const GPUIndexBuf *el = batch->elem;

                        if (el->use_prim_restart) {
                                primitive_restart_enable(el);
                        }
#if GPU_TRACK_INDEX_RANGE
                        glDrawElementsInstancedBaseVertex(batch->gl_prim_type,
                                                          el->index_len,
                                                          el->gl_index_type,
                                                          0,
                                                          v_count,
                                                          el->base_index);
#else
                        glDrawElementsInstanced(batch->gl_prim_type, el->index_len, GL_UNSIGNED_INT, 0, v_count);
#endif
                        if (el->use_prim_restart) {
                                primitive_restart_disable();
                        }
                }
                else {
                        glDrawArraysInstanced(batch->gl_prim_type, 0, batch->verts[0]->vertex_len, v_count);
                }
        }
        else {
                /* Infer length if vertex count is not given */
                if (v_count == 0) {
                        v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
                }

                if (batch->elem) {
                        const GPUIndexBuf *el = batch->elem;

                        if (el->use_prim_restart) {
                                primitive_restart_enable(el);
                        }

                        void *v_first_ofs = elem_offset(el, v_first);

#if GPU_TRACK_INDEX_RANGE
                        if (el->base_index) {
                                glDrawRangeElementsBaseVertex(
                                        batch->gl_prim_type,
                                        el->min_index,
                                        el->max_index,
                                        v_count,
                                        el->gl_index_type,
                                        v_first_ofs,
                                        el->base_index);
                        }
                        else {
                                glDrawRangeElements(batch->gl_prim_type, el->min_index, el->max_index, v_count, el->gl_index_type, v_first_ofs);
                        }
#else
                        glDrawElements(batch->gl_prim_type, v_count, GL_UNSIGNED_INT, v_first_ofs);
#endif
                        if (el->use_prim_restart) {
                                primitive_restart_disable();
                        }
                }
                else {
                        glDrawArrays(batch->gl_prim_type, v_first, v_count);
                }
        }

        /* Performance hog if you are drawing with the same vao multiple times.
         * Only activate for debugging. */
        // glBindVertexArray(0);
}
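
/* Example: drawing only part of a batch, e.g. the last quarter of its
 * vertices. Sketch only; 'batch' is assumed to already have a program set via
 * GPU_batch_program_set*() so its VAO and shader interface are valid.
 *
 *   const int total = batch->verts[0]->vertex_len;
 *   GPU_batch_program_use_begin(batch);
 *   GPU_matrix_bind(batch->interface);
 *   GPU_batch_draw_range_ex(batch, total - total / 4, total / 4, false);
 *   GPU_batch_program_use_end(batch);
 */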

/* just draw some vertices and let the shader place them where we want. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
        /* we cannot draw without vao ... annoying ... */
        glBindVertexArray(GPU_vao_default());

        GLenum type = convert_prim_type_to_gl(prim_type);
        glDrawArrays(type, 0, v_count);

        /* Performance hog if you are drawing with the same vao multiple times.
         * Only activate for debugging. */
        // glBindVertexArray(0);
}
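
/* Example: drawing without any vertex buffer, letting the vertex shader derive
 * positions from gl_VertexID. Sketch only; "fullscreen_shader" is a
 * hypothetical shader, not something provided by this module.
 *
 *   GPU_shader_bind(fullscreen_shader);
 *   GPU_draw_primitive(GPU_PRIM_TRIS, 3);   (one full-screen triangle)
 *   GPU_shader_unbind();
 */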


/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_builtin(GPUBatch *batch, GPUBuiltinShader shader_id)
{
        GPUShader *shader = GPU_shader_get_builtin_shader(shader_id);
        GPU_batch_program_set(batch, shader->program, shader->interface);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
        gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
        gpu_batch_presets_exit();
}

/** \} */