Cleanup: comments (long lines) in gpu
[blender.git] / source/blender/gpu/intern/gpu_batch.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

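/* Typical usage, as an illustrative sketch only: a batch wraps vertex buffer(s), an
 * optional index buffer and a shader program, and is drawn once the program and its
 * uniforms are set. The vertex-buffer setup is abbreviated here, and the ownership
 * flag, builtin shader id and uniform name come from elsewhere in the GPU module
 * (GPU_batch.h / GPU_shader.h), so treat them as assumptions for this example.
 *
 *   GPUVertBuf *verts = ...;  // filled via the GPU_vertbuf_* API with a GPUVertFormat
 *   GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, verts, NULL, GPU_BATCH_OWNS_VBO);
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.0f, 0.0f, 1.0f);
 *   GPU_batch_draw(batch);
 *   GPU_batch_discard(batch);  // also frees the VBO because of the ownership flag
 */
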
#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"

#include "gpu_batch_private.h"
#include "gpu_context_private.h"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"

#include <stdlib.h>
#include <string.h>

static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

void GPU_batch_vao_cache_clear(GPUBatch *batch)
{
  if (batch->context == NULL) {
    return;
  }
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.vao_ids[i]) {
        GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
      }
      if (batch->dynamic_vaos.interfaces[i]) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
      }
    }
    MEM_freeN(batch->dynamic_vaos.interfaces);
    MEM_freeN(batch->dynamic_vaos.vao_ids);
  }
  else {
    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.vao_ids[i]) {
        GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
      }
      if (batch->static_vaos.interfaces[i]) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
      }
    }
  }
  batch->is_dynamic_vao_count = false;
  for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
    batch->static_vaos.vao_ids[i] = 0;
    batch->static_vaos.interfaces[i] = NULL;
  }
  gpu_context_remove_batch(batch->context, batch);
  batch->context = NULL;
}

GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
                              GPUVertBuf *verts,
                              GPUIndexBuf *elem,
                              uint owns_flag)
{
  GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
  return batch;
}

void GPU_batch_init_ex(
    GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem, uint owns_flag)
{
#if TRUST_NO_ONE
  assert(verts != NULL);
#endif

  batch->verts[0] = verts;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    batch->verts[v] = NULL;
  }
  batch->inst = NULL;
  batch->elem = elem;
  batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
  batch->phase = GPU_BATCH_READY_TO_DRAW;
  batch->is_dynamic_vao_count = false;
  batch->owns_flag = owns_flag;
  batch->free_callback = NULL;
}

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
  GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

  batch_dst->gl_prim_type = batch_src->gl_prim_type;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    batch_dst->verts[v] = batch_src->verts[v];
  }
}

void GPU_batch_clear(GPUBatch *batch)
{
  if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
    GPU_indexbuf_discard(batch->elem);
  }
  if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
    GPU_vertbuf_discard(batch->inst);
  }
  if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
    for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
      if (batch->verts[v] == NULL) {
        break;
      }
      if (batch->owns_flag & (1 << v)) {
        GPU_vertbuf_discard(batch->verts[v]);
      }
    }
  }
  GPU_batch_vao_cache_clear(batch);
}

void GPU_batch_discard(GPUBatch *batch)
{
  if (batch->free_callback) {
    batch->free_callback(batch, batch->callback_data);
  }

  GPU_batch_clear(batch);
  MEM_freeN(batch);
}

void GPU_batch_callback_free_set(GPUBatch *batch,
                                 void (*callback)(GPUBatch *, void *),
                                 void *user_data)
{
  batch->free_callback = callback;
  batch->callback_data = user_data;
}

void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
#if TRUST_NO_ONE
  assert(inst != NULL);
#endif
  /* redo the bindings */
  GPU_batch_vao_cache_clear(batch);

  if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
    GPU_vertbuf_discard(batch->inst);
  }
  batch->inst = inst;

  if (own_vbo) {
    batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
  }
  else {
    batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
  }
}

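/* Illustrative sketch (the instance buffer "inst" and its per-instance attributes are
 * assumptions for the example): attaching an instance buffer makes subsequent draws
 * instanced, with the per-instance attribute divisor handled in create_bindings() below.
 *
 *   GPU_batch_instbuf_set(batch, inst, true);  // batch now owns and will free "inst"
 *   GPU_batch_draw(batch);                     // draws inst->vertex_len instances
 */
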
/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
{
  /* redo the bindings */
  GPU_batch_vao_cache_clear(batch);

  for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
      /* for now all VertexBuffers must have same vertex_len */
      assert(verts->vertex_len == batch->verts[0]->vertex_len);
#endif
      batch->verts[v] = verts;
      /* TODO: mark dirty so we can keep attribute bindings up-to-date */
      if (own_vbo)
        batch->owns_flag |= (1 << v);
      return v;
    }
  }

  /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
  assert(false);
#endif
  return -1;
}

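/* Return a VAO compatible with the batch's current shader interface, creating and
 * caching a new one on a cache miss. The first few interfaces use the fixed-size
 * static_vaos arrays; once those are full, the cache switches to the growable
 * dynamic_vaos arrays. */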
static GLuint batch_vao_get(GPUBatch *batch)
{
  /* Search through cache */
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i)
      if (batch->dynamic_vaos.interfaces[i] == batch->interface)
        return batch->dynamic_vaos.vao_ids[i];
  }
  else {
    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
      if (batch->static_vaos.interfaces[i] == batch->interface)
        return batch->static_vaos.vao_ids[i];
  }

  /* Set the context of this batch.
   * The batch stays bound to it until GPU_batch_vao_cache_clear is called,
   * and can only be drawn with this context until then. */
  if (batch->context == NULL) {
    batch->context = GPU_context_active_get();
    gpu_context_add_batch(batch->context, batch);
  }
#if TRUST_NO_ONE
  else {
    /* Make sure you are not trying to draw this batch in another context. */
    assert(batch->context == GPU_context_active_get());
  }
#endif

  /* Cache miss, time to add a new entry! */
  GLuint new_vao = 0;
  if (!batch->is_dynamic_vao_count) {
    int i; /* find first unused slot */
    for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
      if (batch->static_vaos.vao_ids[i] == 0)
        break;

    if (i < GPU_BATCH_VAO_STATIC_LEN) {
      batch->static_vaos.interfaces[i] = batch->interface;
      batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
    }
    else {
      /* Not enough space, switch to dynamic. */
      batch->is_dynamic_vao_count = true;
      /* Erase previous entries, they will be added back if drawn again. */
      for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
        GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
      }
      /* Init dynamic arrays and let the branch below set the values. */
      batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
      batch->dynamic_vaos.interfaces = MEM_callocN(
          batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
      batch->dynamic_vaos.vao_ids = MEM_callocN(batch->dynamic_vaos.count * sizeof(GLuint),
                                                "dyn vaos ids");
    }
  }

  if (batch->is_dynamic_vao_count) {
    int i; /* find first unused slot */
    for (i = 0; i < batch->dynamic_vaos.count; ++i)
      if (batch->dynamic_vaos.vao_ids[i] == 0)
        break;

    if (i == batch->dynamic_vaos.count) {
      /* Not enough space, reallocate the arrays. */
      i = batch->dynamic_vaos.count;
      batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
      batch->dynamic_vaos.interfaces = MEM_recallocN(batch->dynamic_vaos.interfaces,
                                                     sizeof(GPUShaderInterface *) *
                                                         batch->dynamic_vaos.count);
      batch->dynamic_vaos.vao_ids = MEM_recallocN(batch->dynamic_vaos.vao_ids,
                                                  sizeof(GLuint) * batch->dynamic_vaos.count);
    }
    batch->dynamic_vaos.interfaces[i] = batch->interface;
    batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
  }

  GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

#if TRUST_NO_ONE
  assert(new_vao != 0);
#endif

  /* We just got a fresh VAO; we need to initialize it. */
  glBindVertexArray(new_vao);
  batch_update_program_bindings(batch, 0);
  glBindVertexArray(0);

  return new_vao;
}

void GPU_batch_program_set_no_use(GPUBatch *batch,
                                  uint32_t program,
                                  const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
  assert(glIsProgram(shaderface->program));
  assert(batch->program_in_use == 0);
#endif
  batch->interface = shaderface;
  batch->program = program;
  batch->vao_id = batch_vao_get(batch);
}

void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
  GPU_batch_program_set_no_use(batch, program, shaderface);
  GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}

void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
{
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.interfaces[i] == interface) {
        GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
        batch->dynamic_vaos.vao_ids[i] = 0;
        batch->dynamic_vaos.interfaces[i] = NULL;
        break; /* cannot have duplicates */
      }
    }
  }
  else {
    int i;
    for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.interfaces[i] == interface) {
        GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
        batch->static_vaos.vao_ids[i] = 0;
        batch->static_vaos.interfaces[i] = NULL;
        break; /* cannot have duplicates */
      }
    }
  }
}

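/* Bind every attribute of "verts" that the shader interface knows about.
 * Matrix attributes (8/12/16 float components) are bound as consecutive vec4
 * columns. When use_instancing is true, the attribute divisor is set to 1 so
 * the data advances per instance instead of per vertex. */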
static void create_bindings(GPUVertBuf *verts,
                            const GPUShaderInterface *interface,
                            uint v_first,
                            const bool use_instancing)
{
  const GPUVertFormat *format = &verts->format;

  const uint attr_len = format->attr_len;
  const uint stride = format->stride;

  GPU_vertbuf_use(verts);

  for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
    const GPUVertAttr *a = &format->attrs[a_idx];
    const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

    for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
      const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);

      if (input == NULL)
        continue;

      if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
        assert(a->fetch_mode == GPU_FETCH_FLOAT);
        assert(a->gl_comp_type == GL_FLOAT);
#endif
        for (int i = 0; i < a->comp_len / 4; ++i) {
          glEnableVertexAttribArray(input->location + i);
          glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
          glVertexAttribPointer(input->location + i,
                                4,
                                a->gl_comp_type,
                                GL_FALSE,
                                stride,
                                (const GLubyte *)pointer + i * 16);
        }
      }
      else {
        glEnableVertexAttribArray(input->location);
        glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);

        switch (a->fetch_mode) {
          case GPU_FETCH_FLOAT:
          case GPU_FETCH_INT_TO_FLOAT:
            glVertexAttribPointer(
                input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
            break;
          case GPU_FETCH_INT_TO_FLOAT_UNIT:
            glVertexAttribPointer(
                input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
            break;
          case GPU_FETCH_INT:
            glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
            break;
        }
      }
    }
  }
}

static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
{
  for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
    create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
  }
  if (batch->inst) {
    create_bindings(batch->inst, batch->interface, v_first, true);
  }
  if (batch->elem) {
    GPU_indexbuf_use(batch->elem);
  }
}

void GPU_batch_program_use_begin(GPUBatch *batch)
{
  /* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
   *       the GL context's active program.
   *       use_program doesn't mark other programs as "not used". */
  /* TODO: make not fragile (somehow) */

  if (!batch->program_in_use) {
    glUseProgram(batch->program);
    batch->program_in_use = true;
  }
}

void GPU_batch_program_use_end(GPUBatch *batch)
{
  if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
    glUseProgram(0);
#endif
    batch->program_in_use = false;
  }
}

#if TRUST_NO_ONE
#  define GET_UNIFORM \
    const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name); \
    assert(uniform);
#else
#  define GET_UNIFORM \
    const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name);
#endif

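/* The setters below resolve "name" through the batch's shader interface via GET_UNIFORM
 * and forward the value to the matching glUniform* call. Illustrative use (the uniform
 * names are assumptions and must exist in the bound program; GET_UNIFORM asserts this
 * when TRUST_NO_ONE is enabled):
 *
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
 *   GPU_batch_uniform_1f(batch, "size", 2.0f);
 */
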
void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, int value)
{
  GET_UNIFORM
  glUniform1ui(uniform->location, value);
}

void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
  GET_UNIFORM
  glUniform1i(uniform->location, value);
}

void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
  GET_UNIFORM
  glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}

void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
  GET_UNIFORM
  glUniform2f(uniform->location, x, y);
}

void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
  GET_UNIFORM
  glUniform3f(uniform->location, x, y, z);
}

void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
  GET_UNIFORM
  glUniform4f(uniform->location, x, y, z, w);
}

void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
  GET_UNIFORM
  glUniform1f(uniform->location, x);
}

void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
  GET_UNIFORM
  glUniform2fv(uniform->location, 1, data);
}

void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
  GET_UNIFORM
  glUniform3fv(uniform->location, 1, data);
}

void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
  GET_UNIFORM
  glUniform4fv(uniform->location, 1, data);
}

void GPU_batch_uniform_2fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  glUniform2fv(uniform->location, len, data);
}

void GPU_batch_uniform_4fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  glUniform4fv(uniform->location, len, data);
}

void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
  GET_UNIFORM
  glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}

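/* Emulate GL_PRIMITIVE_RESTART_FIXED_INDEX (core in OpenGL 4.3, see the TODO below) by
 * enabling GL_PRIMITIVE_RESTART with the all-ones restart index that matches the element
 * buffer's index type. */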
static void primitive_restart_enable(const GPUIndexBuf *el)
{
  // TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have ogl 4.3
  glEnable(GL_PRIMITIVE_RESTART);
  GLuint restart_index = (GLuint)0xFFFFFFFF;

#if GPU_TRACK_INDEX_RANGE
  if (el->index_type == GPU_INDEX_U8)
    restart_index = (GLuint)0xFF;
  else if (el->index_type == GPU_INDEX_U16)
    restart_index = (GLuint)0xFFFF;
#endif

  glPrimitiveRestartIndex(restart_index);
}

static void primitive_restart_disable(void)
{
  glDisable(GL_PRIMITIVE_RESTART);
}

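/* Convert an element index (v_first) into the byte-offset pointer expected by the
 * glDrawElements* calls, based on the index buffer's storage type (u8/u16/u32). */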
static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
  if (el->index_type == GPU_INDEX_U8)
    return (GLubyte *)0 + v_first;
  else if (el->index_type == GPU_INDEX_U16)
    return (GLushort *)0 + v_first;
  else
#endif
    return (GLuint *)0 + v_first;
}

void GPU_batch_draw(GPUBatch *batch)
{
#if TRUST_NO_ONE
  assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
  assert(batch->verts[0]->vbo_id != 0);
#endif
  GPU_batch_program_use_begin(batch);
  GPU_matrix_bind(batch->interface);  // external call.

  GPU_batch_draw_range_ex(batch, 0, 0, false);

  GPU_batch_program_use_end(batch);
}

void GPU_batch_draw_range_ex(GPUBatch *batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
  assert(!(force_instance && (batch->inst == NULL)) ||
         v_count > 0);  // we cannot infer length if force_instance
#endif

  const bool do_instance = (force_instance || batch->inst);

  // If using offset drawing, use the default VAO and redo bindings.
  if (v_first != 0 && do_instance) {
    glBindVertexArray(GPU_vao_default());
    batch_update_program_bindings(batch, v_first);
  }
  else {
    glBindVertexArray(batch->vao_id);
  }

  if (do_instance) {
    /* Infer length if vertex count is not given */
    if (v_count == 0) {
      v_count = batch->inst->vertex_len;
    }

    if (batch->elem) {
      const GPUIndexBuf *el = batch->elem;

      if (el->use_prim_restart) {
        primitive_restart_enable(el);
      }
#if GPU_TRACK_INDEX_RANGE
      glDrawElementsInstancedBaseVertex(
          batch->gl_prim_type, el->index_len, el->gl_index_type, 0, v_count, el->base_index);
#else
      glDrawElementsInstanced(batch->gl_prim_type, el->index_len, GL_UNSIGNED_INT, 0, v_count);
#endif
      if (el->use_prim_restart) {
        primitive_restart_disable();
      }
    }
    else {
      glDrawArraysInstanced(batch->gl_prim_type, 0, batch->verts[0]->vertex_len, v_count);
    }
  }
  else {
    /* Infer length if vertex count is not given */
    if (v_count == 0) {
      v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
    }

    if (batch->elem) {
      const GPUIndexBuf *el = batch->elem;

      if (el->use_prim_restart) {
        primitive_restart_enable(el);
      }

      void *v_first_ofs = elem_offset(el, v_first);

#if GPU_TRACK_INDEX_RANGE
      if (el->base_index) {
        glDrawRangeElementsBaseVertex(batch->gl_prim_type,
                                      el->min_index,
                                      el->max_index,
                                      v_count,
                                      el->gl_index_type,
                                      v_first_ofs,
                                      el->base_index);
      }
      else {
        glDrawRangeElements(batch->gl_prim_type,
                            el->min_index,
                            el->max_index,
                            v_count,
                            el->gl_index_type,
                            v_first_ofs);
      }
#else
      glDrawElements(batch->gl_prim_type, v_count, GL_UNSIGNED_INT, v_first_ofs);
#endif
      if (el->use_prim_restart) {
        primitive_restart_disable();
      }
    }
    else {
      glDrawArrays(batch->gl_prim_type, v_first, v_count);
    }
  }

  /* Performance hog if you are drawing with the same VAO multiple times.
   * Only activate for debugging. */
  // glBindVertexArray(0);
}

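/* Illustrative sketch of drawing only a sub-range of an already configured batch
 * (the range values are arbitrary example numbers). GPU_batch_draw() above wraps
 * the same call with program binding and matrix binding.
 *
 *   GPU_batch_program_use_begin(batch);
 *   GPU_batch_draw_range_ex(batch, 6, 12, false);  // 12 elements starting at index 6
 *   GPU_batch_program_use_end(batch);
 */
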
/* Just draw some vertices and let the shader place them where we want. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
  /* we cannot draw without a vao ... annoying ... */
  glBindVertexArray(GPU_vao_default());

  GLenum type = convert_prim_type_to_gl(prim_type);
  glDrawArrays(type, 0, v_count);

  /* Performance hog if you are drawing with the same VAO multiple times.
   * Only activate for debugging. */
  // glBindVertexArray(0);
}

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_shader(GPUBatch *batch, GPUShader *shader)
{
  GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg)
{
  GPUShader *shader = GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg);
  GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
  GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
  gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
  gpu_batch_presets_exit();
}

/** \} */