/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"

#include "gpu_batch_private.h"
#include "gpu_context_private.h"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"

#include <stdlib.h>
#include <string.h>

static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

void GPU_batch_vao_cache_clear(GPUBatch *batch)
{
  if (batch->context == NULL) {
    return;
  }
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.vao_ids[i]) {
        GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
      }
      if (batch->dynamic_vaos.interfaces[i]) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
      }
    }
    MEM_freeN((void *)batch->dynamic_vaos.interfaces);
    MEM_freeN(batch->dynamic_vaos.vao_ids);
  }
  else {
    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.vao_ids[i]) {
        GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
      }
      if (batch->static_vaos.interfaces[i]) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
      }
    }
  }
  batch->is_dynamic_vao_count = false;
  for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
    batch->static_vaos.vao_ids[i] = 0;
    batch->static_vaos.interfaces[i] = NULL;
  }
  gpu_context_remove_batch(batch->context, batch);
  batch->context = NULL;
}

GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
                              GPUVertBuf *verts,
                              GPUIndexBuf *elem,
                              uint owns_flag)
{
  GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
  return batch;
}

void GPU_batch_init_ex(
    GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem, uint owns_flag)
{
#if TRUST_NO_ONE
  assert(verts != NULL);
#endif

  batch->verts[0] = verts;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    batch->verts[v] = NULL;
  }
  batch->inst = NULL;
  batch->elem = elem;
  batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
  batch->phase = GPU_BATCH_READY_TO_DRAW;
  batch->is_dynamic_vao_count = false;
  batch->owns_flag = owns_flag;
  batch->free_callback = NULL;
}
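
/* Typical creation pattern (illustrative sketch only, not called from this file; the vertex
 * format and the attribute name "pos" are assumptions made for the example):
 *
 *   GPUVertFormat format = {0};
 *   uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
 *   GPU_vertbuf_data_alloc(vbo, 3);
 *   GPU_vertbuf_attr_set(vbo, pos, 0, (const float[2]){-1.0f, -1.0f});
 *   GPU_vertbuf_attr_set(vbo, pos, 1, (const float[2]){1.0f, -1.0f});
 *   GPU_vertbuf_attr_set(vbo, pos, 2, (const float[2]){0.0f, 1.0f});
 *
 *   GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
 *   ...
 *   GPU_batch_discard(batch); // also frees the VBO because of GPU_BATCH_OWNS_VBO
 */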

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
  GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

  batch_dst->gl_prim_type = batch_src->gl_prim_type;
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    batch_dst->verts[v] = batch_src->verts[v];
  }
}

void GPU_batch_clear(GPUBatch *batch)
{
  if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
    GPU_indexbuf_discard(batch->elem);
  }
  if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
    GPU_vertbuf_discard(batch->inst);
  }
  if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
    for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
      if (batch->verts[v] == NULL) {
        break;
      }
      if (batch->owns_flag & (1 << v)) {
        GPU_vertbuf_discard(batch->verts[v]);
      }
    }
  }
  GPU_batch_vao_cache_clear(batch);
  batch->phase = GPU_BATCH_UNUSED;
}

void GPU_batch_discard(GPUBatch *batch)
{
  if (batch->free_callback) {
    batch->free_callback(batch, batch->callback_data);
  }

  GPU_batch_clear(batch);
  MEM_freeN(batch);
}

void GPU_batch_callback_free_set(GPUBatch *batch,
                                 void (*callback)(GPUBatch *, void *),
                                 void *user_data)
{
  batch->free_callback = callback;
  batch->callback_data = user_data;
}

void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
#if TRUST_NO_ONE
  assert(inst != NULL);
#endif
  /* redo the bindings */
  GPU_batch_vao_cache_clear(batch);

  if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
    GPU_vertbuf_discard(batch->inst);
  }
  batch->inst = inst;

  if (own_vbo) {
    batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
  }
  else {
    batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
  }
}
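
/* Instancing sketch (illustrative only; the per-instance attribute name "inst_pos" and the
 * `instance_count` variable are assumptions, not taken from this file):
 *
 *   GPUVertFormat inst_format = {0};
 *   uint inst_pos = GPU_vertformat_attr_add(
 *       &inst_format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
 *
 *   GPUVertBuf *inst_vbo = GPU_vertbuf_create_with_format(&inst_format);
 *   GPU_vertbuf_data_alloc(inst_vbo, instance_count);
 *   // ... fill one entry per instance with GPU_vertbuf_attr_set() ...
 *
 *   GPU_batch_instbuf_set(batch, inst_vbo, true); // batch now owns and will free inst_vbo
 */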

/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
{
  /* redo the bindings */
  GPU_batch_vao_cache_clear(batch);

  for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
    if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
      /* for now all VertexBuffers must have same vertex_len */
      assert(verts->vertex_len == batch->verts[0]->vertex_len);
#endif
      batch->verts[v] = verts;
      /* TODO: mark dirty so we can keep attribute bindings up-to-date */
      if (own_vbo) {
        batch->owns_flag |= (1 << v);
      }
      return v;
    }
  }

  /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
  assert(false);
#endif
  return -1;
}
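
/* Adding a second per-vertex stream (sketch; `nor_vbo` is a hypothetical VBO, and it must have
 * the same vertex_len as the first one, as asserted above):
 *
 *   int slot = GPU_batch_vertbuf_add_ex(batch, nor_vbo, true);
 *   if (slot == -1) {
 *     // no free VBO slot left (GPU_BATCH_VBO_MAX_LEN reached)
 *   }
 */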

static GLuint batch_vao_get(GPUBatch *batch)
{
  /* Search through cache */
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.interfaces[i] == batch->interface) {
        return batch->dynamic_vaos.vao_ids[i];
      }
    }
  }
  else {
    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.interfaces[i] == batch->interface) {
        return batch->static_vaos.vao_ids[i];
      }
    }
  }

  /* Set context of this batch.
   * It will be bound to it until GPU_batch_vao_cache_clear is called.
   * Until then it can only be drawn with this context. */
  if (batch->context == NULL) {
    batch->context = GPU_context_active_get();
    gpu_context_add_batch(batch->context, batch);
  }
#if TRUST_NO_ONE
  else {
    /* Make sure you are not trying to draw this batch in another context. */
    assert(batch->context == GPU_context_active_get());
  }
#endif

  /* Cache miss, time to add a new entry! */
  GLuint new_vao = 0;
  if (!batch->is_dynamic_vao_count) {
    int i; /* find first unused slot */
    for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.vao_ids[i] == 0) {
        break;
      }
    }

    if (i < GPU_BATCH_VAO_STATIC_LEN) {
      batch->static_vaos.interfaces[i] = batch->interface;
      batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
    }
    else {
      /* Not enough space, switch to dynamic. */
      batch->is_dynamic_vao_count = true;
      /* Erase previous entries, they will be added back if drawn again. */
      for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
        GPU_shaderinterface_remove_batch_ref(
            (GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
        GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
      }
      /* Init dynamic arrays and let the branch below set the values. */
      batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
      batch->dynamic_vaos.interfaces = MEM_callocN(
          batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
      batch->dynamic_vaos.vao_ids = MEM_callocN(batch->dynamic_vaos.count * sizeof(GLuint),
                                                "dyn vaos ids");
    }
  }

  if (batch->is_dynamic_vao_count) {
    int i; /* find first unused slot */
    for (i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.vao_ids[i] == 0) {
        break;
      }
    }

    if (i == batch->dynamic_vaos.count) {
      /* Not enough space, realloc the array. */
      i = batch->dynamic_vaos.count;
      batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
      batch->dynamic_vaos.interfaces = MEM_recallocN((void *)batch->dynamic_vaos.interfaces,
                                                     sizeof(GPUShaderInterface *) *
                                                         batch->dynamic_vaos.count);
      batch->dynamic_vaos.vao_ids = MEM_recallocN(batch->dynamic_vaos.vao_ids,
                                                  sizeof(GLuint) * batch->dynamic_vaos.count);
    }
    batch->dynamic_vaos.interfaces[i] = batch->interface;
    batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
  }

  GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

#if TRUST_NO_ONE
  assert(new_vao != 0);
#endif

  /* We just got a fresh VAO, we need to initialize it. */
  glBindVertexArray(new_vao);
  batch_update_program_bindings(batch, 0);
  glBindVertexArray(0);

  return new_vao;
}
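
/* Note on the VAO cache: each (batch, shader interface) pair gets its own VAO, so a batch can
 * be drawn with several shaders without re-specifying attribute bindings every time. The cache
 * starts with GPU_BATCH_VAO_STATIC_LEN inline slots and switches to a heap-allocated, growable
 * array once those run out; GPU_batch_vao_cache_clear() frees everything and detaches the batch
 * from its context. */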

void GPU_batch_program_set_no_use(GPUBatch *batch,
                                  uint32_t program,
                                  const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
  assert(glIsProgram(shaderface->program));
  assert(batch->program_in_use == 0);
#endif
  batch->interface = shaderface;
  batch->program = program;
  batch->vao_id = batch_vao_get(batch);
}

void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
  GPU_batch_program_set_no_use(batch, program, shaderface);
  GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}

void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
{
  if (batch->is_dynamic_vao_count) {
    for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
      if (batch->dynamic_vaos.interfaces[i] == interface) {
        GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
        batch->dynamic_vaos.vao_ids[i] = 0;
        batch->dynamic_vaos.interfaces[i] = NULL;
        break; /* cannot have duplicates */
      }
    }
  }
  else {
    int i;
    for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
      if (batch->static_vaos.interfaces[i] == interface) {
        GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
        batch->static_vaos.vao_ids[i] = 0;
        batch->static_vaos.interfaces[i] = NULL;
        break; /* cannot have duplicates */
      }
    }
  }
}

static void create_bindings(GPUVertBuf *verts,
                            const GPUShaderInterface *interface,
                            uint v_first,
                            const bool use_instancing)
{
  const GPUVertFormat *format = &verts->format;

  const uint attr_len = format->attr_len;
  const uint stride = format->stride;

  GPU_vertbuf_use(verts);

  for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
    const GPUVertAttr *a = &format->attrs[a_idx];
    const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

    for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
      const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
      const GPUShaderInput *input = GPU_shaderinterface_attr(interface, name);

      if (input == NULL) {
        continue;
      }

      if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
        assert(a->fetch_mode == GPU_FETCH_FLOAT);
        assert(a->gl_comp_type == GL_FLOAT);
#endif
        for (int i = 0; i < a->comp_len / 4; ++i) {
          glEnableVertexAttribArray(input->location + i);
          glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
          glVertexAttribPointer(input->location + i,
                                4,
                                a->gl_comp_type,
                                GL_FALSE,
                                stride,
                                (const GLubyte *)pointer + i * 16);
        }
      }
      else {
        glEnableVertexAttribArray(input->location);
        glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);

        switch (a->fetch_mode) {
          case GPU_FETCH_FLOAT:
          case GPU_FETCH_INT_TO_FLOAT:
            glVertexAttribPointer(
                input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
            break;
          case GPU_FETCH_INT_TO_FLOAT_UNIT:
            glVertexAttribPointer(
                input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
            break;
          case GPU_FETCH_INT:
            glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
            break;
        }
      }
    }
  }
}

static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
{
  for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
    create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
  }
  if (batch->inst) {
    create_bindings(batch->inst, batch->interface, v_first, true);
  }
  if (batch->elem) {
    GPU_indexbuf_use(batch->elem);
  }
}

void GPU_batch_program_use_begin(GPUBatch *batch)
{
  /* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
   *       the GL context's active program.
   *       use_program doesn't mark other programs as "not used". */
  /* TODO: make not fragile (somehow) */

  if (!batch->program_in_use) {
    glUseProgram(batch->program);
    batch->program_in_use = true;
  }
}

void GPU_batch_program_use_end(GPUBatch *batch)
{
  if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
    glUseProgram(0);
#endif
    batch->program_in_use = false;
  }
}

#if TRUST_NO_ONE
#  define GET_UNIFORM \
    const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name); \
    assert(uniform);
#else
#  define GET_UNIFORM \
    const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name);
#endif

void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, uint value)
{
  GET_UNIFORM
  glUniform1ui(uniform->location, value);
}

void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
  GET_UNIFORM
  glUniform1i(uniform->location, value);
}

void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
  GET_UNIFORM
  glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}

void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
  GET_UNIFORM
  glUniform2f(uniform->location, x, y);
}

void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
  GET_UNIFORM
  glUniform3f(uniform->location, x, y, z);
}

void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
  GET_UNIFORM
  glUniform4f(uniform->location, x, y, z, w);
}

void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
  GET_UNIFORM
  glUniform1f(uniform->location, x);
}

void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
  GET_UNIFORM
  glUniform2fv(uniform->location, 1, data);
}

void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
  GET_UNIFORM
  glUniform3fv(uniform->location, 1, data);
}

void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
  GET_UNIFORM
  glUniform4fv(uniform->location, 1, data);
}

void GPU_batch_uniform_2fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  glUniform2fv(uniform->location, len, data);
}

void GPU_batch_uniform_4fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  glUniform4fv(uniform->location, len, data);
}

void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
  GET_UNIFORM
  glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}
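
/* These setters look the uniform up by name through the batch's shader interface on every call
 * (GPU_shaderinterface_uniform_ensure above); under TRUST_NO_ONE a missing name asserts.
 * A minimal sketch, assuming the bound shader declares `uniform vec4 color;`:
 *
 *   GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
 */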

static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
  if (el->index_type == GPU_INDEX_U16) {
    return (GLushort *)0 + v_first;
  }
#endif
  return (GLuint *)0 + v_first;
}

/* Use when drawing with GPU_batch_draw_advanced */
void GPU_batch_bind(GPUBatch *batch)
{
  glBindVertexArray(batch->vao_id);

#if GPU_TRACK_INDEX_RANGE
  /* Can be removed if GL 4.3 is required. */
  if (!GLEW_ARB_ES3_compatibility && batch->elem != NULL) {
    GLuint restart_index = (batch->elem->index_type == GPU_INDEX_U16) ? (GLuint)0xFFFF :
                                                                        (GLuint)0xFFFFFFFF;
    glPrimitiveRestartIndex(restart_index);
  }
#endif
}

void GPU_batch_draw(GPUBatch *batch)
{
#if TRUST_NO_ONE
  assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
  assert(batch->verts[0]->vbo_id != 0);
#endif
  GPU_batch_program_use_begin(batch);
  GPU_matrix_bind(batch->interface);  // external call.

  GPU_batch_bind(batch);
  GPU_batch_draw_advanced(batch, 0, 0, 0, 0);

  GPU_batch_program_use_end(batch);
}
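
/* Typical use (illustrative sketch; assumes the model-view/projection matrices were already set
 * through the GPU_matrix API and that `batch` provides the "pos" attribute the built-in shader
 * expects):
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 0.0f, 1.0f, 0.0f, 1.0f);
 *   GPU_batch_draw(batch);
 */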

void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count)
{
#if TRUST_NO_ONE
  BLI_assert(batch->program_in_use);
  /* TODO could assert that VAO is bound. */
#endif

  if (v_count == 0) {
    v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
  }
  if (i_count == 0) {
    i_count = (batch->inst) ? batch->inst->vertex_len : 1;
  }

  if (!GLEW_ARB_base_instance) {
    if (i_first > 0 && i_count > 0) {
      /* If using offset drawing with instancing, we must
       * use the default VAO and redo bindings. */
      glBindVertexArray(GPU_vao_default());
      batch_update_program_bindings(batch, i_first);
    }
    else {
      /* Previous call could have bound the default VAO,
       * see above. */
      glBindVertexArray(batch->vao_id);
    }
  }

  if (batch->elem) {
    const GPUIndexBuf *el = batch->elem;
#if GPU_TRACK_INDEX_RANGE
    GLenum index_type = el->gl_index_type;
    GLint base_index = el->base_index;
#else
    GLenum index_type = GL_UNSIGNED_INT;
    GLint base_index = 0;
#endif
    void *v_first_ofs = elem_offset(el, v_first);

    if (GLEW_ARB_base_instance) {
      glDrawElementsInstancedBaseVertexBaseInstance(
          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index, i_first);
    }
    else {
      glDrawElementsInstancedBaseVertex(
          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index);
    }
  }
  else {
#ifdef __APPLE__
    glDisable(GL_PRIMITIVE_RESTART);
#endif
    if (GLEW_ARB_base_instance) {
      glDrawArraysInstancedBaseInstance(batch->gl_prim_type, v_first, v_count, i_count, i_first);
    }
    else {
      glDrawArraysInstanced(batch->gl_prim_type, v_first, v_count, i_count);
    }
#ifdef __APPLE__
    glEnable(GL_PRIMITIVE_RESTART);
#endif
  }
}
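
/* Lower-level draw path (sketch): callers that want a sub-range or an explicit instance count
 * manage program and VAO binding themselves. Passing 0 for v_count/i_count means "use the full
 * buffer", as handled above.
 *
 *   GPU_batch_program_use_begin(batch);
 *   GPU_matrix_bind(batch->interface);
 *   GPU_batch_bind(batch);
 *   GPU_batch_draw_advanced(batch, v_first, v_count, 0, instance_count);
 *   GPU_batch_program_use_end(batch);
 */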

/* Just draw some vertices and let the shader place them where we want. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
  /* we cannot draw without vao ... annoying ... */
  glBindVertexArray(GPU_vao_default());

  GLenum type = convert_prim_type_to_gl(prim_type);
  glDrawArrays(type, 0, v_count);

  /* Performance hog if you are drawing with the same vao multiple times.
   * Only activate for debugging. */
  // glBindVertexArray(0);
}
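
/* Attribute-less drawing sketch (assumes a shader that derives positions from gl_VertexID,
 * e.g. a full-screen triangle; `fullscreen_shader` is hypothetical, GPU_shader_bind/unbind
 * come from GPU_shader.h):
 *
 *   GPU_shader_bind(fullscreen_shader);
 *   GPU_draw_primitive(GPU_PRIM_TRIS, 3);
 *   GPU_shader_unbind();
 */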

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_shader(GPUBatch *batch, GPUShader *shader)
{
  GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg)
{
  GPUShader *shader = GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg);
  GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
  GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
  gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
  gpu_batch_presets_exit();
}

/** \} */