/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#include "MEM_guardedalloc.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"

#include "gpu_batch_private.h"
#include "gpu_context_private.h"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"

#include <stdlib.h>
#include <string.h>

static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

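/* Free every VAO cached for this batch (one per shader interface it has been drawn with)
 * and detach the batch from the GPUContext that owned them. */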
void GPU_batch_vao_cache_clear(GPUBatch *batch)
{
	if (batch->context == NULL) {
		return;
	}
	if (batch->is_dynamic_vao_count) {
		for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
			if (batch->dynamic_vaos.vao_ids[i]) {
				GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
			}
			if (batch->dynamic_vaos.interfaces[i]) {
				GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
			}
		}
		MEM_freeN(batch->dynamic_vaos.interfaces);
		MEM_freeN(batch->dynamic_vaos.vao_ids);
	}
	else {
		for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
			if (batch->static_vaos.vao_ids[i]) {
				GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
			}
			if (batch->static_vaos.interfaces[i]) {
				GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
			}
		}
	}
	batch->is_dynamic_vao_count = false;
	for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
		batch->static_vaos.vao_ids[i] = 0;
		batch->static_vaos.interfaces[i] = NULL;
	}
	gpu_context_remove_batch(batch->context, batch);
	batch->context = NULL;
}

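/* Allocate a new batch and initialize it (see GPU_batch_init_ex).
 * owns_flag tells which of the passed buffers the batch takes ownership of. */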
GPUBatch *GPU_batch_create_ex(
        GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
	GPUBatch *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
	GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
	return batch;
}

void GPU_batch_init_ex(
        GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
        uint owns_flag)
{
#if TRUST_NO_ONE
	assert(verts != NULL);
#endif

	batch->verts[0] = verts;
	for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
		batch->verts[v] = NULL;
	}
	batch->inst = NULL;
	batch->elem = elem;
	batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
	batch->phase = GPU_BATCH_READY_TO_DRAW;
	batch->is_dynamic_vao_count = false;
	batch->owns_flag = owns_flag;
	batch->free_callback = NULL;
}

/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
	GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

	batch_dst->gl_prim_type = batch_src->gl_prim_type;
	for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
		batch_dst->verts[v] = batch_src->verts[v];
	}
}

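/* Free the data owned by the batch (according to owns_flag) and its VAO cache,
 * but not the GPUBatch struct itself. */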
void GPU_batch_clear(GPUBatch *batch)
{
	if (batch->free_callback) {
		batch->free_callback(batch, batch->callback_data);
	}

	if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
		GPU_indexbuf_discard(batch->elem);
	}
	if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
		GPU_vertbuf_discard(batch->inst);
	}
	if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
		for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
			if (batch->verts[v] == NULL) {
				break;
			}
			if (batch->owns_flag & (1 << v)) {
				GPU_vertbuf_discard(batch->verts[v]);
			}
		}
	}
	GPU_batch_vao_cache_clear(batch);
}

void GPU_batch_discard(GPUBatch *batch)
{
	GPU_batch_clear(batch);
	MEM_freeN(batch);
}

void GPU_batch_callback_free_set(GPUBatch *batch, void (*callback)(GPUBatch *, void *), void *user_data)
{
	batch->free_callback = callback;
	batch->callback_data = user_data;
}

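/* Set (or replace) the instance attribute buffer. The VAO cache is cleared so the
 * new bindings are created on the next draw. */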
void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
#if TRUST_NO_ONE
	assert(inst != NULL);
#endif
	/* redo the bindings */
	GPU_batch_vao_cache_clear(batch);

	if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
		GPU_vertbuf_discard(batch->inst);
	}
	batch->inst = inst;

	if (own_vbo) {
		batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
	}
	else {
		batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
	}
}

/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(
        GPUBatch *batch, GPUVertBuf *verts,
        bool own_vbo)
{
	/* redo the bindings */
	GPU_batch_vao_cache_clear(batch);

	for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
		if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
			/* for now all VertexBuffers must have same vertex_len */
			assert(verts->vertex_len == batch->verts[0]->vertex_len);
#endif
			batch->verts[v] = verts;
			/* TODO: mark dirty so we can keep attribute bindings up-to-date */
			if (own_vbo)
				batch->owns_flag |= (1 << v);
			return v;
		}
	}

	/* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
	assert(false);
#endif
	return -1;
}

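/* Look up the VAO matching the batch's current shader interface, creating a new one
 * (and growing the cache from static to dynamic storage if needed) on a cache miss. */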
static GLuint batch_vao_get(GPUBatch *batch)
{
	/* Search through cache. */
	if (batch->is_dynamic_vao_count) {
		for (int i = 0; i < batch->dynamic_vaos.count; ++i)
			if (batch->dynamic_vaos.interfaces[i] == batch->interface)
				return batch->dynamic_vaos.vao_ids[i];
	}
	else {
		for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
			if (batch->static_vaos.interfaces[i] == batch->interface)
				return batch->static_vaos.vao_ids[i];
	}

	/* Set context of this batch.
	 * It will be bound to it until GPU_batch_vao_cache_clear is called.
	 * Until then it can only be drawn with this context. */
	if (batch->context == NULL) {
		batch->context = GPU_context_active_get();
		gpu_context_add_batch(batch->context, batch);
	}
#if TRUST_NO_ONE
	else {
		/* Make sure you are not trying to draw this batch in another context. */
		assert(batch->context == GPU_context_active_get());
	}
#endif

	/* Cache miss, time to add a new entry! */
	GLuint new_vao = 0;
	if (!batch->is_dynamic_vao_count) {
		int i; /* find first unused slot */
		for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
			if (batch->static_vaos.vao_ids[i] == 0)
				break;

		if (i < GPU_BATCH_VAO_STATIC_LEN) {
			batch->static_vaos.interfaces[i] = batch->interface;
			batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
		}
		else {
			/* Not enough space, switch to dynamic. */
			batch->is_dynamic_vao_count = true;
			/* Erase previous entries, they will be added back if drawn again. */
			for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
				GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
				GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
			}
			/* Init dynamic arrays and let the branch below set the values. */
			batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
			batch->dynamic_vaos.interfaces = MEM_callocN(batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
			batch->dynamic_vaos.vao_ids = MEM_callocN(batch->dynamic_vaos.count * sizeof(GLuint), "dyn vaos ids");
		}
	}

	if (batch->is_dynamic_vao_count) {
		int i; /* find first unused slot */
		for (i = 0; i < batch->dynamic_vaos.count; ++i)
			if (batch->dynamic_vaos.vao_ids[i] == 0)
				break;

		if (i == batch->dynamic_vaos.count) {
			/* Not enough space, reallocate the array. */
			i = batch->dynamic_vaos.count;
			batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
			batch->dynamic_vaos.interfaces = MEM_recallocN(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface *) * batch->dynamic_vaos.count);
			batch->dynamic_vaos.vao_ids = MEM_recallocN(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
		}
		batch->dynamic_vaos.interfaces[i] = batch->interface;
		batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
	}

	GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

#if TRUST_NO_ONE
	assert(new_vao != 0);
#endif

	/* We just got a fresh VAO; we need to initialize it. */
	glBindVertexArray(new_vao);
	batch_update_program_bindings(batch, 0);
	glBindVertexArray(0);

	return new_vao;
}

void GPU_batch_program_set_no_use(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
	assert(glIsProgram(shaderface->program));
	assert(batch->program_in_use == 0);
#endif
	batch->interface = shaderface;
	batch->program = program;
	batch->vao_id = batch_vao_get(batch);
}

void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
{
	GPU_batch_program_set_no_use(batch, program, shaderface);
	GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}

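/* Drop the VAO that was cached for the given shader interface (e.g. when the interface
 * is discarded), so the batch does not keep a stale entry for it. */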
void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
{
	if (batch->is_dynamic_vao_count) {
		for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
			if (batch->dynamic_vaos.interfaces[i] == interface) {
				GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
				batch->dynamic_vaos.vao_ids[i] = 0;
				batch->dynamic_vaos.interfaces[i] = NULL;
				break; /* cannot have duplicates */
			}
		}
	}
	else {
		int i;
		for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
			if (batch->static_vaos.interfaces[i] == interface) {
				GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
				batch->static_vaos.vao_ids[i] = 0;
				batch->static_vaos.interfaces[i] = NULL;
				break; /* cannot have duplicates */
			}
		}
	}
}

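/* Bind the attributes of one vertex buffer to the attribute locations of the shader
 * interface. Wide attributes (8/12/16 floats) are bound as consecutive vec4 columns.
 * v_first offsets the attribute pointers; use_instancing makes them advance per instance. */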
static void create_bindings(
        GPUVertBuf *verts, const GPUShaderInterface *interface,
        uint v_first, const bool use_instancing)
{
	const GPUVertFormat *format = &verts->format;

	const uint attr_len = format->attr_len;
	const uint stride = format->stride;

	GPU_vertbuf_use(verts);

	for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
		const GPUVertAttr *a = &format->attrs[a_idx];
		const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

		for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
			const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);

			if (input == NULL) continue;

			if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
				assert(a->fetch_mode == GPU_FETCH_FLOAT);
				assert(a->gl_comp_type == GL_FLOAT);
#endif
				for (int i = 0; i < a->comp_len / 4; ++i) {
					glEnableVertexAttribArray(input->location + i);
					glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
					glVertexAttribPointer(input->location + i, 4, a->gl_comp_type, GL_FALSE, stride,
					                      (const GLubyte *)pointer + i * 16);
				}
			}
			else {
				glEnableVertexAttribArray(input->location);
				glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);

				switch (a->fetch_mode) {
					case GPU_FETCH_FLOAT:
					case GPU_FETCH_INT_TO_FLOAT:
						glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
						break;
					case GPU_FETCH_INT_TO_FLOAT_UNIT:
						glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
						break;
					case GPU_FETCH_INT:
						glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
						break;
				}
			}
		}
	}
}

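/* Set up all attribute bindings (vertex buffers, optional instance buffer and index
 * buffer) for the VAO that is currently bound. */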
static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
{
	for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
		create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
	}
	if (batch->inst) {
		create_bindings(batch->inst, batch->interface, v_first, true);
	}
	if (batch->elem) {
		GPU_indexbuf_use(batch->elem);
	}
}

void GPU_batch_program_use_begin(GPUBatch *batch)
{
	/* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
	 *       the GL context's active program. use_program doesn't mark other programs as "not used". */
	/* TODO: make not fragile (somehow) */

	if (!batch->program_in_use) {
		glUseProgram(batch->program);
		batch->program_in_use = true;
	}
}

void GPU_batch_program_use_end(GPUBatch *batch)
{
	if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
		glUseProgram(0);
#endif
		batch->program_in_use = false;
	}
}

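/* Look up a uniform by name on the batch's shader interface; with TRUST_NO_ONE the
 * lookup is also asserted to succeed. Used by all GPU_batch_uniform_* setters below. */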
#if TRUST_NO_ONE
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name); assert(uniform);
#else
#  define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform_ensure(batch->interface, name);
#endif

void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, int value)
{
	GET_UNIFORM
	glUniform1ui(uniform->location, value);
}

void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
	GET_UNIFORM
	glUniform1i(uniform->location, value);
}

void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
	GET_UNIFORM
	glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}

void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
	GET_UNIFORM
	glUniform2f(uniform->location, x, y);
}

void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
	GET_UNIFORM
	glUniform3f(uniform->location, x, y, z);
}

void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
	GET_UNIFORM
	glUniform4f(uniform->location, x, y, z, w);
}

void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
	GET_UNIFORM
	glUniform1f(uniform->location, x);
}

void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
	GET_UNIFORM
	glUniform2fv(uniform->location, 1, data);
}

void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
	GET_UNIFORM
	glUniform3fv(uniform->location, 1, data);
}

void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
	GET_UNIFORM
	glUniform4fv(uniform->location, 1, data);
}

void GPU_batch_uniform_2fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
	GET_UNIFORM
	glUniform2fv(uniform->location, len, data);
}

void GPU_batch_uniform_4fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
{
	GET_UNIFORM
	glUniform4fv(uniform->location, len, data);
}

void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
	GET_UNIFORM
	glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}

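/* Enable primitive restart with the restart index matching the index buffer's type
 * (0xFF / 0xFFFF / 0xFFFFFFFF). */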
static void primitive_restart_enable(const GPUIndexBuf *el)
{
	// TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have ogl 4.3
	glEnable(GL_PRIMITIVE_RESTART);
	GLuint restart_index = (GLuint)0xFFFFFFFF;

#if GPU_TRACK_INDEX_RANGE
	if (el->index_type == GPU_INDEX_U8)
		restart_index = (GLuint)0xFF;
	else if (el->index_type == GPU_INDEX_U16)
		restart_index = (GLuint)0xFFFF;
#endif

	glPrimitiveRestartIndex(restart_index);
}

static void primitive_restart_disable(void)
{
	glDisable(GL_PRIMITIVE_RESTART);
}

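/* Convert a starting element index into a byte offset into the bound index buffer,
 * taking the index type (u8/u16/u32) into account. */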
static void *elem_offset(const GPUIndexBuf *el, int v_first)
{
#if GPU_TRACK_INDEX_RANGE
	if (el->index_type == GPU_INDEX_U8)
		return (GLubyte *)0 + v_first;
	else if (el->index_type == GPU_INDEX_U16)
		return (GLushort *)0 + v_first;
	else
#endif
		return (GLuint *)0 + v_first;
}

void GPU_batch_draw(GPUBatch *batch)
{
#if TRUST_NO_ONE
	assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
	assert(batch->verts[0]->vbo_id != 0);
#endif
	GPU_batch_program_use_begin(batch);
	GPU_matrix_bind(batch->interface); // external call.

	GPU_batch_draw_range_ex(batch, 0, 0, false);

	GPU_batch_program_use_end(batch);
}

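/* Draw a sub-range of the batch. v_first is the first vertex/element (or instance when
 * instancing), v_count the number to draw (0 = infer it from the buffers), and
 * force_instance forces an instanced draw call even when batch->inst is NULL. */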
void GPU_batch_draw_range_ex(GPUBatch *batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
	assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
#endif

	const bool do_instance = (force_instance || batch->inst);

	// If using offset drawing, use the default VAO and redo bindings.
	if (v_first != 0 && do_instance) {
		glBindVertexArray(GPU_vao_default());
		batch_update_program_bindings(batch, v_first);
	}
	else {
		glBindVertexArray(batch->vao_id);
	}

	if (do_instance) {
		/* Infer length if vertex count is not given */
		if (v_count == 0) {
			v_count = batch->inst->vertex_len;
		}

		if (batch->elem) {
			const GPUIndexBuf *el = batch->elem;

			if (el->use_prim_restart) {
				primitive_restart_enable(el);
			}
#if GPU_TRACK_INDEX_RANGE
			glDrawElementsInstancedBaseVertex(batch->gl_prim_type,
			                                  el->index_len,
			                                  el->gl_index_type,
			                                  0,
			                                  v_count,
			                                  el->base_index);
#else
			glDrawElementsInstanced(batch->gl_prim_type, el->index_len, GL_UNSIGNED_INT, 0, v_count);
#endif
			if (el->use_prim_restart) {
				primitive_restart_disable();
			}
		}
		else {
			glDrawArraysInstanced(batch->gl_prim_type, 0, batch->verts[0]->vertex_len, v_count);
		}
	}
	else {
		/* Infer length if vertex count is not given */
		if (v_count == 0) {
			v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
		}

		if (batch->elem) {
			const GPUIndexBuf *el = batch->elem;

			if (el->use_prim_restart) {
				primitive_restart_enable(el);
			}

			void *v_first_ofs = elem_offset(el, v_first);

#if GPU_TRACK_INDEX_RANGE
			if (el->base_index) {
				glDrawRangeElementsBaseVertex(
				        batch->gl_prim_type,
				        el->min_index,
				        el->max_index,
				        v_count,
				        el->gl_index_type,
				        v_first_ofs,
				        el->base_index);
			}
			else {
				glDrawRangeElements(batch->gl_prim_type, el->min_index, el->max_index, v_count, el->gl_index_type, v_first_ofs);
			}
#else
			glDrawElements(batch->gl_prim_type, v_count, GL_UNSIGNED_INT, v_first_ofs);
#endif
			if (el->use_prim_restart) {
				primitive_restart_disable();
			}
		}
		else {
			glDrawArrays(batch->gl_prim_type, v_first, v_count);
		}
	}

	/* Performance hog if you are drawing with the same VAO multiple times.
	 * Only activate for debugging. */
	// glBindVertexArray(0);
}

/* Just draw some vertices and let the shader place them where we want. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
	/* We cannot draw without a VAO ... annoying ... */
	glBindVertexArray(GPU_vao_default());

	GLenum type = convert_prim_type_to_gl(prim_type);
	glDrawArrays(type, 0, v_count);

	/* Performance hog if you are drawing with the same VAO multiple times.
	 * Only activate for debugging. */
	// glBindVertexArray(0);
}

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */

void GPU_batch_program_set_shader(GPUBatch *batch, GPUShader *shader)
{
	GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin_with_config(
        GPUBatch *batch, eGPUBuiltinShader shader_id, eGPUShaderConfig sh_cfg)
{
	GPUShader *shader = GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg);
	GPU_batch_program_set(batch, shader->program, shader->interface);
}

void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
	GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Init/Exit
 * \{ */

void gpu_batch_init(void)
{
	gpu_batch_presets_init();
}

void gpu_batch_exit(void)
{
	gpu_batch_presets_exit();
}

/** \} */