/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#ifndef __GPU_BATCH_H__
#define __GPU_BATCH_H__

#include "GPU_vertex_buffer.h"
#include "GPU_element.h"
#include "GPU_shader_interface.h"
#include "GPU_shader.h"

typedef enum {
  GPU_BATCH_UNUSED,
  GPU_BATCH_READY_TO_FORMAT,
  GPU_BATCH_READY_TO_BUILD,
  GPU_BATCH_BUILDING,
  GPU_BATCH_READY_TO_DRAW,
} GPUBatchPhase;

#define GPU_BATCH_VBO_MAX_LEN 3
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16

typedef struct GPUBatch {
  /** verts[0] is required, others can be NULL */
  GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
  /** Instance attributes. */
  GPUVertBuf *inst;
  /** NULL if element list not needed */
  GPUIndexBuf *elem;
  uint32_t gl_prim_type;

  /* cached values (avoid dereferencing later) */
  uint32_t vao_id;
  uint32_t program;
  const struct GPUShaderInterface *interface;

  /* book-keeping */
  uint owns_flag;
  /** used to free all vaos. this implies all vaos were created under the same context. */
  struct GPUContext *context;

  GPUBatchPhase phase;
  bool program_in_use;

  /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
   * for each shader interface. Start with a static number of vaos and fallback to dynamic count
   * if necessary. Once a batch goes dynamic it does not go back. */
  bool is_dynamic_vao_count;
  union {
    /** Static handle count */
    struct {
      const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
      uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
    } static_vaos;
    /** Dynamic handle count */
    struct {
      uint count;
      const struct GPUShaderInterface **interfaces;
      uint32_t *vao_ids;
    } dynamic_vaos;
  };

  /* XXX This is the only solution if we want to have some data structure using
   * batches as key to identify nodes. We must destroy these nodes with this callback. */
  void (*free_callback)(struct GPUBatch *, void *);
  void *callback_data;
} GPUBatch;

enum {
  GPU_BATCH_OWNS_VBO = (1 << 0),
  /* each vbo index gets bit-shifted */
  GPU_BATCH_OWNS_INSTANCES = (1 << 30),
  GPU_BATCH_OWNS_INDEX = (1u << 31u),
};

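/* Sketch of the per-vbo bit-shift mentioned above (illustrative only, not a declaration
 * from this API):
 *
 *   owns_flag |= (GPU_BATCH_OWNS_VBO << vbo_index);  // vbo_index < GPU_BATCH_VBO_MAX_LEN
 *
 * This is why GPU_BATCH_OWNS_INSTANCES and GPU_BATCH_OWNS_INDEX occupy the high bits. */
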
GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);

#define GPU_batch_create(prim, verts, elem) \
  GPU_batch_create_ex(prim, verts, elem, 0)
#define GPU_batch_init(batch, prim, verts, elem) \
  GPU_batch_init_ex(batch, prim, verts, elem, 0)

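/* Usage sketch (illustrative only; assumes a GPUVertBuf already filled via the
 * GPU_vertbuf_* API):
 *
 *   GPUBatch *batch = GPU_batch_create(GPU_PRIM_TRIS, verts, NULL);
 *   ...
 *   GPU_batch_discard(batch);
 *
 * Pass an explicit owns_flag (e.g. GPU_BATCH_OWNS_VBO) to the _ex variants if the
 * batch should free its buffers when it is discarded. */
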
void GPU_batch_clear(GPUBatch *);   /* Same as discard but does not free. */
void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

void GPU_batch_vao_cache_clear(GPUBatch *);

void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);

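/* Sketch of a free callback (MyCache and my_cache_remove are hypothetical names used
 * only for illustration of a structure keyed by batch pointers):
 *
 *   static void my_cache_remove_cb(GPUBatch *batch, void *user_data)
 *   {
 *     MyCache *cache = (MyCache *)user_data;
 *     my_cache_remove(cache, batch); // drop the node keyed by this batch
 *   }
 *
 *   GPU_batch_callback_free_set(batch, my_cache_remove_cb, cache);
 */
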
void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */

int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

#define GPU_batch_vertbuf_add(batch, verts) \
  GPU_batch_vertbuf_add_ex(batch, verts, false)

void GPU_batch_program_set_no_use(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set_shader(GPUBatch *, GPUShader *shader);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg);
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */

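/* Sketch: draw with one builtin program, then redraw the same batch with another
 * (the shader ids are examples only; any program compatible with the vertex format works):
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
 *   GPU_batch_uniform_4f(batch, "color", 0.8f, 0.8f, 0.8f, 1.0f);
 *   GPU_batch_draw(batch);
 *
 *   GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_FLAT_COLOR);
 *   GPU_batch_draw(batch);
 */
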
void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
void GPU_batch_program_use_end(GPUBatch *);

void GPU_batch_uniform_1ui(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
void GPU_batch_uniform_2f(GPUBatch *, const char *name, float x, float y);
void GPU_batch_uniform_3f(GPUBatch *, const char *name, float x, float y, float z);
void GPU_batch_uniform_4f(GPUBatch *, const char *name, float x, float y, float z, float w);
void GPU_batch_uniform_2fv(GPUBatch *, const char *name, const float data[2]);
void GPU_batch_uniform_3fv(GPUBatch *, const char *name, const float data[3]);
void GPU_batch_uniform_4fv(GPUBatch *, const char *name, const float data[4]);
void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, int len, const float *data);
void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, int len, const float *data);
void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);

void GPU_batch_draw(GPUBatch *);

/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
void GPU_batch_draw_range_ex(GPUBatch *, int v_first, int v_count, bool force_instance);

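/* Sketch of a partial draw with manual program binding (illustrative only), since
 * GPU_batch_draw_range_ex() leaves shader binding and matrix state to the caller:
 *
 *   GPU_batch_program_use_begin(batch);
 *   GPU_matrix_bind(batch->interface);            // from GPU_matrix.h
 *   GPU_batch_draw_range_ex(batch, 0, 6, false);  // first 6 vertices, no forced instancing
 *   GPU_batch_program_use_end(batch);
 */
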
/* Does not even need batch */
void GPU_draw_primitive(GPUPrimType, int v_count);

#if 0 /* future plans */

/* Can multiple batches share a GPUVertBuf? Use ref count? */

/* We often need a batch with its own data, to be created and discarded together. */
/* WithOwn variants reduce number of system allocations. */

typedef struct BatchWithOwnVertexBuffer {
  GPUBatch batch;
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;

typedef struct BatchWithOwnElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;

typedef struct BatchWithOwnVertexBufferAndElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;

GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType, GPUVertFormat *, uint v_len, uint prim_len);
/* verts: shared, own */
/* elem: none, shared, own */
GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);

#endif /* future plans */

void gpu_batch_init(void);
void gpu_batch_exit(void);

#define GPU_BATCH_DISCARD_SAFE(batch) do { \
  if (batch != NULL) { \
    GPU_batch_discard(batch); \
    batch = NULL; \
  } \
} while (0)

#define GPU_BATCH_CLEAR_SAFE(batch) do { \
  if (batch != NULL) { \
    GPU_batch_clear(batch); \
    memset(batch, 0, sizeof(*(batch))); \
  } \
} while (0)

#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) do { \
  if (_batch_array != NULL) { \
    BLI_assert(_len > 0); \
    for (int _i = 0; _i < _len; _i++) { \
      GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
    } \
    MEM_freeN(_batch_array); \
    _batch_array = NULL; \
  } \
} while (0)

#endif /* __GPU_BATCH_H__ */