/*
 * Copyright 2016, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor(s): Blender Institute
 *
 */
/** \file draw_cache.c
 *  \ingroup draw
 */
#include "DNA_scene_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"
#include "DNA_curve_types.h"
#include "DNA_object_types.h"
#include "DNA_particle_types.h"
#include "DNA_modifier_types.h"
#include "DNA_lattice_types.h"

#include "UI_resources.h"

#include "BLI_utildefines.h"
#include "BLI_math.h"
#include "BLI_listbase.h"

#include "BKE_object.h"
#include "BKE_object_deform.h"

#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_batch_utils.h"

#include "MEM_guardedalloc.h"

#include "draw_cache.h"
#include "draw_cache_impl.h"
#include "draw_manager.h"
/* Batches only (freed as an array). */
static struct DRWShapeCache {
  GPUBatch *drw_single_vertice;
  GPUBatch *drw_cursor;
  GPUBatch *drw_cursor_only_circle;
  GPUBatch *drw_fullscreen_quad;
  GPUBatch *drw_fullscreen_quad_texcoord;
  GPUBatch *drw_quad;
  GPUBatch *drw_grid;
  GPUBatch *drw_sphere;
  GPUBatch *drw_screenspace_circle;
  GPUBatch *drw_plain_axes;
  GPUBatch *drw_single_arrow;
  GPUBatch *drw_cube;
  GPUBatch *drw_circle;
  GPUBatch *drw_square;
  GPUBatch *drw_line;
  GPUBatch *drw_line_endpoints;
  GPUBatch *drw_empty_cube;
  GPUBatch *drw_empty_sphere;
  GPUBatch *drw_empty_cylinder;
  GPUBatch *drw_empty_capsule_body;
  GPUBatch *drw_empty_capsule_cap;
  GPUBatch *drw_empty_cone;
  GPUBatch *drw_arrows;
  GPUBatch *drw_axis_names;
  GPUBatch *drw_image_plane;
  GPUBatch *drw_image_plane_wire;
  GPUBatch *drw_field_wind;
  GPUBatch *drw_field_force;
  GPUBatch *drw_field_vortex;
  GPUBatch *drw_field_tube_limit;
  GPUBatch *drw_field_cone_limit;
  GPUBatch *drw_lamp;
  GPUBatch *drw_lamp_shadows;
  GPUBatch *drw_lamp_sunrays;
  GPUBatch *drw_lamp_area_square;
  GPUBatch *drw_lamp_area_disk;
  GPUBatch *drw_lamp_hemi;
  GPUBatch *drw_lamp_spot;
  GPUBatch *drw_lamp_spot_volume;
  GPUBatch *drw_lamp_spot_square;
  GPUBatch *drw_lamp_spot_square_volume;
  GPUBatch *drw_speaker;
  GPUBatch *drw_lightprobe_cube;
  GPUBatch *drw_lightprobe_planar;
  GPUBatch *drw_lightprobe_grid;
  GPUBatch *drw_bone_octahedral;
  GPUBatch *drw_bone_octahedral_wire;
  GPUBatch *drw_bone_box;
  GPUBatch *drw_bone_box_wire;
  GPUBatch *drw_bone_wire_wire;
  GPUBatch *drw_bone_envelope;
  GPUBatch *drw_bone_envelope_outline;
  GPUBatch *drw_bone_point;
  GPUBatch *drw_bone_point_wire;
  GPUBatch *drw_bone_stick;
  GPUBatch *drw_bone_arrows;
  GPUBatch *drw_bone_dof_sphere;
  GPUBatch *drw_bone_dof_lines;
  GPUBatch *drw_camera;
  GPUBatch *drw_camera_frame;
  GPUBatch *drw_camera_tria;
  GPUBatch *drw_camera_focus;
  GPUBatch *drw_particle_cross;
  GPUBatch *drw_particle_circle;
  GPUBatch *drw_particle_axis;
  GPUBatch *drw_gpencil_axes;
} SHC = {NULL};
void DRW_shape_cache_free(void)
{
  uint i = sizeof(SHC) / sizeof(GPUBatch *);
  GPUBatch **batch = (GPUBatch **)&SHC;
  while (i--) {
    GPU_BATCH_DISCARD_SAFE(*batch);
    batch++;
  }
}
void DRW_shape_cache_reset(void)
{
  uint i = sizeof(SHC) / sizeof(GPUBatch *);
  GPUBatch **batch = (GPUBatch **)&SHC;
  while (i--) {
    if (*batch) {
      GPU_batch_vao_cache_clear(*batch);
    }
    batch++;
  }
}
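/* Note: the two loops above walk SHC as a flat array of GPUBatch pointers,
 * which is only valid while DRWShapeCache contains nothing else. A
 * compile-time guard in this spirit (a sketch, not part of the original file)
 * could make that assumption explicit:
 *
 *   BLI_STATIC_ASSERT(sizeof(struct DRWShapeCache) % sizeof(GPUBatch *) == 0,
 *                     "DRWShapeCache must only hold GPUBatch pointers")
 */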
/* -------------------------------------------------------------------- */

/** \name Helper functions
 * \{ */
static void UNUSED_FUNCTION(add_fancy_edge)(
        GPUVertBuf *vbo, uint pos_id, uint n1_id, uint n2_id,
        uint *v_idx, const float co1[3], const float co2[3],
        const float n1[3], const float n2[3])
{
  GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
  GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co1);

  GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
  GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co2);
}
static void add_lat_lon_vert(
        GPUVertBuf *vbo, uint pos_id, uint nor_id,
        uint *v_idx, const float rad, const float lat, const float lon)
{
  float pos[3], nor[3];
  nor[0] = sinf(lat) * cosf(lon);
  nor[1] = cosf(lat);
  nor[2] = sinf(lat) * sinf(lon);
  mul_v3_v3fl(pos, nor, rad);

  GPU_vertbuf_attr_set(vbo, nor_id, *v_idx, nor);
  GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, pos);
}
static GPUVertBuf *fill_arrows_vbo(const float scale)
{
  /* Position Only 3D format */
  static GPUVertFormat format = { 0 };
  static struct { uint pos; } attr_id;
  if (format.attr_len == 0) {
    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(vbo, 6 * 3);

  float v1[3] = {0.0, 0.0, 0.0};
  float v2[3] = {0.0, 0.0, 0.0};
  float vtmp1[3], vtmp2[3];

  for (int axis = 0; axis < 3; axis++) {
    const int arrow_axis = (axis == 0) ? 1 : 0;

    /* Shaft of the arrow along the current axis. */
    v2[axis] = 1.0f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 0, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 1, vtmp2);

    /* Two short strokes forming the arrow head near the tip. */
    v1[axis] = 0.85f;
    v1[arrow_axis] = -0.08f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 2, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 3, vtmp2);

    v1[arrow_axis] = 0.08f;
    mul_v3_v3fl(vtmp1, v1, scale);
    mul_v3_v3fl(vtmp2, v2, scale);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 4, vtmp1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 5, vtmp2);

    /* reset v1 & v2 to zero */
    v1[arrow_axis] = v1[axis] = v2[axis] = 0.0f;
  }

  return vbo;
}
static GPUVertBuf *sphere_wire_vbo(const float rad)
{
#define NSEGMENTS 32
  /* Position Only 3D format */
  static GPUVertFormat format = { 0 };
  static struct { uint pos; } attr_id;
  if (format.attr_len == 0) {
    attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
  GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2 * 3);

  /* a single ring of vertices */
  float p[NSEGMENTS][2];
  for (int i = 0; i < NSEGMENTS; ++i) {
    float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
    p[i][0] = rad * cosf(angle);
    p[i][1] = rad * sinf(angle);
  }

  /* reuse the ring in each of the three axis-aligned planes */
  for (int axis = 0; axis < 3; ++axis) {
    for (int i = 0; i < NSEGMENTS; ++i) {
      for (int j = 0; j < 2; ++j) {
        float cv[2], v[3];

        cv[0] = p[(i + j) % NSEGMENTS][0];
        cv[1] = p[(i + j) % NSEGMENTS][1];

        if (axis == 0) {
          ARRAY_SET_ITEMS(v, cv[0], cv[1], 0.0f);
        }
        else if (axis == 1) {
          ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
        }
        else {
          ARRAY_SET_ITEMS(v, 0.0f, cv[0], cv[1]);
        }
        GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + j + (NSEGMENTS * 2 * axis), v);
      }
    }
  }

  return vbo;
#undef NSEGMENTS
}
/* Use this one for rendering fullscreen passes. For 3D objects use DRW_cache_quad_get(). */
GPUBatch *DRW_cache_fullscreen_quad_get(void)
{
  if (!SHC.drw_fullscreen_quad) {
    /* Use a triangle instead of a real quad */
    /* https://www.slideshare.net/DevCentralAMD/vertex-shader-tricks-bill-bilodeau - slide 14 */
    float pos[3][2] = {{-1.0f, -1.0f}, { 3.0f, -1.0f}, {-1.0f,  3.0f}};
    float uvs[3][2] = {{ 0.0f,  0.0f}, { 2.0f,  0.0f}, { 0.0f,  2.0f}};

    /* Position Only 2D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos, uvs; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      GPU_vertformat_alias_add(&format, "texCoord");
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 3);

    for (int i = 0; i < 3; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
      GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
    }

    SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_fullscreen_quad;
}
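/* Illustrative note (not from the original file): why one triangle suffices.
 * Clip space is the square [-1, 1] x [-1, 1]; the triangle (-1, -1), (3, -1),
 * (-1, 3) contains it entirely. The UVs grow at half the rate of the
 * positions (2.0 instead of 3.0 at the far corners), so linear interpolation
 * gives uv == (pos + 1) / 2, i.e. exactly 0..1 over the visible region:
 *
 *   pos = (-1, -1) -> uv = (0, 0)
 *   pos = ( 1, -1) -> uv = (1, 0)
 *   pos = (-1,  1) -> uv = (0, 1)
 *   pos = ( 1,  1) -> uv = (1, 1)
 *
 * Compared to a two-triangle quad this also avoids the diagonal seam, where
 * helper pixels along the shared edge cost extra fill. */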
/* Just a regular quad with 4 vertices. */
GPUBatch *DRW_cache_quad_get(void)
{
  if (!SHC.drw_quad) {
    float pos[4][2] = {{-1.0f, -1.0f}, { 1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
    float uvs[4][2] = {{ 0.0f,  0.0f}, { 1.0f,  0.0f}, {1.0f, 1.0f}, { 0.0f, 1.0f}};

    /* Position Only 2D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos, uvs; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 4);

    for (int i = 0; i < 4; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
      GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
    }

    SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_quad;
}
GPUBatch *DRW_cache_grid_get(void)
{
  if (!SHC.drw_grid) {
    /* Position Only 2D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 8 * 8 * 2 * 3);

    uint v_idx = 0;
    for (int i = 0; i < 8; ++i) {
      for (int j = 0; j < 8; ++j) {
        float pos0[2] = {(float)i / 8.0f, (float)j / 8.0f};
        float pos1[2] = {(float)(i + 1) / 8.0f, (float)j / 8.0f};
        float pos2[2] = {(float)i / 8.0f, (float)(j + 1) / 8.0f};
        float pos3[2] = {(float)(i + 1) / 8.0f, (float)(j + 1) / 8.0f};

        /* remap each corner from [0, 1] into clip space [-1, 1] */
        madd_v2_v2v2fl(pos0, (float[2]){-1.0f, -1.0f}, pos0, 2.0f);
        madd_v2_v2v2fl(pos1, (float[2]){-1.0f, -1.0f}, pos1, 2.0f);
        madd_v2_v2v2fl(pos2, (float[2]){-1.0f, -1.0f}, pos2, 2.0f);
        madd_v2_v2v2fl(pos3, (float[2]){-1.0f, -1.0f}, pos3, 2.0f);

        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos0);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);

        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos3);
      }
    }

    SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_grid;
}
GPUBatch *DRW_cache_sphere_get(void)
{
  if (!SHC.drw_sphere) {
    SHC.drw_sphere = gpu_batch_sphere(32, 24);
  }
  return SHC.drw_sphere;
}
/** \} */

/* -------------------------------------------------------------------- */
GPUBatch *DRW_cache_cube_get(void)
{
  if (!SHC.drw_cube) {
    const GLfloat verts[8][3] = {
      {-1.0f, -1.0f, -1.0f},
      {-1.0f, -1.0f,  1.0f},
      {-1.0f,  1.0f, -1.0f},
      {-1.0f,  1.0f,  1.0f},
      { 1.0f, -1.0f, -1.0f},
      { 1.0f, -1.0f,  1.0f},
      { 1.0f,  1.0f, -1.0f},
      { 1.0f,  1.0f,  1.0f}
    };

    /* 12 triangles, two per face. */
    const uint indices[36] = {
      0, 1, 3,  0, 3, 2,
      4, 6, 7,  4, 7, 5,
      0, 4, 5,  0, 5, 1,
      2, 3, 7,  2, 7, 6,
      0, 2, 6,  0, 6, 4,
      1, 5, 7,  1, 7, 3,
    };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 36);

    for (int i = 0; i < 36; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
    }

    SHC.drw_cube = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_cube;
}
GPUBatch *DRW_cache_empty_cube_get(void)
{
  if (!SHC.drw_empty_cube) {
    const GLfloat verts[8][3] = {
      {-1.0f, -1.0f, -1.0f},
      {-1.0f, -1.0f,  1.0f},
      {-1.0f,  1.0f, -1.0f},
      {-1.0f,  1.0f,  1.0f},
      { 1.0f, -1.0f, -1.0f},
      { 1.0f, -1.0f,  1.0f},
      { 1.0f,  1.0f, -1.0f},
      { 1.0f,  1.0f,  1.0f}
    };

    const GLubyte indices[24] = {0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5, 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 24);

    for (int i = 0; i < 24; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
    }

    SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_cube;
}
GPUBatch *DRW_cache_circle_get(void)
{
#define CIRCLE_RESOL 64
  if (!SHC.drw_circle) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);

    for (int a = 0; a < CIRCLE_RESOL; a++) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
    }

    SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_circle;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_square_get(void)
{
  if (!SHC.drw_square) {
    float p[4][3] = {
      { 1.0f, 0.0f,  1.0f},
      { 1.0f, 0.0f, -1.0f},
      {-1.0f, 0.0f, -1.0f},
      {-1.0f, 0.0f,  1.0f}};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 8);

    for (int i = 0; i < 4; i++) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2, p[i % 4]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, p[(i + 1) % 4]);
    }

    SHC.drw_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_square;
}
GPUBatch *DRW_cache_single_line_get(void)
{
  /* Z axis line */
  if (!SHC.drw_line) {
    float v1[3] = {0.0f, 0.0f, 0.0f};
    float v2[3] = {0.0f, 0.0f, 1.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 2);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);

    SHC.drw_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_line;
}
GPUBatch *DRW_cache_single_line_endpoints_get(void)
{
  /* Z axis line */
  if (!SHC.drw_line_endpoints) {
    float v1[3] = {0.0f, 0.0f, 0.0f};
    float v2[3] = {0.0f, 0.0f, 1.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 2);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);

    SHC.drw_line_endpoints = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_line_endpoints;
}
GPUBatch *DRW_cache_screenspace_circle_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_screenspace_circle) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);

    for (int a = 0; a <= CIRCLE_RESOL; a++) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
    }

    SHC.drw_screenspace_circle = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_screenspace_circle;
#undef CIRCLE_RESOL
}
/* Grease Pencil object */
GPUBatch *DRW_cache_gpencil_axes_get(void)
{
  if (!SHC.drw_gpencil_axes) {
    int axis;
    float v1[3] = { 0.0f, 0.0f, 0.0f };
    float v2[3] = { 0.0f, 0.0f, 0.0f };

    /* cube data */
    const GLfloat verts[8][3] = {
      { -0.25f, -0.25f, -0.25f },
      { -0.25f, -0.25f,  0.25f },
      { -0.25f,  0.25f, -0.25f },
      { -0.25f,  0.25f,  0.25f },
      {  0.25f, -0.25f, -0.25f },
      {  0.25f, -0.25f,  0.25f },
      {  0.25f,  0.25f, -0.25f },
      {  0.25f,  0.25f,  0.25f }
    };

    const GLubyte indices[24] = { 0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5, 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6 };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static uint pos_id;
    if (format.attr_len == 0) {
      pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);

    /* alloc 30 elements for the cube and the 3 axes */
    GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(indices) + 6);

    /* axes lines */
    for (axis = 0; axis < 3; axis++) {
      v1[axis] = 1.0f;
      v2[axis] = -1.0f;

      GPU_vertbuf_attr_set(vbo, pos_id, axis * 2, v1);
      GPU_vertbuf_attr_set(vbo, pos_id, axis * 2 + 1, v2);

      /* reset v1 & v2 to zero for next axis */
      v1[axis] = v2[axis] = 0.0f;
    }

    /* cube wire, after the 6 axis vertices */
    for (int i = 0; i < 24; ++i) {
      GPU_vertbuf_attr_set(vbo, pos_id, i + 6, verts[indices[i]]);
    }

    SHC.drw_gpencil_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_gpencil_axes;
}
/* -------------------------------------------------------------------- */

/** \name Common Object API
 * \{ */
GPUBatch *DRW_cache_object_all_edges_get(Object *ob)
{
  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_all_edges_get(ob);

    /* TODO, should match 'DRW_cache_object_surface_get' */
    default:
      return NULL;
  }
}
GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
{
  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);

    /* TODO, should match 'DRW_cache_object_surface_get' */
    default:
      return NULL;
  }
}
GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
{
  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_face_wireframe_get(ob);
    case OB_CURVE:
      return DRW_cache_curve_face_wireframe_get(ob);
    case OB_SURF:
      return DRW_cache_surf_face_wireframe_get(ob);
    case OB_FONT:
      return DRW_cache_text_face_wireframe_get(ob);
    case OB_MBALL:
      return DRW_cache_mball_face_wireframe_get(ob);
    default:
      return NULL;
  }
}
GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
{
  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_loose_edges_get(ob);
    case OB_CURVE:
      return DRW_cache_curve_loose_edges_get(ob);
    case OB_SURF:
      return DRW_cache_surf_loose_edges_get(ob);
    case OB_FONT:
      return DRW_cache_text_loose_edges_get(ob);
    case OB_MBALL:
      /* Cannot have any loose edge */
    default:
      return NULL;
  }
}
GPUBatch *DRW_cache_object_surface_get(Object *ob)
{
  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_surface_get(ob);
    case OB_CURVE:
      return DRW_cache_curve_surface_get(ob);
    case OB_SURF:
      return DRW_cache_surf_surface_get(ob);
    case OB_FONT:
      return DRW_cache_text_surface_get(ob);
    case OB_MBALL:
      return DRW_cache_mball_surface_get(ob);
    default:
      return NULL;
  }
}
GPUBatch **DRW_cache_object_surface_material_get(
        struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
        char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
{
  if (auto_layer_names != NULL) {
    *auto_layer_names = NULL;
    *auto_layer_is_srgb = NULL;
    *auto_layer_count = 0;
  }

  switch (ob->type) {
    case OB_MESH:
      return DRW_cache_mesh_surface_shaded_get(
              ob, gpumat_array, gpumat_array_len,
              auto_layer_names, auto_layer_is_srgb, auto_layer_count);
    case OB_CURVE:
      return DRW_cache_curve_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
    case OB_SURF:
      return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
    case OB_FONT:
      return DRW_cache_text_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
    case OB_MBALL:
      return DRW_cache_mball_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
    default:
      return NULL;
  }
}
/** \} */

/* -------------------------------------------------------------------- */

/** \name Empties
 * \{ */
GPUBatch *DRW_cache_plain_axes_get(void)
{
  if (!SHC.drw_plain_axes) {
    int axis;
    float v1[3] = {0.0f, 0.0f, 0.0f};
    float v2[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 6);

    for (axis = 0; axis < 3; axis++) {
      v1[axis] = 1.0f;
      v2[axis] = -1.0f;

      GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2, v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2 + 1, v2);

      /* reset v1 & v2 to zero for next axis */
      v1[axis] = v2[axis] = 0.0f;
    }

    SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_plain_axes;
}
GPUBatch *DRW_cache_single_arrow_get(void)
{
  if (!SHC.drw_single_arrow) {
    float v1[3] = {0.0f, 0.0f, 1.0f}, v2[3], v3[3];

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    /* Square Pyramid */
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 12);

    v2[0] = 0.035f; v2[1] = 0.035f;
    v3[0] = -0.035f; v3[1] = 0.035f;
    v2[2] = v3[2] = 0.75f;

    for (int sides = 0; sides < 4; sides++) {
      if (sides % 2 == 1) {
        v2[0] = -v2[0];
        v3[1] = -v3[1];
      }
      else {
        v2[1] = -v2[1];
        v3[0] = -v3[0];
      }

      GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 0, v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 1, v2);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 2, v3);
    }

    SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_single_arrow;
}
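/* Illustrative trace (not from the original file) of the sign-flip loop above:
 * with v2 = (0.035, 0.035, 0.75) and v3 = (-0.035, 0.035, 0.75) initially,
 * each iteration negates one coordinate of each vertex, rotating the base
 * edge a quarter turn:
 *   sides 0 -> base edge at x =  0.035
 *   sides 1 -> base edge at y = -0.035
 *   sides 2 -> base edge at x = -0.035
 *   sides 3 -> base edge at y =  0.035
 * so the four triangles sharing apex v1 = (0, 0, 1) close into a small
 * four-sided pyramid at the tip of the arrow. */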
GPUBatch *DRW_cache_empty_sphere_get(void)
{
  if (!SHC.drw_empty_sphere) {
    GPUVertBuf *vbo = sphere_wire_vbo(1.0f);
    SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_sphere;
}
GPUBatch *DRW_cache_empty_cone_get(void)
{
#define NSEGMENTS 8
  if (!SHC.drw_empty_cone) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    for (int i = 0; i < NSEGMENTS; ++i) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);
    }

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);

    for (int i = 0; i < NSEGMENTS; ++i) {
      float cv[2], v[3];
      cv[0] = p[(i) % NSEGMENTS][0];
      cv[1] = p[(i) % NSEGMENTS][1];

      /* cone sides */
      ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
      ARRAY_SET_ITEMS(v, 0.0f, 2.0f, 0.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);

      /* end ring */
      ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
      cv[0] = p[(i + 1) % NSEGMENTS][0];
      cv[1] = p[(i + 1) % NSEGMENTS][1];
      ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
    }

    SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_cone;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_empty_cylinder_get(void)
{
#define NSEGMENTS 12
  if (!SHC.drw_empty_cylinder) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    for (int i = 0; i < NSEGMENTS; ++i) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);
    }

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 6);

    for (int i = 0; i < NSEGMENTS; ++i) {
      float cv[2], pv[2], v[3];
      cv[0] = p[(i) % NSEGMENTS][0];
      cv[1] = p[(i) % NSEGMENTS][1];
      pv[0] = p[(i + 1) % NSEGMENTS][0];
      pv[1] = p[(i + 1) % NSEGMENTS][1];

      /* cylinder sides */
      copy_v3_fl3(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6, v);
      copy_v3_fl3(v, cv[0], cv[1],  1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 1, v);

      /* top ring */
      copy_v3_fl3(v, cv[0], cv[1],  1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 2, v);
      copy_v3_fl3(v, pv[0], pv[1],  1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 3, v);

      /* bottom ring */
      copy_v3_fl3(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 4, v);
      copy_v3_fl3(v, pv[0], pv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 5, v);
    }

    SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_cylinder;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_empty_capsule_body_get(void)
{
  if (!SHC.drw_empty_capsule_body) {
    const float pos[8][3] = {
      { 1.0f,  0.0f, 1.0f},
      { 1.0f,  0.0f, 0.0f},
      { 0.0f,  1.0f, 1.0f},
      { 0.0f,  1.0f, 0.0f},
      {-1.0f,  0.0f, 1.0f},
      {-1.0f,  0.0f, 0.0f},
      { 0.0f, -1.0f, 1.0f},
      { 0.0f, -1.0f, 0.0f}
    };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 8);
    GPU_vertbuf_attr_fill(vbo, attr_id.pos, pos);

    SHC.drw_empty_capsule_body = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_capsule_body;
}
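/* Illustrative note (not from the original file): the body batch is just the
 * four vertical edges of the capsule, at (+-1, 0) and (0, +-1), running from
 * z = 0 to z = 1. The rounded ends come from DRW_cache_empty_capsule_cap_get()
 * below, presumably positioned and scaled by the calling draw code. */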
GPUBatch *DRW_cache_empty_capsule_cap_get(void)
{
#define NSEGMENTS 24 /* Must be multiple of 2. */
  if (!SHC.drw_empty_capsule_cap) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    for (int i = 0; i < NSEGMENTS; ++i) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);
    }

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, (NSEGMENTS * 2) * 2);

    uint vidx = 0;
    /* Base circle */
    for (int i = 0; i < NSEGMENTS; ++i) {
      float v[3] = {0.0f, 0.0f, 0.0f};
      copy_v2_v2(v, p[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v2_v2(v, p[(i + 1) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    for (int i = 0; i < NSEGMENTS / 2; ++i) {
      float v[3] = {0.0f, 0.0f, 0.0f};
      int ci = i % NSEGMENTS;
      int pi = (i + 1) % NSEGMENTS;

      /* XZ half circle */
      copy_v3_fl3(v, p[ci][0], 0.0f, p[ci][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v3_fl3(v, p[pi][0], 0.0f, p[pi][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);

      /* YZ half circle */
      copy_v3_fl3(v, 0.0f, p[ci][0], p[ci][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      copy_v3_fl3(v, 0.0f, p[pi][0], p[pi][1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    SHC.drw_empty_capsule_cap = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_empty_capsule_cap;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_image_plane_get(void)
{
  if (!SHC.drw_image_plane) {
    const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
    static GPUVertFormat format = { 0 };
    static struct { uint pos, texCoords; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      attr_id.texCoords = GPU_vertformat_attr_add(&format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 4);
    for (uint j = 0; j < 4; j++) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
      GPU_vertbuf_attr_set(vbo, attr_id.texCoords, j, quad[j]);
    }
    SHC.drw_image_plane = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_image_plane;
}
GPUBatch *DRW_cache_image_plane_wire_get(void)
{
  if (!SHC.drw_image_plane_wire) {
    const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 4);
    for (uint j = 0; j < 4; j++) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
    }
    SHC.drw_image_plane_wire = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_image_plane_wire;
}
GPUBatch *DRW_cache_field_wind_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_field_wind) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 4);

    /* four stacked circles */
    for (int i = 0; i < 4; i++) {
      float z = 0.05f * (float)i;
      for (int a = 0; a < CIRCLE_RESOL; a++) {
        v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);

        v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
      }
    }

    SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_field_wind;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_force_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_field_force) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 3);

    /* three concentric circles */
    for (int i = 0; i < 3; i++) {
      float radius = 1.0f + 0.5f * (float)i;
      for (int a = 0; a < CIRCLE_RESOL; a++) {
        v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[2] = 0.0f;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);

        v[0] = radius * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[1] = radius * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[2] = 0.0f;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
      }
    }

    SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_field_force;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_vortex_get(void)
{
#define SPIRAL_RESOL 32
  if (!SHC.drw_field_vortex) {
    float v[3] = {0.0f, 0.0f, 0.0f};
    uint v_idx = 0;

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, SPIRAL_RESOL * 2 + 1);

    /* spiral inward, then back outward on the opposite side */
    for (int a = SPIRAL_RESOL; a > -1; a--) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
      v[1] = cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
    }

    for (int a = 1; a <= SPIRAL_RESOL; a++) {
      v[0] = -sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
      v[1] = -cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
    }

    SHC.drw_field_vortex = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_field_vortex;
#undef SPIRAL_RESOL
}
GPUBatch *DRW_cache_field_tube_limit_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_field_tube_limit) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);

    uint v_idx = 0;
    /* upper & lower rings */
    for (int i = 0; i < 2; i++) {
      float z = (float)i * 2.0f - 1.0f;
      for (int a = 0; a < CIRCLE_RESOL; a++) {
        v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);

        v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
      }
    }

    /* four vertical side lines */
    for (int a = 0; a < 4; a++) {
      for (int i = 0; i < 2; i++) {
        float z = (float)i * 2.0f - 1.0f;
        v[0] = sinf((2.0f * M_PI * a) / 4.0f);
        v[1] = cosf((2.0f * M_PI * a) / 4.0f);
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
      }
    }

    SHC.drw_field_tube_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_field_tube_limit;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_cone_limit_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_field_cone_limit) {
    float v[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);

    uint v_idx = 0;
    /* upper & lower rings */
    for (int i = 0; i < 2; i++) {
      float z = (float)i * 2.0f - 1.0f;
      for (int a = 0; a < CIRCLE_RESOL; a++) {
        v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);

        v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
      }
    }

    /* cone sides: scaling by z makes the four lines cross at the origin */
    for (int a = 0; a < 4; a++) {
      for (int i = 0; i < 2; i++) {
        float z = (float)i * 2.0f - 1.0f;
        v[0] = z * sinf((2.0f * M_PI * a) / 4.0f);
        v[1] = z * cosf((2.0f * M_PI * a) / 4.0f);
        v[2] = z;
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
      }
    }

    SHC.drw_field_cone_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_field_cone_limit;
#undef CIRCLE_RESOL
}
/** \} */

/* -------------------------------------------------------------------- */

/** \name Lamps
 * \{ */
GPUBatch *DRW_cache_lamp_get(void)
{
#define NSEGMENTS 8
  if (!SHC.drw_lamp) {
    float v[2];

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);

    for (int a = 0; a < NSEGMENTS * 2; a += 2) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
      v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);

      v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
      v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
    }

    SHC.drw_lamp = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_lamp_shadows_get(void)
{
#define NSEGMENTS 10
  if (!SHC.drw_lamp_shadows) {
    float v[2];

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);

    for (int a = 0; a < NSEGMENTS * 2; a += 2) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
      v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);

      v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
      v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
    }

    SHC.drw_lamp_shadows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_shadows;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_lamp_sunrays_get(void)
{
  if (!SHC.drw_lamp_sunrays) {
    float v[2], v1[2], v2[2];

    /* Position Only 2D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 32);

    for (int a = 0; a < 8; a++) {
      v[0] = sinf((2.0f * M_PI * a) / 8.0f);
      v[1] = cosf((2.0f * M_PI * a) / 8.0f);

      mul_v2_v2fl(v1, v, 1.6f);
      mul_v2_v2fl(v2, v, 1.9f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4, v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 1, v2);

      mul_v2_v2fl(v1, v, 2.2f);
      mul_v2_v2fl(v2, v, 2.5f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 2, v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 3, v2);
    }

    SHC.drw_lamp_sunrays = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_sunrays;
}
GPUBatch *DRW_cache_lamp_area_square_get(void)
{
  if (!SHC.drw_lamp_area_square) {
    float v1[3] = {0.0f, 0.0f, 0.0f};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 8);

    /* walk the four corners, two vertices per edge */
    v1[0] = v1[1] = 0.5f;
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
    v1[0] = -0.5f;
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 2, v1);
    v1[1] = -0.5f;
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 3, v1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 4, v1);
    v1[0] = 0.5f;
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 5, v1);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 6, v1);
    v1[1] = 0.5f;
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 7, v1);

    SHC.drw_lamp_area_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_area_square;
}
GPUBatch *DRW_cache_lamp_area_disk_get(void)
{
#define NSEGMENTS 32
  if (!SHC.drw_lamp_area_disk) {
    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 2 * NSEGMENTS);

    float v[3] = {0.0f, 0.5f, 0.0f};
    GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v);
    for (int a = 1; a < NSEGMENTS; a++) {
      v[0] = 0.5f * sinf(2.0f * (float)M_PI * a / NSEGMENTS);
      v[1] = 0.5f * cosf(2.0f * (float)M_PI * a / NSEGMENTS);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a - 1, v);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a, v);
    }
    copy_v3_fl3(v, 0.0f, 0.5f, 0.0f);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, (2 * NSEGMENTS) - 1, v);

    SHC.drw_lamp_area_disk = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_area_disk;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_lamp_hemi_get(void)
{
#define CIRCLE_RESOL 32
  if (!SHC.drw_lamp_hemi) {
    float v[3];
    int vidx = 0;

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 - 6 * 2 * 2);

    /* XZ plane */
    for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
      v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2);
      v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
      v[1] = 0.0f;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);

      v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2);
      v[2] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
      v[1] = 0.0f;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    /* YZ plane */
    for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
      v[2] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL)) - 1.0f;
      v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v[0] = 0.0f;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);

      v[2] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL)) - 1.0f;
      v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
      v[0] = 0.0f;
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    /* XY plane full circle */
    /* leave v[2] as it is */
    const float rad = cosf((2.0f * M_PI * 3) / ((float)CIRCLE_RESOL));
    for (int a = 0; a < CIRCLE_RESOL; a++) {
      v[1] = rad * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      v[0] = rad * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);

      v[1] = rad * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
      v[0] = rad * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    SHC.drw_lamp_hemi = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_hemi;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_lamp_spot_get(void)
{
#define NSEGMENTS 32
  if (!SHC.drw_lamp_spot) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    float n[NSEGMENTS][3];
    float neg[NSEGMENTS][3];
    float half_angle = 2 * M_PI / ((float)NSEGMENTS * 2);
    for (int i = 0; i < NSEGMENTS; ++i) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);

      n[i][0] = cosf(angle - half_angle);
      n[i][1] = sinf(angle - half_angle);
      n[i][2] = cosf(M_PI / 16.0f); /* slope of the cone */
      normalize_v3(n[i]); /* necessary? */

      negate_v3_v3(neg[i], n[i]);
    }

    static GPUVertFormat format = { 0 };
    static struct { uint pos, n1, n2; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
      attr_id.n1 = GPU_vertformat_attr_add(&format, "N1", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
      attr_id.n2 = GPU_vertformat_attr_add(&format, "N2", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);

    for (int i = 0; i < NSEGMENTS; ++i) {
      float cv[2], v[3];
      cv[0] = p[i % NSEGMENTS][0];
      cv[1] = p[i % NSEGMENTS][1];

      /* cone sides */
      ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
      ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);

      GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4, n[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 1, n[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4, n[(i + 1) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 1, n[(i + 1) % NSEGMENTS]);

      /* end ring */
      ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
      cv[0] = p[(i + 1) % NSEGMENTS][0];
      cv[1] = p[(i + 1) % NSEGMENTS][1];
      ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);

      GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 2, n[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 3, n[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 2, neg[(i) % NSEGMENTS]);
      GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 3, neg[(i) % NSEGMENTS]);
    }

    SHC.drw_lamp_spot = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_spot;
#undef NSEGMENTS
}
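/* Note (an inference about usage, not stated in this file): each spot-cone
 * edge carries "N1" and "N2", the normals of its two neighbouring cone
 * facets. A shader can compare their signs against the view vector and keep
 * only edges where one facet faces the viewer and the other does not, i.e.
 * draw the cone's silhouette instead of every segment. */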
GPUBatch *DRW_cache_lamp_spot_volume_get(void)
{
#define NSEGMENTS 32
  if (!SHC.drw_lamp_spot_volume) {
    /* a single ring of vertices */
    float p[NSEGMENTS][2];
    for (int i = 0; i < NSEGMENTS; ++i) {
      float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
      p[i][0] = cosf(angle);
      p[i][1] = sinf(angle);
    }

    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 3);

    uint v_idx = 0;
    for (int i = 0; i < NSEGMENTS; ++i) {
      float cv[2], v[3];

      /* cone sides */
      ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);

      cv[0] = p[i % NSEGMENTS][0];
      cv[1] = p[i % NSEGMENTS][1];
      ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);

      cv[0] = p[(i + 1) % NSEGMENTS][0];
      cv[1] = p[(i + 1) % NSEGMENTS][1];
      ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
    }

    SHC.drw_lamp_spot_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_spot_volume;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_lamp_spot_square_get(void)
{
  if (!SHC.drw_lamp_spot_square) {
    float p[5][3] = {
      { 0.0f,  0.0f,  0.0f},
      { 1.0f,  1.0f, -1.0f},
      { 1.0f, -1.0f, -1.0f},
      {-1.0f, -1.0f, -1.0f},
      {-1.0f,  1.0f, -1.0f}};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 16);

    uint v_idx = 0;
    /* pyramid sides */
    for (int i = 1; i <= 4; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[i]);

      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
    }

    SHC.drw_lamp_spot_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_spot_square;
}
GPUBatch *DRW_cache_lamp_spot_square_volume_get(void)
{
  if (!SHC.drw_lamp_spot_square_volume) {
    float p[5][3] = {
      { 0.0f,  0.0f,  0.0f},
      { 1.0f,  1.0f, -1.0f},
      { 1.0f, -1.0f, -1.0f},
      {-1.0f, -1.0f, -1.0f},
      {-1.0f,  1.0f, -1.0f}};

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 12);

    uint v_idx = 0;
    /* pyramid sides */
    for (int i = 1; i <= 4; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
    }

    SHC.drw_lamp_spot_square_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lamp_spot_square_volume;
}
/** \} */

/* -------------------------------------------------------------------- */

/** \name Speaker
 * \{ */
GPUBatch *DRW_cache_speaker_get(void)
{
  if (!SHC.drw_speaker) {
    float v[3];
    const int segments = 16;
    int vidx = 0;

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 3 * segments * 2 + 4 * 4);

    /* three rings: one wide base, two narrower */
    for (int j = 0; j < 3; j++) {
      float z = 0.25f * j - 0.125f;
      float r = (j == 0 ? 0.5f : 0.25f);

      copy_v3_fl3(v, r, 0.0f, z);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      for (int i = 1; i < segments; i++) {
        float x = cosf(2.f * (float)M_PI * i / segments) * r;
        float y = sinf(2.f * (float)M_PI * i / segments) * r;
        copy_v3_fl3(v, x, y, z);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
      }
      copy_v3_fl3(v, r, 0.0f, z);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
    }

    /* four connector lines between the rings */
    for (int j = 0; j < 4; j++) {
      float x = (((j + 1) % 2) * (j - 1)) * 0.5f;
      float y = ((j % 2) * (j - 2)) * 0.5f;
      for (int i = 0; i < 3; i++) {
        if (i == 1) {
          x *= 0.5f;
          y *= 0.5f;
        }

        float z = 0.25f * i - 0.125f;
        copy_v3_fl3(v, x, y, z);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        if (i == 1) {
          GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
        }
      }
    }

    SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_speaker;
}
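/* Illustrative evaluation (not from the original file) of the index
 * arithmetic in the connector loop above:
 *   x = (((j + 1) % 2) * (j - 1)) * 0.5f
 *   y = ((j % 2) * (j - 2)) * 0.5f
 * places the four vertical lines on the axes:
 *   j = 0 -> (x, y) = (-0.5,  0.0)
 *   j = 1 -> (x, y) = ( 0.0, -0.5)
 *   j = 2 -> (x, y) = ( 0.5,  0.0)
 *   j = 3 -> (x, y) = ( 0.0,  0.5)
 * and the i == 1 branch halves the radius, so each connector kinks inward at
 * the middle ring of the speaker cone. */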
/** \} */

/* -------------------------------------------------------------------- */

/** \name Probe
 * \{ */
GPUBatch *DRW_cache_lightprobe_cube_get(void)
{
  if (!SHC.drw_lightprobe_cube) {
    /* hexagon outline, vertex 6 is the center */
    const float sin_pi_3 = 0.86602540378f;
    const float cos_pi_3 = 0.5f;
    const float v[7][3] = {
      {0.0f, 1.0f, 0.0f},
      {sin_pi_3, cos_pi_3, 0.0f},
      {sin_pi_3, -cos_pi_3, 0.0f},
      {0.0f, -1.0f, 0.0f},
      {-sin_pi_3, -cos_pi_3, 0.0f},
      {-sin_pi_3, cos_pi_3, 0.0f},
      {0.0f, 0.0f, 0.0f},
    };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, (6 + 3) * 2);
    uint v_idx = 0;
    for (int i = 0; i < 6; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 6]);
    }

    /* three spokes to the center */
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    SHC.drw_lightprobe_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lightprobe_cube;
}
GPUBatch *DRW_cache_lightprobe_grid_get(void)
{
  if (!SHC.drw_lightprobe_grid) {
    /* hexagon outline, vertex 6 is the center */
    const float sin_pi_3 = 0.86602540378f;
    const float cos_pi_3 = 0.5f;
    const float v[7][3] = {
      {0.0f, 1.0f, 0.0f},
      {sin_pi_3, cos_pi_3, 0.0f},
      {sin_pi_3, -cos_pi_3, 0.0f},
      {0.0f, -1.0f, 0.0f},
      {-sin_pi_3, -cos_pi_3, 0.0f},
      {-sin_pi_3, cos_pi_3, 0.0f},
      {0.0f, 0.0f, 0.0f},
    };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, (6 * 2 + 3) * 2);
    uint v_idx = 0;
    for (int i = 0; i < 6; ++i) {
      float tmp_v1[3], tmp_v2[3], tmp_tr[3];
      copy_v3_v3(tmp_v1, v[i]);
      copy_v3_v3(tmp_v2, v[(i + 1) % 6]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);

      /* Internal wires. */
      for (int j = 1; j < 2; ++j) {
        mul_v3_v3fl(tmp_tr, v[(i / 2) * 2 + 1], -0.5f * j);
        add_v3_v3v3(tmp_v1, v[i], tmp_tr);
        add_v3_v3v3(tmp_v2, v[(i + 1) % 6], tmp_tr);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
      }
    }

    /* three spokes to the center */
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);

    SHC.drw_lightprobe_grid = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lightprobe_grid;
}
GPUBatch *DRW_cache_lightprobe_planar_get(void)
{
  if (!SHC.drw_lightprobe_planar) {
    /* diamond outline */
    const float sin_pi_3 = 0.86602540378f;
    const float v[4][3] = {
      {0.0f, 0.5f, 0.0f},
      {sin_pi_3, 0.0f, 0.0f},
      {0.0f, -0.5f, 0.0f},
      {-sin_pi_3, 0.0f, 0.0f},
    };

    /* Position Only 3D format */
    static GPUVertFormat format = { 0 };
    static struct { uint pos; } attr_id;
    if (format.attr_len == 0) {
      attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    }

    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 4 * 2);
    uint v_idx = 0;
    for (int i = 0; i < 4; ++i) {
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
      GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 4]);
    }

    SHC.drw_lightprobe_planar = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
  }
  return SHC.drw_lightprobe_planar;
}
/** \} */

/* -------------------------------------------------------------------- */

/** \name Armature Bones
 * \{ */
static const float bone_octahedral_verts[6][3] = {
  { 0.0f, 0.0f,  0.0f},
  { 0.1f, 0.1f,  0.1f},
  { 0.1f, 0.1f, -0.1f},
  {-0.1f, 0.1f, -0.1f},
  {-0.1f, 0.1f,  0.1f},
  { 0.0f, 1.0f,  0.0f}
};
static const float bone_octahedral_smooth_normals[6][3] = {
  { 0.0f, -1.0f,  0.0f},
#if 0 /* creates problems for outlines when scaled */
  { 0.943608f * M_SQRT1_2, -0.331048f,  0.943608f * M_SQRT1_2},
  { 0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
  {-0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
  {-0.943608f * M_SQRT1_2, -0.331048f,  0.943608f * M_SQRT1_2},
#else
  { M_SQRT1_2, 0.0f,  M_SQRT1_2},
  { M_SQRT1_2, 0.0f, -M_SQRT1_2},
  {-M_SQRT1_2, 0.0f, -M_SQRT1_2},
  {-M_SQRT1_2, 0.0f,  M_SQRT1_2},
#endif
  { 0.0f, 1.0f, 0.0f}
};
#if 0  /* UNUSED */
static const uint bone_octahedral_wire[24] = {
  0, 1,  1, 5,  5, 3,  3, 0,
  0, 4,  4, 5,  5, 2,  2, 0,
  1, 2,  2, 3,  3, 4,  4, 1,
};

/* aligned with bone_octahedral_wire
 * Contains adjacent normal index */
static const uint bone_octahedral_wire_adjacent_face[24] = {
  0, 3,  4, 7,  5, 6,  1, 2,
  2, 3,  6, 7,  4, 5,  0, 1,
  0, 4,  1, 5,  2, 6,  3, 7,
};
#endif
static const uint bone_octahedral_solid_tris[8][3] = {
  {2, 1, 0}, /* bottom */
  {3, 2, 0},
  {4, 3, 0},
  {1, 4, 0},

  {5, 1, 2}, /* top */
  {5, 2, 3},
  {5, 3, 4},
  {5, 4, 1}
};
/**
 * Store indices of generated verts from bone_octahedral_solid_tris to define adjacency info.
 * Example: triangle {2, 1, 0} is adjacent to {3, 2, 0}, {1, 4, 0} and {5, 1, 2}.
 * {2, 1, 0} becomes {0, 1, 2}
 * {3, 2, 0} becomes {3, 4, 5}
 * {1, 4, 0} becomes {9, 10, 11}
 * {5, 1, 2} becomes {12, 13, 14}
 * According to the OpenGL specification it becomes (starting from
 * the first vertex of the first face, i.e. vertex 2):
 * {0, 12, 1, 10, 2, 3}
 */
static const uint bone_octahedral_wire_lines_adjacency[12][4] = {
  { 0,  1,  2,  6}, { 0, 12,  1,  6}, { 0,  3, 12,  6}, { 0,  2,  3,  6},
  { 1,  6,  2,  3}, { 1, 12,  6,  3}, { 1,  0, 12,  3}, { 1,  2,  0,  3},
  { 2,  0,  1, 12}, { 2,  3,  0, 12}, { 2,  6,  3, 12}, { 2,  1,  6, 12},
};
static const uint bone_octahedral_solid_tris_adjacency[8][6] = {
  { 0, 12,  1, 10,  2,  3},
  { 3, 15,  4,  1,  5,  6},
  { 6, 18,  7,  4,  8,  9},
  { 9, 21, 10,  7, 11,  0},

  {12, 22, 13,  2, 14, 17},
  {15, 13, 16,  5, 17, 20},
  {18, 16, 19,  8, 20, 23},
  {21, 19, 22, 11, 23, 14},
};
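/* Context note: the 4-tuples in bone_octahedral_wire_lines_adjacency are
 * consumed as GPU_PRIM_LINES_ADJ (see DRW_cache_bone_octahedral_wire_get()
 * below). Each {a, v1, v2, b} hands a geometry shader the segment v1-v2 plus
 * one vertex of each neighbouring face, which is enough information to keep
 * only the edges lying on the bone's silhouette. */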
/* aligned with bone_octahedral_solid_tris */
static const float bone_octahedral_solid_normals[8][3] = {
  { M_SQRT1_2,   -M_SQRT1_2,    0.00000000f},
  {-0.00000000f, -M_SQRT1_2,   -M_SQRT1_2},
  {-M_SQRT1_2,   -M_SQRT1_2,    0.00000000f},
  { 0.00000000f, -M_SQRT1_2,    M_SQRT1_2},
  { 0.99388373f,  0.11043154f, -0.00000000f},
  { 0.00000000f,  0.11043154f, -0.99388373f},
  {-0.99388373f,  0.11043154f,  0.00000000f},
  { 0.00000000f,  0.11043154f,  0.99388373f}
};
GPUBatch *DRW_cache_bone_octahedral_get(void)
{
    if (!SHC.drw_bone_octahedral) {
        uint v_idx = 0;

        static GPUVertFormat format = { 0 };
        static struct { uint pos, nor, snor; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
            attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
            attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, 24);

        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 3; ++j) {
                GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
                GPU_vertbuf_attr_set(vbo, attr_id.snor, v_idx, bone_octahedral_smooth_normals[bone_octahedral_solid_tris[i][j]]);
                GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][j]]);
            }
        }

        SHC.drw_bone_octahedral = GPU_batch_create_ex(
                GPU_PRIM_TRIS, vbo, NULL,
                GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_octahedral;
}
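/* Layout note: the 24 verts above are the 8 solid triangles unrolled, so tri
 * i, corner j lands at vert index i * 3 + j. This is the numbering that the
 * *_tris_adjacency and *_wire_lines_adjacency tables refer to, e.g.
 * {5, 1, 2} is tri 4 and thus occupies indices 12-14, as in the comment
 * further up. */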
GPUBatch *DRW_cache_bone_octahedral_wire_get(void)
{
    if (!SHC.drw_bone_octahedral_wire) {
        GPUIndexBufBuilder elb;
        GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 24);

        for (int i = 0; i < 12; i++) {
            GPU_indexbuf_add_line_adj_verts(
                    &elb,
                    bone_octahedral_wire_lines_adjacency[i][0],
                    bone_octahedral_wire_lines_adjacency[i][1],
                    bone_octahedral_wire_lines_adjacency[i][2],
                    bone_octahedral_wire_lines_adjacency[i][3]);
        }

        /* HACK Reuse vertex buffer. */
        GPUBatch *pos_nor_batch = DRW_cache_bone_octahedral_get();

        SHC.drw_bone_octahedral_wire = GPU_batch_create_ex(
                GPU_PRIM_LINES_ADJ, pos_nor_batch->verts[0], GPU_indexbuf_build(&elb),
                GPU_BATCH_OWNS_INDEX);
    }
    return SHC.drw_bone_octahedral_wire;
}
/* XXX TODO: move this 1-unit cube to a more common/generic place? */
static const float bone_box_verts[8][3] = {
    { 1.0f, 0.0f,  1.0f},
    { 1.0f, 0.0f, -1.0f},
    {-1.0f, 0.0f, -1.0f},
    {-1.0f, 0.0f,  1.0f},
    { 1.0f, 1.0f,  1.0f},
    { 1.0f, 1.0f, -1.0f},
    {-1.0f, 1.0f, -1.0f},
    {-1.0f, 1.0f,  1.0f}
};
static const float bone_box_smooth_normals[8][3] = {
    { M_SQRT3, -M_SQRT3,  M_SQRT3},
    { M_SQRT3, -M_SQRT3, -M_SQRT3},
    {-M_SQRT3, -M_SQRT3, -M_SQRT3},
    {-M_SQRT3, -M_SQRT3,  M_SQRT3},
    { M_SQRT3,  M_SQRT3,  M_SQRT3},
    { M_SQRT3,  M_SQRT3, -M_SQRT3},
    {-M_SQRT3,  M_SQRT3, -M_SQRT3},
    {-M_SQRT3,  M_SQRT3,  M_SQRT3},
};
static const uint bone_box_wire[24] = {
    0, 1,  1, 2,  2, 3,  3, 0,
    4, 5,  5, 6,  6, 7,  7, 4,
    0, 4,  1, 5,  2, 6,  3, 7,
};

/* aligned with bone_box_wire
 * Contains adjacent normal index */
static const uint bone_box_wire_adjacent_face[24] = {
    0,  2,  0,  4,  1,  6,  1,  8,
    3, 10,  5, 10,  7, 11,  9, 11,
    3,  8,  2,  5,  4,  7,  6,  9,
};
static const uint bone_box_solid_tris[12][3] = {
    {0, 2, 1}, /* bottom */
    {0, 3, 2},

    {0, 1, 5}, /* sides */
    {0, 5, 4},

    {1, 2, 6},
    {1, 6, 5},

    {2, 3, 7},
    {2, 7, 6},

    {3, 0, 4},
    {3, 4, 7},

    {4, 5, 6}, /* top */
    {4, 6, 7},
};

/**
 * Store indices of generated verts from bone_box_solid_tris to define adjacency info.
 * See bone_octahedral_solid_tris for more info.
 */
static const uint bone_box_wire_lines_adjacency[12][4] = {
    { 4,  2,  0, 11}, { 0,  1,  2,  8}, { 2,  4,  1, 14}, { 1,  0,  4, 20}, /* bottom */
    { 0,  8, 11, 14}, { 2, 14,  8, 20}, { 1, 20, 14, 11}, { 4, 11, 20,  8}, /* top */
    {20,  0, 11,  2}, {11,  2,  8,  1}, { 8,  1, 14,  4}, {14,  4, 20,  0}, /* sides */
};
static const uint bone_box_solid_tris_adjacency[12][6] = {
    { 0,  5,  1, 14,  2,  8},
    { 3, 26,  4, 20,  5,  1},

    { 6,  2,  7, 16,  8, 11},
    { 9,  7, 10, 32, 11, 24},

    {12,  0, 13, 22, 14, 17},
    {15, 13, 16, 30, 17,  6},

    {18,  3, 19, 28, 20, 23},
    {21, 19, 22, 33, 23, 12},

    {24,  4, 25, 10, 26, 29},
    {27, 25, 28, 34, 29, 18},

    {30,  9, 31, 15, 32, 35},
    {33, 31, 34, 21, 35, 27},
};
/* aligned with bone_box_solid_tris */
static const float bone_box_solid_normals[12][3] = {
    { 0.0f, -1.0f,  0.0f},
    { 0.0f, -1.0f,  0.0f},

    { 1.0f,  0.0f,  0.0f},
    { 1.0f,  0.0f,  0.0f},

    { 0.0f,  0.0f, -1.0f},
    { 0.0f,  0.0f, -1.0f},

    {-1.0f,  0.0f,  0.0f},
    {-1.0f,  0.0f,  0.0f},

    { 0.0f,  0.0f,  1.0f},
    { 0.0f,  0.0f,  1.0f},

    { 0.0f,  1.0f,  0.0f},
    { 0.0f,  1.0f,  0.0f}
};
GPUBatch *DRW_cache_bone_box_get(void)
{
    if (!SHC.drw_bone_box) {
        uint v_idx = 0;

        static GPUVertFormat format = { 0 };
        static struct { uint pos, nor, snor; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
            attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
            attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, 36);

        for (int i = 0; i < 12; i++) {
            for (int j = 0; j < 3; j++) {
                GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_box_solid_normals[i]);
                GPU_vertbuf_attr_set(vbo, attr_id.snor, v_idx, bone_box_smooth_normals[bone_box_solid_tris[i][j]]);
                GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_box_verts[bone_box_solid_tris[i][j]]);
            }
        }

        SHC.drw_bone_box = GPU_batch_create_ex(
                GPU_PRIM_TRIS, vbo, NULL,
                GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_box;
}
GPUBatch *DRW_cache_bone_box_wire_get(void)
{
    if (!SHC.drw_bone_box_wire) {
        GPUIndexBufBuilder elb;
        GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 36);

        for (int i = 0; i < 12; i++) {
            GPU_indexbuf_add_line_adj_verts(
                    &elb,
                    bone_box_wire_lines_adjacency[i][0],
                    bone_box_wire_lines_adjacency[i][1],
                    bone_box_wire_lines_adjacency[i][2],
                    bone_box_wire_lines_adjacency[i][3]);
        }

        /* HACK Reuse vertex buffer. */
        GPUBatch *pos_nor_batch = DRW_cache_bone_box_get();

        SHC.drw_bone_box_wire = GPU_batch_create_ex(
                GPU_PRIM_LINES_ADJ, pos_nor_batch->verts[0], GPU_indexbuf_build(&elb),
                GPU_BATCH_OWNS_INDEX);
    }
    return SHC.drw_bone_box_wire;
}
/* Helpers for envelope bone's solid sphere-with-hidden-equatorial-cylinder.
 * Note that here we only encode head/tail in the fourth component of the vector. */
static void benv_lat_lon_to_co(const float lat, const float lon, float r_nor[3])
{
    r_nor[0] = sinf(lat) * cosf(lon);
    r_nor[1] = sinf(lat) * sinf(lon);
    r_nor[2] = cosf(lat);
}
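/* Sanity check for the mapping above: lat is the angle from the +Z pole, so
 * benv_lat_lon_to_co(0.0f, lon, co) yields (0, 0, 1) for any lon, and
 * benv_lat_lon_to_co(M_PI_2, 0.0f, co) yields (1, 0, 0) on the equator.
 * The result is always a unit vector, since
 * (sin(lat)cos(lon))^2 + (sin(lat)sin(lon))^2 + cos(lat)^2 = 1. */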
GPUBatch *DRW_cache_bone_envelope_solid_get(void)
{
    if (!SHC.drw_bone_envelope) {
        const int lon_res = 24;
        const int lat_res = 24;
        const float lon_inc = 2.0f * M_PI / lon_res;
        const float lat_inc = M_PI / lat_res;
        uint v_idx = 0;

        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, ((lat_res + 1) * 2) * lon_res * 1);

        float lon = 0.0f;
        for (int i = 0; i < lon_res; i++, lon += lon_inc) {
            float lat = 0.0f;
            float co1[3], co2[3];

            /* Note: the poles are duplicated on purpose, to restart the strip. */
            for (int j = 0; j < lat_res; j++, lat += lat_inc) {
                benv_lat_lon_to_co(lat, lon, co1);
                benv_lat_lon_to_co(lat, lon + lon_inc, co2);

                GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
                GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
            }

            /* Closing the loop */
            benv_lat_lon_to_co(M_PI, lon, co1);
            benv_lat_lon_to_co(M_PI, lon + lon_inc, co2);

            GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
            GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
        }

        SHC.drw_bone_envelope = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_envelope;
}
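/* Count check: each longitude column emits lat_res pairs plus one closing
 * pair, i.e. (lat_res + 1) * 2 verts, matching the allocation above. Because
 * consecutive columns repeat the pole positions, the extra triangles of the
 * single TRI_STRIP are degenerate (zero area), which is what effectively
 * restarts the strip between columns. */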
GPUBatch *DRW_cache_bone_envelope_outline_get(void)
{
    if (!SHC.drw_bone_envelope_outline) {
#  define CIRCLE_RESOL 64
        float v0[2], v1[2], v2[2];
        const float radius = 1.0f;

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos0, pos1, pos2; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
            attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
            attr_id.pos2 = GPU_vertformat_attr_add(&format, "pos2", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);

        v0[0] = radius * sinf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
        v0[1] = radius * cosf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
        v1[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
        v1[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));

        /* Output 4 verts for each position. See shader for explanation. */
        uint v = 0;
        for (int a = 0; a < CIRCLE_RESOL; a++) {
            v2[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            v2[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
            GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
            GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
            GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
            GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
            GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
            copy_v2_v2(v0, v1);
            copy_v2_v2(v1, v2);
        }
        v2[0] = radius * sinf((2.0f * M_PI * 0) / ((float)CIRCLE_RESOL));
        v2[1] = radius * cosf((2.0f * M_PI * 0) / ((float)CIRCLE_RESOL));
        GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
        GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
        GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
        GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);

        SHC.drw_bone_envelope_outline = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
#  undef CIRCLE_RESOL
    }
    return SHC.drw_bone_envelope_outline;
}
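/* Each vertex above carries a sliding window of three consecutive ring
 * positions (pos0, pos1, pos2), giving the vertex shader the neighborhood it
 * needs to extrude the outline ribbon; verts are emitted in pairs to build
 * the TRI_STRIP. Count: 2 per circle step plus the closing pair
 * = (CIRCLE_RESOL + 1) * 2, matching the allocation. */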
GPUBatch *DRW_cache_bone_point_get(void)
{
    if (!SHC.drw_bone_point) {
#if 0 /* old style geometry sphere */
        const int lon_res = 16;
        const int lat_res = 8;
        const float rad = 0.05f;
        const float lon_inc = 2 * M_PI / lon_res;
        const float lat_inc = M_PI / lat_res;
        uint v_idx = 0;

        static GPUVertFormat format = { 0 };
        static struct { uint pos, nor; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
            attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6);

        float lon = 0.0f;
        for (int i = 0; i < lon_res; i++, lon += lon_inc) {
            float lat = 0.0f;
            for (int j = 0; j < lat_res; j++, lat += lat_inc) {
                if (j != lat_res - 1) { /* Pole */
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon);
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
                }

                if (j != 0) { /* Pole */
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon + lon_inc);
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
                    add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
                }
            }
        }

        SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
#  define CIRCLE_RESOL 64
        float v[2];
        const float radius = 0.05f;

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);

        for (int a = 0; a < CIRCLE_RESOL; a++) {
            v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
        }

        SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
#  undef CIRCLE_RESOL
#endif
    }
    return SHC.drw_bone_point;
}
GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
{
    if (!SHC.drw_bone_point_wire) {
#if 0 /* old style geometry sphere */
        GPUVertBuf *vbo = sphere_wire_vbo(0.05f);
        SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
#  define CIRCLE_RESOL 64
        float v0[2], v1[2];
        const float radius = 0.05f;

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos0, pos1; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
            attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);

        v0[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
        v0[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));

        uint v = 0;
        for (int a = 0; a < CIRCLE_RESOL; a++) {
            v1[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            v1[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
            GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
            GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
            GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
            GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
            copy_v2_v2(v0, v1);
        }
        v1[0] = radius * sinf((2.0f * M_PI * 0) / ((float)CIRCLE_RESOL));
        v1[1] = radius * cosf((2.0f * M_PI * 0) / ((float)CIRCLE_RESOL));
        GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
        GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
        GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
        GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);

        SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
#  undef CIRCLE_RESOL
#endif
    }
    return SHC.drw_bone_point_wire;
}
/* keep in sync with armature_stick_vert.glsl */
#define COL_WIRE (1 << 0)
#define COL_HEAD (1 << 1)
#define COL_TAIL (1 << 2)
#define COL_BONE (1 << 3)

#define POS_HEAD (1 << 4)
#define POS_TAIL (1 << 5)
#define POS_BONE (1 << 6)
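/* These bits are OR'ed per vertex: e.g. the head center vert written below is
 * POS_HEAD | COL_HEAD, circle rim verts add COL_WIRE, and the bone body verts
 * use POS_HEAD/POS_TAIL | COL_WIRE | COL_BONE | POS_BONE. The vertex shader
 * (armature_stick_vert.glsl) decodes both placement and color from the flag. */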
GPUBatch *DRW_cache_bone_stick_get(void)
{
    if (!SHC.drw_bone_stick) {
#define CIRCLE_RESOL 12
        uint v = 0;
        uint flag;
        const float radius = 2.0f; /* head/tail radius */
        float pos[2];

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos, flag; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
            attr_id.flag = GPU_vertformat_attr_add(&format, "flag", GPU_COMP_U32, 1, GPU_FETCH_INT);
        }

        const uint vcount = (CIRCLE_RESOL + 1) * 2 + 6;

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, vcount);

        GPUIndexBufBuilder elb;
        GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_FAN, (CIRCLE_RESOL + 2) * 2 + 6 + 2, vcount, true);

        /* head/tail points */
        for (int i = 0; i < 2; ++i) {
            /* center vertex */
            copy_v2_fl(pos, 0.0f);
            flag = (i == 0) ? POS_HEAD : POS_TAIL;
            flag |= (i == 0) ? COL_HEAD : COL_TAIL;
            GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
            GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
            GPU_indexbuf_add_generic_vert(&elb, v++);
            /* circle vertices */
            flag |= COL_WIRE;
            for (int a = 0; a < CIRCLE_RESOL; a++) {
                pos[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
                pos[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
                GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
                GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
                GPU_indexbuf_add_generic_vert(&elb, v++);
            }
            /* Close the circle */
            GPU_indexbuf_add_generic_vert(&elb, v - CIRCLE_RESOL);

            GPU_indexbuf_add_primitive_restart(&elb);
        }

        /* Bone rectangle */
        pos[0] = 0.0f;
        for (int i = 0; i < 6; ++i) {
            pos[1] = (i == 0 || i == 3) ? 0.0f : ((i < 3) ? 1.0f : -1.0f);
            flag = ((i < 2 || i > 4) ? POS_HEAD : POS_TAIL) |
                   ((i == 0 || i == 3) ? 0 : COL_WIRE) | COL_BONE | POS_BONE;
            GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
            GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
            GPU_indexbuf_add_generic_vert(&elb, v++);
        }

        SHC.drw_bone_stick = GPU_batch_create_ex(
                GPU_PRIM_TRI_FAN, vbo, GPU_indexbuf_build(&elb),
                GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
#undef CIRCLE_RESOL
    }
    return SHC.drw_bone_stick;
}
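/* Index count check: each head/tail disc is a TRI_FAN of 1 center +
 * CIRCLE_RESOL rim verts + 1 repeat to close the circle, i.e. CIRCLE_RESOL + 2
 * indices, followed by a primitive restart. Two discs, the six rectangle
 * verts and the two restarts give the (CIRCLE_RESOL + 2) * 2 + 6 + 2 passed
 * to GPU_indexbuf_init_ex() above. */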
static void set_bone_axis_vert(
        GPUVertBuf *vbo, uint axis, uint pos, uint col,
        uint *v, const float *a, const float *p, const float *c)
{
    GPU_vertbuf_attr_set(vbo, axis, *v, a);
    GPU_vertbuf_attr_set(vbo, pos, *v, p);
    GPU_vertbuf_attr_set(vbo, col, *v, c);
    *v += 1;
}
#define S_X 0.0215f
#define S_Y 0.025f
static float x_axis_name[4][2] = {
    { 0.9f * S_X,  1.0f * S_Y}, {-1.0f * S_X, -1.0f * S_Y},
    {-0.9f * S_X,  1.0f * S_Y}, { 1.0f * S_X, -1.0f * S_Y}
};
#define X_LEN (sizeof(x_axis_name) / (sizeof(float) * 2))
#undef S_X
#undef S_Y

#define S_X 0.0175f
#define S_Y 0.025f
static float y_axis_name[6][2] = {
    {-1.0f * S_X,  1.0f * S_Y}, { 0.0f * S_X, -0.1f * S_Y},
    { 1.0f * S_X,  1.0f * S_Y}, { 0.0f * S_X, -0.1f * S_Y},
    { 0.0f * S_X, -0.1f * S_Y}, { 0.0f * S_X, -1.0f * S_Y}
};
#define Y_LEN (sizeof(y_axis_name) / (sizeof(float) * 2))
#undef S_X
#undef S_Y

#define S_X 0.02f
#define S_Y 0.025f
static float z_axis_name[10][2] = {
    {-0.95f * S_X,  1.00f * S_Y}, { 0.95f * S_X,  1.00f * S_Y},
    { 0.95f * S_X,  1.00f * S_Y}, { 0.95f * S_X,  0.90f * S_Y},
    { 0.95f * S_X,  0.90f * S_Y}, {-1.00f * S_X, -0.90f * S_Y},
    {-1.00f * S_X, -0.90f * S_Y}, {-1.00f * S_X, -1.00f * S_Y},
    {-1.00f * S_X, -1.00f * S_Y}, { 1.00f * S_X, -1.00f * S_Y}
};
#define Z_LEN (sizeof(z_axis_name) / (sizeof(float) * 2))
#undef S_X
#undef S_Y

#define S_X 0.007f
#define S_Y 0.007f
static float axis_marker[8][2] = {
#if 0 /* square */
    {-1.0f * S_X,  1.0f * S_Y}, { 1.0f * S_X,  1.0f * S_Y},
    { 1.0f * S_X,  1.0f * S_Y}, { 1.0f * S_X, -1.0f * S_Y},
    { 1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X, -1.0f * S_Y},
    {-1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X,  1.0f * S_Y}
#else /* diamond */
    {-S_X,  0.f}, { 0.f,  S_Y},
    { 0.f,  S_Y}, { S_X,  0.f},
    { S_X,  0.f}, { 0.f, -S_Y},
    { 0.f, -S_Y}, {-S_X,  0.f}
#endif
};
#define MARKER_LEN (sizeof(axis_marker) / (sizeof(float) * 2))
#define MARKER_FILL_LAYER 6
#undef S_X
#undef S_Y

#define S_X 0.0007f
#define S_Y 0.0007f
#define O_X  0.001f
#define O_Y -0.001f
static float axis_name_shadow[8][2] = {
    {-S_X + O_X,  S_Y + O_Y}, { S_X + O_X,  S_Y + O_Y},
    { S_X + O_X,  S_Y + O_Y}, { S_X + O_X, -S_Y + O_Y},
    { S_X + O_X, -S_Y + O_Y}, {-S_X + O_X, -S_Y + O_Y},
    {-S_X + O_X, -S_Y + O_Y}, {-S_X + O_X,  S_Y + O_Y}
};
// #define SHADOW_RES (sizeof(axis_name_shadow) / (sizeof(float) * 2))
#define SHADOW_RES 0
#undef O_X
#undef O_Y
#undef S_X
#undef S_Y
GPUBatch *DRW_cache_bone_arrows_get(void)
{
    if (!SHC.drw_bone_arrows) {
        /* Position Only 3D format */
        static GPUVertFormat format = { 0 };
        static struct { uint axis, pos, col; } attr_id;
        if (format.attr_len == 0) {
            attr_id.axis = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
            attr_id.pos = GPU_vertformat_attr_add(&format, "screenPos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
            attr_id.col = GPU_vertformat_attr_add(&format, "colorAxis", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, (2 + MARKER_LEN * MARKER_FILL_LAYER) * 3 +
                                    (X_LEN + Y_LEN + Z_LEN) * (1 + SHADOW_RES));

        uint v = 0;

        for (int axis = 0; axis < 3; axis++) {
            float pos[2] = {0.0f, 0.0f};
            float c[3] = {0.0f, 0.0f, 0.0f};
            float a = 0.0f;
            /* center to axis line */
            set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);
            c[axis] = 0.5f;
            a = axis + 0.25f;
            set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);

            /* Axis end marker */
            for (int j = 1; j < MARKER_FILL_LAYER + 1; ++j) {
                for (int i = 0; i < MARKER_LEN; ++i) {
                    float tmp[2];
                    mul_v2_v2fl(tmp, axis_marker[i], j / (float)MARKER_FILL_LAYER);
                    set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
                                       &v, &a, tmp, c);
                }
            }

            /* Axis name */
            a = axis + 0.31f;
            int axis_v_len;
            float (*axis_verts)[2];
            if (axis == 0) {
                axis_verts = x_axis_name;
                axis_v_len = X_LEN;
            }
            else if (axis == 1) {
                axis_verts = y_axis_name;
                axis_v_len = Y_LEN;
            }
            else {
                axis_verts = z_axis_name;
                axis_v_len = Z_LEN;
            }

            /* Axis name shadows */
            copy_v3_fl(c, 0.0f);
            c[axis] = 0.3f;
            for (int j = 0; j < SHADOW_RES; ++j) {
                for (int i = 0; i < axis_v_len; ++i) {
                    float tmp[2];
                    add_v2_v2v2(tmp, axis_verts[i], axis_name_shadow[j]);
                    set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
                                       &v, &a, tmp, c);
                }
            }

            /* Axis name */
            copy_v3_fl(c, 0.1f);
            c[axis] = 1.0f;
            for (int i = 0; i < axis_v_len; ++i) {
                set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
                                   &v, &a, axis_verts[i], c);
            }
        }

        SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_arrows;
}
static const float staticSine[16] = {
    0.0f, 0.104528463268f, 0.207911690818f, 0.309016994375f,
    0.406736643076f, 0.5f, 0.587785252292f, 0.669130606359f,
    0.743144825477f, 0.809016994375f, 0.866025403784f,
    0.913545457643f, 0.951056516295f, 0.978147600734f,
    0.994521895368f, 1.0f
};

#define set_vert(a, b, quarter) \
    copy_v2_fl2(pos, (quarter % 2 == 0) ? -(a) : (a), (quarter < 2) ? -(b) : (b)); \
    GPU_vertbuf_attr_set(vbo, attr_id.pos, v++, pos);
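/* set_vert() mirrors the first-quadrant coordinates (a, b) into one quadrant
 * per 'quarter': 0 -> (-a, -b), 1 -> (a, -b), 2 -> (-a, b), 3 -> (a, b),
 * so the DoF shapes below only have to tabulate staticSine for one quarter. */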
GPUBatch *DRW_cache_bone_dof_sphere_get(void)
{
    if (!SHC.drw_bone_dof_sphere) {
        int i, j, q, n = ARRAY_SIZE(staticSine);
        float x, z, px, pz, pos[2];

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, n * n * 6 * 4);

        uint v = 0;
        for (q = 0; q < 4; ++q) {
            pz = 0.0f;
            for (i = 1; i < n; ++i) {
                z = staticSine[i];
                px = 0.0f;
                for (j = 1; j <= (n - i); ++j) {
                    x = staticSine[j];
                    if (j == n - i) {
                        set_vert(px, z, q);
                        set_vert(px, pz, q);
                        set_vert(x, pz, q);
                    }
                    else {
                        set_vert(x, z, q);
                        set_vert(x, pz, q);
                        set_vert(px, z, q);

                        set_vert(x, pz, q);
                        set_vert(px, pz, q);
                        set_vert(px, z, q);
                    }
                    px = x;
                }
                pz = z;
            }
        }
        /* TODO: alloc the right count from the beginning. */
        GPU_vertbuf_data_resize(vbo, v);

        SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_dof_sphere;
}
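/* n * n * 6 * 4 above is a conservative upper bound; the triangular (i, j)
 * sweep writes fewer verts, so GPU_vertbuf_data_resize() then shrinks the
 * buffer to the count actually emitted (see the TODO above). */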
GPUBatch *DRW_cache_bone_dof_lines_get(void)
{
    if (!SHC.drw_bone_dof_lines) {
        int i, n = ARRAY_SIZE(staticSine);
        float pos[2];

        /* Position Only 2D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, n * 4);

        uint v = 0;
        for (i = 0; i < n * 4; i++) {
            float a = (1.0f - (i / (float)(n * 4))) * 2.0f * M_PI;
            float x = cosf(a);
            float y = sinf(a);
            set_vert(x, y, 3);
        }

        SHC.drw_bone_dof_lines = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_bone_dof_lines;
}

#undef set_vert

/** \} */
/* -------------------------------------------------------------------- */

/** \name Camera
 * \{ */

/**
 * We could make these more generic functions,
 * although filling 1D lines is not common.
 *
 * \note Only the x coordinate is used to identify the vertex;
 * the vertex shader takes care of placing it appropriately.
 */
static const float camera_coords_frame_bounds[5] = {
    0.0f, /* center point */
    1.0f, /* + X + Y */
    2.0f, /* + X - Y */
    3.0f, /* - X - Y */
    4.0f, /* - X + Y */
};

static const float camera_coords_frame_tri[3] = {
    5.0f, /* tria + X */
    6.0f, /* tria - X */
    7.0f, /* tria + Y */
};
/** Draw a loop of lines. */
static void camera_fill_lines_loop_fl_v1(
        GPUVertBufRaw *pos_step,
        const float *coords, const uint coords_len)
{
    for (uint i = 0, i_prev = coords_len - 1; i < coords_len; i_prev = i++) {
        *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i_prev];
        *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
    }
}

/** Fan lines out from the first vertex. */
static void camera_fill_lines_fan_fl_v1(
        GPUVertBufRaw *pos_step,
        const float *coords, const uint coords_len)
{
    for (uint i = 1; i < coords_len; i++) {
        *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[0];
        *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
    }
}

/** Simply fill the array. */
static void camera_fill_array_fl_v1(
        GPUVertBufRaw *pos_step,
        const float *coords, const uint coords_len)
{
    for (uint i = 0; i < coords_len; i++) {
        *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
    }
}
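/* Vertex counts produced by the three fillers above, used to size the VBOs
 * below: loop emits 2 * coords_len, fan emits 2 * (coords_len - 1), array
 * emits coords_len. E.g. DRW_cache_camera_get(): fan(5) + loop(4) + loop(3)
 * = 8 + 8 + 6 = 22, which is its vbo_len_capacity. */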
GPUBatch *DRW_cache_camera_get(void)
{
    if (!SHC.drw_camera) {
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        const int vbo_len_capacity = 22;
        GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
        GPUVertBufRaw pos_step;
        GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);

        /* camera cone (from center to frame) */
        camera_fill_lines_fan_fl_v1(&pos_step, camera_coords_frame_bounds, ARRAY_SIZE(camera_coords_frame_bounds));

        /* camera frame (skip center) */
        camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);

        /* camera triangle (above the frame) */
        camera_fill_lines_loop_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));

        BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));

        SHC.drw_camera = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_camera;
}
GPUBatch *DRW_cache_camera_frame_get(void)
{
    if (!SHC.drw_camera_frame) {
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        const int vbo_len_capacity = 8;
        GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
        GPUVertBufRaw pos_step;
        GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);

        /* camera frame (skip center) */
        camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);

        BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));

        SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_camera_frame;
}
GPUBatch *DRW_cache_camera_tria_get(void)
{
    if (!SHC.drw_camera_tria) {
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
        }

        /* Vertices */
        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        const int vbo_len_capacity = 3;
        GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
        GPUVertBufRaw pos_step;
        GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);

        /* camera triangle (above the frame) */
        camera_fill_array_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));

        BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));

        SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_camera_tria;
}

/** \} */
/* -------------------------------------------------------------------- */

/** \name Object Mode Helpers
 * \{ */
GPUBatch *DRW_cache_single_vert_get(void)
{
    if (!SHC.drw_single_vertice) {
        float v1[3] = {0.0f, 0.0f, 0.0f};

        /* Position Only 3D format */
        static GPUVertFormat format = { 0 };
        static struct { uint pos; } attr_id;
        if (format.attr_len == 0) {
            attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
        }

        GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
        GPU_vertbuf_data_alloc(vbo, 1);

        GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);

        SHC.drw_single_vertice = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
    return SHC.drw_single_vertice;
}

/** \} */
/* -------------------------------------------------------------------- */

/** \name Meshes
 * \{ */

GPUBatch *DRW_cache_mesh_all_verts_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_all_verts(ob->data);
}

GPUBatch *DRW_cache_mesh_all_edges_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_all_edges(ob->data);
}

GPUBatch *DRW_cache_mesh_loose_edges_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_loose_edges(ob->data);
}

GPUBatch *DRW_cache_mesh_edge_detection_get(Object *ob, bool *r_is_manifold)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_edge_detection(ob->data, r_is_manifold);
}

GPUBatch *DRW_cache_mesh_surface_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface(ob->data);
}

GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_edges(ob->data);
}

/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_shaded_get(
        Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
        char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_shaded(
            ob->data, gpumat_array, gpumat_array_len,
            auto_layer_names, auto_layer_is_srgb, auto_layer_count);
}

/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_texpaint(ob->data);
}

GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_texpaint_single(ob->data);
}

GPUBatch *DRW_cache_mesh_surface_vertpaint_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_vertpaint(ob->data);
}

GPUBatch *DRW_cache_mesh_surface_weights_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_surface_weights(ob->data);
}

GPUBatch *DRW_cache_mesh_face_wireframe_get(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);
    return DRW_mesh_batch_cache_get_wireframes_face(ob->data);
}

void DRW_cache_mesh_sculpt_coords_ensure(Object *ob)
{
    BLI_assert(ob->type == OB_MESH);

    Mesh *me = ob->data;
    DRW_mesh_cache_sculpt_coords_ensure(me);
}
/** \} */

/* -------------------------------------------------------------------- */

/** \name Curve
 * \{ */

GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    return DRW_curve_batch_cache_get_wire_edge(cu);
}

GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    return DRW_curve_batch_cache_get_normal_edge(cu);
}

GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
{
    BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));

    struct Curve *cu = ob->data;
    return DRW_curve_batch_cache_get_edit_edges(cu);
}

GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob, bool handles)
{
    BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));

    struct Curve *cu = ob->data;
    return DRW_curve_batch_cache_get_edit_verts(cu, handles);
}
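/* The remaining curve getters share one pattern: if the modifier stack left
 * an evaluated mesh on the object (ob->runtime.mesh_eval), delegate to the
 * mesh batch cache; otherwise build the batch from the curve's own cache, so
 * both code paths return a matching batch type. */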
GPUBatch *DRW_cache_curve_surface_get(Object *ob)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    struct Mesh *mesh_eval = ob->runtime.mesh_eval;
    if (mesh_eval != NULL) {
        return DRW_mesh_batch_cache_get_surface(mesh_eval);
    }
    else {
        return DRW_curve_batch_cache_get_triangles_with_normals(cu);
    }
}

GPUBatch *DRW_cache_curve_loose_edges_get(Object *ob)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    struct Mesh *mesh_eval = ob->runtime.mesh_eval;
    if (mesh_eval != NULL) {
        return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
    }
    else {
        /* TODO */
        UNUSED_VARS(cu);
        return NULL;
    }
}

GPUBatch *DRW_cache_curve_face_wireframe_get(Object *ob)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    struct Mesh *mesh_eval = ob->runtime.mesh_eval;
    if (mesh_eval != NULL) {
        return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
    }
    else {
        return DRW_curve_batch_cache_get_wireframes_face(cu);
    }
}

/* Return list of batches */
GPUBatch **DRW_cache_curve_surface_shaded_get(
        Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
{
    BLI_assert(ob->type == OB_CURVE);

    struct Curve *cu = ob->data;
    struct Mesh *mesh_eval = ob->runtime.mesh_eval;
    if (mesh_eval != NULL) {
        return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
    }
    else {
        return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
    }
}