2 * ***** BEGIN GPL LICENSE BLOCK *****
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 * The Original Code is Copyright (C) 2005 Blender Foundation.
19 * All rights reserved.
21 * The Original Code is: all of this file.
23 * Contributor(s): Brecht Van Lommel.
25 * ***** END GPL LICENSE BLOCK *****
28 /** \file blender/gpu/intern/gpu_buffers.c
39 #include "MEM_guardedalloc.h"
41 #include "BLI_bitmap.h"
43 #include "BLI_utildefines.h"
44 #include "BLI_ghash.h"
45 #include "BLI_threads.h"
47 #include "DNA_meshdata_types.h"
48 #include "DNA_material_types.h"
51 #include "BKE_DerivedMesh.h"
52 #include "BKE_paint.h"
53 #include "BKE_subsurf.h"
55 #include "DNA_userdef_types.h"
57 #include "GPU_buffers.h"
63 GPU_BUFFER_VERTEX_STATE = 1,
64 GPU_BUFFER_NORMAL_STATE = 2,
65 GPU_BUFFER_TEXCOORD_STATE = 4,
66 GPU_BUFFER_COLOR_STATE = 8,
67 GPU_BUFFER_ELEMENT_STATE = 16,
70 #define MAX_GPU_ATTRIB_DATA 32
72 /* material number is an 16-bit signed short and the range (assume material number is non-negative) */
73 #define MAX_MATERIALS MAXMAT
75 /* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
76 static int useVBOs = -1;
77 static GPUBufferState GLStates = 0;
78 static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
80 static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
82 /* stores recently-deleted buffers so that new buffers won't have to
83 * be recreated as often
85 * only one instance of this pool is created, stored in
88 * note that the number of buffers in the pool is usually limited to
89 * MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
90 * when a GPUBuffer is released outside the main thread; due to OpenGL
91 * restrictions it cannot be immediately released
93 typedef struct GPUBufferPool {
94 /* number of allocated buffers stored */
96 /* actual allocated length of the array */
100 #define MAX_FREE_GPU_BUFFERS 8
102 /* create a new GPUBufferPool */
103 static GPUBufferPool *gpu_buffer_pool_new(void)
107 /* enable VBOs if supported */
109 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
111 pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer_Pool");
113 pool->maxsize = MAX_FREE_GPU_BUFFERS;
114 pool->buffers = MEM_callocN(sizeof(GPUBuffer *) * pool->maxsize,
115 "GPUBuffer.buffers");
120 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
121 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
125 if (!pool || index < 0 || index >= pool->totbuf)
128 /* shift entries down, overwriting the buffer at `index' */
129 for (i = index; i < pool->totbuf - 1; i++)
130 pool->buffers[i] = pool->buffers[i + 1];
132 /* clear the last entry */
133 if (pool->totbuf > 0)
134 pool->buffers[pool->totbuf - 1] = NULL;
139 /* delete the last entry in the pool */
140 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
144 if (pool->totbuf <= 0)
147 /* get the last entry */
148 if (!(last = pool->buffers[pool->totbuf - 1]))
151 /* delete the buffer's data */
153 glDeleteBuffersARB(1, &last->id);
155 MEM_freeN(last->pointer);
157 /* delete the buffer and remove from pool */
160 pool->buffers[pool->totbuf] = NULL;
163 /* free a GPUBufferPool; also frees the data in the pool's
165 static void gpu_buffer_pool_free(GPUBufferPool *pool)
171 gpu_buffer_pool_delete_last(pool);
173 MEM_freeN(pool->buffers);
177 static GPUBufferPool *gpu_buffer_pool = NULL;
178 static GPUBufferPool *gpu_get_global_buffer_pool(void)
180 /* initialize the pool */
181 if (!gpu_buffer_pool)
182 gpu_buffer_pool = gpu_buffer_pool_new();
184 return gpu_buffer_pool;
187 void GPU_global_buffer_pool_free(void)
189 gpu_buffer_pool_free(gpu_buffer_pool);
190 gpu_buffer_pool = NULL;
193 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
194 * pool if possible, otherwise creates a new one
196 * Thread-unsafe version for internal usage only.
198 static GPUBuffer *gpu_buffer_alloc_intern(int size)
202 int i, bufsize, bestfit = -1;
204 /* bad case, leads to leak of buf since buf->pointer will allocate
205 * NULL, leading to return without cleanup. In any case better detect early
210 pool = gpu_get_global_buffer_pool();
212 /* not sure if this buffer pool code has been profiled much,
213 * seems to me that the graphics driver and system memory
214 * management might do this stuff anyway. --nicholas
217 /* check the global buffer pool for a recently-deleted buffer
218 * that is at least as big as the request, but not more than
220 for (i = 0; i < pool->totbuf; i++) {
221 bufsize = pool->buffers[i]->size;
223 /* check for an exact size match */
224 if (bufsize == size) {
228 /* smaller buffers won't fit data and buffers at least
229 * twice as big are a waste of memory */
230 else if (bufsize > size && size > (bufsize / 2)) {
231 /* is it closer to the required size than the
232 * last appropriate buffer found. try to save
234 if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
240 /* if an acceptable buffer was found in the pool, remove it
241 * from the pool and return it */
243 buf = pool->buffers[bestfit];
244 gpu_buffer_pool_remove_index(pool, bestfit);
248 /* no acceptable buffer found in the pool, create a new one */
249 buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
253 /* create a new VBO and initialize it to the requested
255 glGenBuffersARB(1, &buf->id);
256 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
257 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
258 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
261 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
263 /* purpose of this seems to be dealing with
264 * out-of-memory errors? looks a bit iffy to me
265 * though, at least on Linux I expect malloc() would
266 * just overcommit. --nicholas */
267 while (!buf->pointer && pool->totbuf > 0) {
268 gpu_buffer_pool_delete_last(pool);
269 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
278 /* Same as above, but safe for threading. */
279 GPUBuffer *GPU_buffer_alloc(int size)
284 /* Early out, no lock needed in this case. */
288 BLI_mutex_lock(&buffer_mutex);
289 buffer = gpu_buffer_alloc_intern(size);
290 BLI_mutex_unlock(&buffer_mutex);
295 /* release a GPUBuffer; does not free the actual buffer or its data,
296 * but rather moves it to the pool of recently-freed buffers for
299 * Thread-unsafe version for internal usage only.
301 static void gpu_buffer_free_intern(GPUBuffer *buffer)
309 pool = gpu_get_global_buffer_pool();
311 /* free the last used buffer in the queue if no more space, but only
312 * if we are in the main thread. for e.g. rendering or baking it can
313 * happen that we are in other thread and can't call OpenGL, in that
314 * case cleanup will be done GPU_buffer_pool_free_unused */
315 if (BLI_thread_is_main()) {
316 /* in main thread, safe to decrease size of pool back
317 * down to MAX_FREE_GPU_BUFFERS */
318 while (pool->totbuf >= MAX_FREE_GPU_BUFFERS)
319 gpu_buffer_pool_delete_last(pool);
322 /* outside of main thread, can't safely delete the
323 * buffer, so increase pool size */
324 if (pool->maxsize == pool->totbuf) {
325 pool->maxsize += MAX_FREE_GPU_BUFFERS;
326 pool->buffers = MEM_reallocN(pool->buffers,
327 sizeof(GPUBuffer *) * pool->maxsize);
331 /* shift pool entries up by one */
332 for (i = pool->totbuf; i > 0; i--)
333 pool->buffers[i] = pool->buffers[i - 1];
335 /* insert the buffer into the beginning of the pool */
336 pool->buffers[0] = buffer;
340 /* Same as above, but safe for threading. */
341 void GPU_buffer_free(GPUBuffer *buffer)
344 /* Early output, no need to lock in this case, */
348 BLI_mutex_lock(&buffer_mutex);
349 gpu_buffer_free_intern(buffer);
350 BLI_mutex_unlock(&buffer_mutex);
/* singly-linked list node mapping one mesh vertex to one of the
 * (possibly several) points in the triangle buffer built from it */
typedef struct GPUVertPointLink {
	struct GPUVertPointLink *next;
	/* -1 means uninitialized */
	int point_index;
} GPUVertPointLink;
359 /* add a new point to the list of points related to a particular
361 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
363 GPUVertPointLink *lnk;
365 lnk = &gdo->vert_points[vert_index];
367 /* if first link is in use, add a new link at the end */
368 if (lnk->point_index != -1) {
370 for (; lnk->next; lnk = lnk->next) ;
372 /* add a new link from the pool */
373 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
374 gdo->vert_points_usage++;
377 lnk->point_index = point_index;
380 /* update the vert_points and triangle_to_mface fields with a new
382 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
383 int base_point_index,
385 int v1, int v2, int v3)
387 int i, v[3] = {v1, v2, v3};
388 for (i = 0; i < 3; i++)
389 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
390 gdo->triangle_to_mface[base_point_index / 3] = face_index;
393 /* for each vertex, build a list of points related to it; these lists
394 * are stored in an array sized to the number of vertices */
395 static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
397 GPUBufferMaterial *mat;
398 int i, mat_orig_to_new[MAX_MATERIALS];
400 /* allocate the array and space for links */
401 gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
402 "GPUDrawObject.vert_points");
403 gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
404 "GPUDrawObject.vert_points_mem");
405 gdo->vert_points_usage = 0;
407 /* build a map from the original material indices to the new
408 * GPUBufferMaterial indices */
409 for (i = 0; i < gdo->totmaterial; i++)
410 mat_orig_to_new[gdo->materials[i].mat_nr] = i;
412 /* -1 indicates the link is not yet used */
413 for (i = 0; i < gdo->totvert; i++)
414 gdo->vert_points[i].point_index = -1;
416 for (i = 0; i < totface; i++, f++) {
417 mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];
420 gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
421 i, f->v1, f->v2, f->v3);
424 /* add second triangle for quads */
426 gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
427 i, f->v3, f->v4, f->v1);
432 /* map any unused vertices to loose points */
433 for (i = 0; i < gdo->totvert; i++) {
434 if (gdo->vert_points[i].point_index == -1) {
435 gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
436 gdo->tot_loose_point++;
441 /* see GPUDrawObject's structure definition for a description of the
442 * data being initialized here */
443 GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
447 int points_per_mat[MAX_MATERIALS];
448 int i, curmat, curpoint, totface;
450 mface = dm->getTessFaceArray(dm);
451 totface = dm->getNumTessFaces(dm);
453 /* get the number of points used by each material, treating
454 * each quad as two triangles */
455 memset(points_per_mat, 0, sizeof(int) * MAX_MATERIALS);
456 for (i = 0; i < totface; i++)
457 points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;
459 /* create the GPUDrawObject */
460 gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
461 gdo->totvert = dm->getNumVerts(dm);
462 gdo->totedge = dm->getNumEdges(dm);
464 /* count the number of materials used by this DerivedMesh */
465 for (i = 0; i < MAX_MATERIALS; i++) {
466 if (points_per_mat[i] > 0)
470 /* allocate an array of materials used by this DerivedMesh */
471 gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
472 "GPUDrawObject.materials");
474 /* initialize the materials array */
475 for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
476 if (points_per_mat[i] > 0) {
477 gdo->materials[curmat].start = curpoint;
478 gdo->materials[curmat].totpoint = 0;
479 gdo->materials[curmat].mat_nr = i;
481 curpoint += points_per_mat[i];
486 /* store total number of points used for triangles */
487 gdo->tot_triangle_point = curpoint;
489 gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
490 "GPUDrawObject.triangle_to_mface");
492 gpu_drawobject_init_vert_points(gdo, mface, totface);
497 void GPU_drawobject_free(DerivedMesh *dm)
501 if (!dm || !(gdo = dm->drawObject))
504 MEM_freeN(gdo->materials);
505 MEM_freeN(gdo->triangle_to_mface);
506 MEM_freeN(gdo->vert_points);
507 MEM_freeN(gdo->vert_points_mem);
508 GPU_buffer_free(gdo->points);
509 GPU_buffer_free(gdo->normals);
510 GPU_buffer_free(gdo->uv);
511 GPU_buffer_free(gdo->colors);
512 GPU_buffer_free(gdo->edges);
513 GPU_buffer_free(gdo->uvedges);
516 dm->drawObject = NULL;
519 typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
520 int *mat_orig_to_new, void *user_data);
522 static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
523 int vector_size, int size, GLenum target,
524 void *user, GPUBufferCopyFunc copy_f)
529 int mat_orig_to_new[MAX_MATERIALS];
530 int *cur_index_per_mat;
535 pool = gpu_get_global_buffer_pool();
537 BLI_mutex_lock(&buffer_mutex);
539 /* alloc a GPUBuffer; fall back to legacy mode on failure */
540 if (!(buffer = gpu_buffer_alloc_intern(size)))
541 dm->drawObject->legacy = 1;
543 /* nothing to do for legacy mode */
544 if (dm->drawObject->legacy) {
545 BLI_mutex_unlock(&buffer_mutex);
549 cur_index_per_mat = MEM_mallocN(sizeof(int) * object->totmaterial,
550 "GPU_buffer_setup.cur_index_per_mat");
551 for (i = 0; i < object->totmaterial; i++) {
552 /* for each material, the current index to copy data to */
553 cur_index_per_mat[i] = object->materials[i].start * vector_size;
555 /* map from original material index to new
556 * GPUBufferMaterial index */
557 mat_orig_to_new[object->materials[i].mat_nr] = i;
564 /* bind the buffer and discard previous data,
565 * avoids stalling gpu */
566 glBindBufferARB(target, buffer->id);
567 glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);
569 /* attempt to map the buffer */
570 if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
571 /* failed to map the buffer; delete it */
572 gpu_buffer_free_intern(buffer);
573 gpu_buffer_pool_delete_last(pool);
576 /* try freeing an entry from the pool
577 * and reallocating the buffer */
578 if (pool->totbuf > 0) {
579 gpu_buffer_pool_delete_last(pool);
580 buffer = gpu_buffer_alloc_intern(size);
583 /* allocation still failed; fall back
586 dm->drawObject->legacy = 1;
595 /* check legacy fallback didn't happen */
596 if (dm->drawObject->legacy == 0) {
598 /* attempt to upload the data to the VBO */
599 while (uploaded == GL_FALSE) {
600 (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
601 /* glUnmapBuffer returns GL_FALSE if
602 * the data store is corrupted; retry
604 uploaded = glUnmapBufferARB(target);
607 glBindBufferARB(target, 0);
610 /* VBO not supported, use vertex array fallback */
611 if (buffer->pointer) {
612 varray = buffer->pointer;
613 (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
616 dm->drawObject->legacy = 1;
620 MEM_freeN(cur_index_per_mat);
622 BLI_mutex_unlock(&buffer_mutex);
627 static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
631 int i, j, start, totface;
633 mvert = dm->getVertArray(dm);
634 f = dm->getTessFaceArray(dm);
636 totface = dm->getNumTessFaces(dm);
637 for (i = 0; i < totface; i++, f++) {
638 start = index[mat_orig_to_new[f->mat_nr]];
641 copy_v3_v3(&varray[start], mvert[f->v1].co);
642 copy_v3_v3(&varray[start + 3], mvert[f->v2].co);
643 copy_v3_v3(&varray[start + 6], mvert[f->v3].co);
644 index[mat_orig_to_new[f->mat_nr]] += 9;
648 copy_v3_v3(&varray[start + 9], mvert[f->v3].co);
649 copy_v3_v3(&varray[start + 12], mvert[f->v4].co);
650 copy_v3_v3(&varray[start + 15], mvert[f->v1].co);
651 index[mat_orig_to_new[f->mat_nr]] += 9;
655 /* copy loose points */
656 j = dm->drawObject->tot_triangle_point * 3;
657 for (i = 0; i < dm->drawObject->totvert; i++) {
658 if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
659 copy_v3_v3(&varray[j], mvert[i].co);
665 static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
671 float *nors = dm->getTessFaceDataArray(dm, CD_NORMAL);
672 MVert *mvert = dm->getVertArray(dm);
673 MFace *f = dm->getTessFaceArray(dm);
675 totface = dm->getNumTessFaces(dm);
676 for (i = 0; i < totface; i++, f++) {
677 const int smoothnormal = (f->flag & ME_SMOOTH);
679 start = index[mat_orig_to_new[f->mat_nr]];
680 index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;
683 /* copy vertex normal */
684 normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
685 normal_short_to_float_v3(&varray[start + 3], mvert[f->v2].no);
686 normal_short_to_float_v3(&varray[start + 6], mvert[f->v3].no);
689 normal_short_to_float_v3(&varray[start + 9], mvert[f->v3].no);
690 normal_short_to_float_v3(&varray[start + 12], mvert[f->v4].no);
691 normal_short_to_float_v3(&varray[start + 15], mvert[f->v1].no);
695 /* copy cached face normal */
696 copy_v3_v3(&varray[start], &nors[i * 3]);
697 copy_v3_v3(&varray[start + 3], &nors[i * 3]);
698 copy_v3_v3(&varray[start + 6], &nors[i * 3]);
701 copy_v3_v3(&varray[start + 9], &nors[i * 3]);
702 copy_v3_v3(&varray[start + 12], &nors[i * 3]);
703 copy_v3_v3(&varray[start + 15], &nors[i * 3]);
707 /* calculate face normal */
709 normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
711 normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);
713 copy_v3_v3(&varray[start], f_no);
714 copy_v3_v3(&varray[start + 3], f_no);
715 copy_v3_v3(&varray[start + 6], f_no);
718 copy_v3_v3(&varray[start + 9], f_no);
719 copy_v3_v3(&varray[start + 12], f_no);
720 copy_v3_v3(&varray[start + 15], f_no);
726 static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
734 if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
736 f = dm->getTessFaceArray(dm);
738 totface = dm->getNumTessFaces(dm);
739 for (i = 0; i < totface; i++, f++) {
740 start = index[mat_orig_to_new[f->mat_nr]];
743 copy_v2_v2(&varray[start], mtface[i].uv[0]);
744 copy_v2_v2(&varray[start + 2], mtface[i].uv[1]);
745 copy_v2_v2(&varray[start + 4], mtface[i].uv[2]);
746 index[mat_orig_to_new[f->mat_nr]] += 6;
750 copy_v2_v2(&varray[start + 6], mtface[i].uv[2]);
751 copy_v2_v2(&varray[start + 8], mtface[i].uv[3]);
752 copy_v2_v2(&varray[start + 10], mtface[i].uv[0]);
753 index[mat_orig_to_new[f->mat_nr]] += 6;
/* copy one MCol (stored as 4 bytes, alpha first) into a 3-byte RGB
 * vertex color; note the byte order reversal. NOTE(review): body was
 * dropped by the extraction; restored from the upstream source. */
static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
{
	v[0] = col[3];
	v[1] = col[2];
	v[2] = col[1];
}
765 /* treat varray_ as an array of MCol, four MCol's per face */
766 static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
769 unsigned char *varray = (unsigned char *)varray_;
770 unsigned char *mcol = (unsigned char *)user;
771 MFace *f = dm->getTessFaceArray(dm);
773 totface = dm->getNumTessFaces(dm);
774 for (i = 0; i < totface; i++, f++) {
775 int start = index[mat_orig_to_new[f->mat_nr]];
778 copy_mcol_uc3(&varray[start], &mcol[i * 16]);
779 copy_mcol_uc3(&varray[start + 3], &mcol[i * 16 + 4]);
780 copy_mcol_uc3(&varray[start + 6], &mcol[i * 16 + 8]);
781 index[mat_orig_to_new[f->mat_nr]] += 9;
785 copy_mcol_uc3(&varray[start + 9], &mcol[i * 16 + 8]);
786 copy_mcol_uc3(&varray[start + 12], &mcol[i * 16 + 12]);
787 copy_mcol_uc3(&varray[start + 15], &mcol[i * 16]);
788 index[mat_orig_to_new[f->mat_nr]] += 9;
793 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
796 unsigned int *varray = (unsigned int *)varray_;
799 medge = dm->getEdgeArray(dm);
800 totedge = dm->getNumEdges(dm);
802 for (i = 0; i < totedge; i++, medge++) {
803 varray[i * 2] = dm->drawObject->vert_points[medge->v1].point_index;
804 varray[i * 2 + 1] = dm->drawObject->vert_points[medge->v2].point_index;
808 static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
810 MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
816 for (i = 0; i < dm->numTessFaceData; i++, tf++) {
818 dm->getTessFace(dm, i, &mf);
820 copy_v2_v2(&varray[j], tf->uv[0]);
821 copy_v2_v2(&varray[j + 2], tf->uv[1]);
823 copy_v2_v2(&varray[j + 4], tf->uv[1]);
824 copy_v2_v2(&varray[j + 6], tf->uv[2]);
827 copy_v2_v2(&varray[j + 8], tf->uv[2]);
828 copy_v2_v2(&varray[j + 10], tf->uv[0]);
832 copy_v2_v2(&varray[j + 8], tf->uv[2]);
833 copy_v2_v2(&varray[j + 10], tf->uv[3]);
835 copy_v2_v2(&varray[j + 12], tf->uv[3]);
836 copy_v2_v2(&varray[j + 14], tf->uv[0]);
843 GPU_BUFFER_VERTEX = 0,
852 GPUBufferCopyFunc copy;
853 GLenum gl_buffer_type;
855 } GPUBufferTypeSettings;
857 const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
858 {GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
859 {GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
860 {GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
861 {GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
862 {GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
863 {GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
866 /* get the GPUDrawObject buffer associated with a type */
867 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
870 case GPU_BUFFER_VERTEX:
872 case GPU_BUFFER_NORMAL:
873 return &gdo->normals;
874 case GPU_BUFFER_COLOR:
878 case GPU_BUFFER_EDGE:
880 case GPU_BUFFER_UVEDGE:
881 return &gdo->uvedges;
887 /* get the amount of space to allocate for a buffer of a particular type */
888 static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
891 case GPU_BUFFER_VERTEX:
892 return sizeof(float) * 3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
893 case GPU_BUFFER_NORMAL:
894 return sizeof(float) * 3 * dm->drawObject->tot_triangle_point;
895 case GPU_BUFFER_COLOR:
896 return sizeof(char) * 3 * dm->drawObject->tot_triangle_point;
898 return sizeof(float) * 2 * dm->drawObject->tot_triangle_point;
899 case GPU_BUFFER_EDGE:
900 return sizeof(int) * 2 * dm->drawObject->totedge;
901 case GPU_BUFFER_UVEDGE:
902 /* each face gets 3 points, 3 edges per triangle, and
903 * each edge has its own, non-shared coords, so each
904 * tri corner needs minimum of 4 floats, quads used
905 * less so here we can over allocate and assume all
907 return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
913 /* call gpu_buffer_setup with settings for a particular type of buffer */
914 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
916 const GPUBufferTypeSettings *ts;
917 void *user_data = NULL;
920 ts = &gpu_buffer_type_settings[type];
922 /* special handling for MCol and UV buffers */
923 if (type == GPU_BUFFER_COLOR) {
924 if (!(user_data = DM_get_tessface_data_layer(dm, dm->drawObject->colType)))
927 else if (type == GPU_BUFFER_UV) {
928 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
932 buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
933 gpu_buffer_size_from_type(dm, type),
934 ts->gl_buffer_type, user_data, ts->copy);
939 /* get the buffer of `type', initializing the GPUDrawObject and
940 * buffer if needed */
941 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
946 dm->drawObject = GPU_drawobject_new(dm);
948 buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
950 *buf = gpu_buffer_setup_type(dm, type);
955 void GPU_vertex_setup(DerivedMesh *dm)
957 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
960 glEnableClientState(GL_VERTEX_ARRAY);
962 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
963 glVertexPointer(3, GL_FLOAT, 0, 0);
966 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
969 GLStates |= GPU_BUFFER_VERTEX_STATE;
972 void GPU_normal_setup(DerivedMesh *dm)
974 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
977 glEnableClientState(GL_NORMAL_ARRAY);
979 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
980 glNormalPointer(GL_FLOAT, 0, 0);
983 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
986 GLStates |= GPU_BUFFER_NORMAL_STATE;
989 void GPU_uv_setup(DerivedMesh *dm)
991 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
994 glEnableClientState(GL_TEXTURE_COORD_ARRAY);
996 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
997 glTexCoordPointer(2, GL_FLOAT, 0, 0);
1000 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
1003 GLStates |= GPU_BUFFER_TEXCOORD_STATE;
1006 void GPU_color_setup(DerivedMesh *dm, int colType)
1008 if (!dm->drawObject) {
1009 /* XXX Not really nice, but we need a valid gpu draw object to set the colType...
1010 * Else we would have to add a new param to gpu_buffer_setup_common. */
1011 dm->drawObject = GPU_drawobject_new(dm);
1012 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
1013 dm->drawObject->colType = colType;
1015 /* In paint mode, dm may stay the same during stroke, however we still want to update colors!
1016 * Also check in case we changed color type (i.e. which MCol cdlayer we use). */
1017 else if ((dm->dirty & DM_DIRTY_MCOL_UPDATE_DRAW) || (colType != dm->drawObject->colType)) {
1018 GPUBuffer **buf = gpu_drawobject_buffer_from_type(dm->drawObject, GPU_BUFFER_COLOR);
1019 /* XXX Freeing this buffer is a bit stupid, as geometry has not changed, size should remain the same.
1020 * Not sure though it would be worth defining a sort of gpu_buffer_update func - nor whether
1021 * it is even possible ! */
1022 GPU_buffer_free(*buf);
1024 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
1025 dm->drawObject->colType = colType;
1028 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
1031 glEnableClientState(GL_COLOR_ARRAY);
1033 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
1034 glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
1037 glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
1040 GLStates |= GPU_BUFFER_COLOR_STATE;
1043 void GPU_edge_setup(DerivedMesh *dm)
1045 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1048 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1051 glEnableClientState(GL_VERTEX_ARRAY);
1053 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1054 glVertexPointer(3, GL_FLOAT, 0, 0);
1057 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1060 GLStates |= GPU_BUFFER_VERTEX_STATE;
1063 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1065 GLStates |= GPU_BUFFER_ELEMENT_STATE;
1068 void GPU_uvedge_setup(DerivedMesh *dm)
1070 if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1073 glEnableClientState(GL_VERTEX_ARRAY);
1075 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1076 glVertexPointer(2, GL_FLOAT, 0, 0);
1079 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1082 GLStates |= GPU_BUFFER_VERTEX_STATE;
1085 static int GPU_typesize(int type)
1089 return sizeof(float);
1092 case GL_UNSIGNED_INT:
1093 return sizeof(unsigned int);
1095 return sizeof(char);
1096 case GL_UNSIGNED_BYTE:
1097 return sizeof(unsigned char);
1103 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1105 int i, elementsize = 0;
1107 for (i = 0; i < numdata; i++) {
1108 int typesize = GPU_typesize(data[i].type);
1110 elementsize += typesize * data[i].size;
1115 void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
1119 intptr_t offset = 0;
1121 for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1122 if (attribData[i].index != -1) {
1123 glDisableVertexAttribArrayARB(attribData[i].index);
1128 elementsize = GPU_attrib_element_size(data, numdata);
1131 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1132 for (i = 0; i < numdata; i++) {
1133 glEnableVertexAttribArrayARB(data[i].index);
1134 glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1135 GL_FALSE, elementsize, (void *)offset);
1136 offset += data[i].size * GPU_typesize(data[i].type);
1138 attribData[i].index = data[i].index;
1139 attribData[i].size = data[i].size;
1140 attribData[i].type = data[i].type;
1142 attribData[numdata].index = -1;
1145 for (i = 0; i < numdata; i++) {
1146 glEnableVertexAttribArrayARB(data[i].index);
1147 glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1148 GL_FALSE, elementsize, (char *)buffer->pointer + offset);
1149 offset += data[i].size * GPU_typesize(data[i].type);
1155 void GPU_buffer_unbind(void)
1159 if (GLStates & GPU_BUFFER_VERTEX_STATE)
1160 glDisableClientState(GL_VERTEX_ARRAY);
1161 if (GLStates & GPU_BUFFER_NORMAL_STATE)
1162 glDisableClientState(GL_NORMAL_ARRAY);
1163 if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1164 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1165 if (GLStates & GPU_BUFFER_COLOR_STATE)
1166 glDisableClientState(GL_COLOR_ARRAY);
1167 if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1169 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1172 GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1173 GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1174 GPU_BUFFER_ELEMENT_STATE);
1176 for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1177 if (attribData[i].index != -1) {
1178 glDisableVertexAttribArrayARB(attribData[i].index);
1185 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1188 void GPU_color_switch(int mode)
1191 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1192 glEnableClientState(GL_COLOR_ARRAY);
1193 GLStates |= GPU_BUFFER_COLOR_STATE;
1196 if (GLStates & GPU_BUFFER_COLOR_STATE)
1197 glDisableClientState(GL_COLOR_ARRAY);
1198 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1202 /* return 1 if drawing should be done using old immediate-mode
1203 * code, 0 otherwise */
1204 int GPU_buffer_legacy(DerivedMesh *dm)
1206 int test = (U.gameflags & USER_DISABLE_VBO);
1210 if (dm->drawObject == 0)
1211 dm->drawObject = GPU_drawobject_new(dm);
1212 return dm->drawObject->legacy;
1215 void *GPU_buffer_lock(GPUBuffer *buffer)
1223 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1224 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1228 return buffer->pointer;
1232 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1240 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1241 /* discard previous data, avoid stalling gpu */
1242 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1243 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1247 return buffer->pointer;
1251 void GPU_buffer_unlock(GPUBuffer *buffer)
1255 /* note: this operation can fail, could return
1256 * an error code from this function? */
1257 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1259 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1263 /* used for drawing edges */
1264 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1266 glDrawElements(mode, count, GL_UNSIGNED_INT,
1268 (void *)(start * sizeof(unsigned int)) :
1269 ((int *)elements->pointer) + start));
1273 /* XXX: the rest of the code in this file is used for optimized PBVH
1274 * drawing and doesn't interact at all with the buffer code above */
1276 /* Return false if VBO is either unavailable or disabled by the user,
1278 static int gpu_vbo_enabled(void)
1280 return (GLEW_ARB_vertex_buffer_object &&
1281 !(U.gameflags & USER_DISABLE_VBO));
/* Convenience struct for building the VBO.
 * NOTE(review): field list partially dropped by the extraction;
 * restored from the upstream source — confirm against it. */
typedef struct {
	float co[3];
	short no[3];

	/* inserting this to align the 'color' field to a four-byte
	 * boundary; drastically increases viewport performance on my
	 * drivers (Gallium/Radeon) --nicholasbishop */
	char pad[2];

	unsigned char color[3];
} VertexBufferFormat;
/* Per-PBVH-node GPU state: VBO handles plus the CPU-side mesh/grid pointers
 * needed for the immediate-mode fallback. Several members are elided from
 * this view. */
1297 struct GPU_Buffers {
1298 /* opengl buffer handles */
1299 GLuint vert_buf, index_buf;
1302 /* mesh pointers in case buffer allocation fails */
/* Grid (multires) data used by the grid drawing paths. */
1312 const DMFlagMat *grid_flag_mats;
1313 BLI_bitmap * const *grid_hidden;
/* Visible primitive counts for the current node. */
1320 unsigned int tot_tri, tot_quad;
1322 /* The PBVH ensures that either all faces in the node are
1323 * smooth-shaded or all faces are flat-shaded */
/* Cached so GPU_buffers_diffuse_changed() can detect material edits. */
1326 int show_diffuse_color;
1327 float diffuse_color[4];
/* Enable per-vertex color material tracking; additionally enables the color
 * client array when drawing from VBOs. Paired with gpu_colors_disable(). */
1334 static void gpu_colors_enable(VBO_State vbo_state)
1336 glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
1337 glEnable(GL_COLOR_MATERIAL);
1338 if (vbo_state == VBO_ENABLED)
1339 glEnableClientState(GL_COLOR_ARRAY);
/* Undo gpu_colors_enable(): disable color-material and, for the VBO path,
 * the color client array. */
1342 static void gpu_colors_disable(VBO_State vbo_state)
1344 glDisable(GL_COLOR_MATERIAL);
1345 if (vbo_state == VBO_ENABLED)
1346 glDisableClientState(GL_COLOR_ARRAY);
/* Map a sculpt-mask value (0 = unmasked) to a brightness factor:
 * mask 0.0 -> 1.0 (full brightness), mask 1.0 -> 0.25 (darkest). */
1349 static float gpu_color_from_mask(float mask)
1351 return 1.0f - mask * 0.75f;
/* Write the mask-shaded diffuse color into a byte triple 'out'
 * (0-255 per channel). */
1354 static void gpu_color_from_mask_copy(float mask, const float diffuse_color[4], unsigned char out[3])
1358 mask_color = gpu_color_from_mask(mask) * 255.0f;
1360 out[0] = diffuse_color[0] * mask_color;
1361 out[1] = diffuse_color[1] * mask_color;
1362 out[2] = diffuse_color[2] * mask_color;
/* Immediate-mode variant of gpu_color_from_mask_copy(): submits the shaded
 * diffuse color directly via glColor3f. */
1365 static void gpu_color_from_mask_set(float mask, float diffuse_color[4])
1367 float color = gpu_color_from_mask(mask);
1368 glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
/* Brightness factor for a flat-shaded quad: average of the four corner
 * mask values, passed through gpu_color_from_mask(). */
1371 static float gpu_color_from_mask_quad(const CCGKey *key,
1372 CCGElem *a, CCGElem *b,
1373 CCGElem *c, CCGElem *d)
1375 return gpu_color_from_mask((*CCG_elem_mask(key, a) +
1376 *CCG_elem_mask(key, b) +
1377 *CCG_elem_mask(key, c) +
1378 *CCG_elem_mask(key, d)) * 0.25f);
/* Like gpu_color_from_mask_copy() but averaging the mask over the four
 * corners of a grid quad; writes the shaded color to byte triple 'out'.
 * (The declaration of 'mask_color' is elided from this view.) */
1381 static void gpu_color_from_mask_quad_copy(const CCGKey *key,
1382 CCGElem *a, CCGElem *b,
1383 CCGElem *c, CCGElem *d,
1384 const float *diffuse_color,
1385 unsigned char out[3])
1388 gpu_color_from_mask((*CCG_elem_mask(key, a) +
1389 *CCG_elem_mask(key, b) +
1390 *CCG_elem_mask(key, c) +
1391 *CCG_elem_mask(key, d)) * 0.25f) * 255.0f;
1393 out[0] = diffuse_color[0] * mask_color;
1394 out[1] = diffuse_color[1] * mask_color;
1395 out[2] = diffuse_color[2] * mask_color;
/* Immediate-mode variant of gpu_color_from_mask_quad_copy(): submits the
 * quad-averaged shaded diffuse color via glColor3f. */
1398 static void gpu_color_from_mask_quad_set(const CCGKey *key,
1399 CCGElem *a, CCGElem *b,
1400 CCGElem *c, CCGElem *d,
1401 float diffuse_color[4])
1403 float color = gpu_color_from_mask_quad(key, a, b, c, d);
1404 glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
/* Refill the node's vertex VBO from mesh data. Smooth shading shares one
 * entry per vertex; flat shading duplicates vertices per triangle so each
 * corner can carry the face normal. On map failure the VBO is deleted and
 * drawing falls back to the legacy path. */
1407 void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
1408 int *vert_indices, int totvert, const float *vmask,
1409 int (*face_vert_indices)[4], int show_diffuse_color)
1411 VertexBufferFormat *vert_data;
1414 buffers->vmask = vmask;
1415 buffers->show_diffuse_color = show_diffuse_color;
1417 if (buffers->vert_buf) {
1418 int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));
1419 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
/* Take the diffuse color from the first face's material (node faces are
 * assumed to share a material here). */
1421 if (buffers->show_diffuse_color) {
1422 MFace *f = buffers->mface + buffers->face_indices[0];
1424 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
1427 copy_v4_v4(buffers->diffuse_color, diffuse_color);
/* Orphan the old storage, then map for writing. */
1430 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1431 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1432 sizeof(VertexBufferFormat) * totelem,
1433 NULL, GL_STATIC_DRAW_ARB);
1435 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1438 /* Vertex data is shared if smooth-shaded, but separate
1439 * copies are made for flat shading because normals
1440 * shouldn't be shared. */
1441 if (buffers->smooth) {
1442 for (i = 0; i < totvert; ++i) {
1443 MVert *v = mvert + vert_indices[i];
1444 VertexBufferFormat *out = vert_data + i;
1446 copy_v3_v3(out->co, v->co);
1447 memcpy(out->no, v->no, sizeof(short) * 3);
1450 #define UPDATE_VERTEX(face, vertex, index, diffuse_color) \
1452 VertexBufferFormat *out = vert_data + face_vert_indices[face][index]; \
1454 gpu_color_from_mask_copy(vmask[vertex], diffuse_color, out->color); \
1456 rgb_float_to_uchar(out->color, diffuse_color); \
1459 for (i = 0; i < buffers->totface; i++) {
1460 MFace *f = buffers->mface + buffers->face_indices[i];
1462 UPDATE_VERTEX(i, f->v1, 0, diffuse_color);
1463 UPDATE_VERTEX(i, f->v2, 1, diffuse_color);
1464 UPDATE_VERTEX(i, f->v3, 2, diffuse_color);
1466 UPDATE_VERTEX(i, f->v4, 3, diffuse_color);
1468 #undef UPDATE_VERTEX
/* Flat-shading path: emit 1 tri for triangles, 2 for quads (vi gives the
 * corner order of each emitted triangle). */
1471 for (i = 0; i < buffers->totface; ++i) {
1472 const MFace *f = &buffers->mface[buffers->face_indices[i]];
1473 const unsigned int *fv = &f->v1;
1474 const int vi[2][3] = {{0, 1, 2}, {3, 0, 2}};
1480 if (paint_is_face_hidden(f, mvert))
1483 /* Face normal and mask */
/* Quad mask = average of 4 corners; triangle mask = average of 3. */
1491 fmask = (vmask[fv[0]] +
1494 vmask[fv[3]]) * 0.25f;
1503 fmask = (vmask[fv[0]] +
1505 vmask[fv[2]]) / 3.0f;
1508 normal_float_to_short_v3(no, fno);
1510 for (j = 0; j < (f->v4 ? 2 : 1); j++) {
1511 for (k = 0; k < 3; k++) {
1512 const MVert *v = &mvert[fv[vi[j][k]]];
1513 VertexBufferFormat *out = vert_data;
1515 copy_v3_v3(out->co, v->co);
1516 memcpy(out->no, no, sizeof(short) * 3);
1519 gpu_color_from_mask_copy(fmask, diffuse_color, out->color);
1521 rgb_float_to_uchar(out->color, diffuse_color);
1529 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
/* Mapping failed: drop the VBO so drawing uses the legacy fallback. */
1532 glDeleteBuffersARB(1, &buffers->vert_buf);
1533 buffers->vert_buf = 0;
1536 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1539 buffers->mvert = mvert;
/* Allocate a GPU_Buffers for a mesh PBVH node: count visible triangles,
 * build the element index buffer (smooth shading only), and create the
 * vertex VBO handle. Actual vertex data is filled later by
 * GPU_update_mesh_buffers(). */
1542 GPU_Buffers *GPU_build_mesh_buffers(int (*face_vert_indices)[4],
1543 MFace *mface, MVert *mvert,
1547 GPU_Buffers *buffers;
1548 unsigned short *tri_data;
1549 int i, j, k, tottri;
1551 buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1552 buffers->index_type = GL_UNSIGNED_SHORT;
1553 buffers->smooth = mface[face_indices[0]].flag & ME_SMOOTH;
1555 buffers->show_diffuse_color = FALSE;
1557 /* Count the number of visible triangles */
1558 for (i = 0, tottri = 0; i < totface; ++i) {
1559 const MFace *f = &mface[face_indices[i]];
1560 if (!paint_is_face_hidden(f, mvert))
1561 tottri += f->v4 ? 2 : 1;
1564 /* An element index buffer is used for smooth shading, but flat
1565 * shading requires separate vertex normals so an index buffer
1566 * can't be used there. */
1567 if (gpu_vbo_enabled() && buffers->smooth)
1568 glGenBuffersARB(1, &buffers->index_buf);
1570 if (buffers->index_buf) {
1571 /* Generate index buffer object */
1572 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1573 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
1574 sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);
1576 /* Fill the triangle buffer */
1577 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1579 for (i = 0; i < totface; ++i) {
1580 const MFace *f = mface + face_indices[i];
1583 /* Skip hidden faces */
1584 if (paint_is_face_hidden(f, mvert))
/* Quads emit two triangles; 'v' (elided) presumably holds the corner
 * order for the current triangle -- confirm against the full file. */
1591 for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
1592 for (k = 0; k < 3; ++k) {
1593 *tri_data = face_vert_indices[i][v[k]];
1601 glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
/* Mapping failed: release the index buffer; drawing will fall back. */
1604 glDeleteBuffersARB(1, &buffers->index_buf);
1605 buffers->index_buf = 0;
1608 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
/* Vertex VBO only makes sense if indexing succeeded (smooth) or no index
 * buffer is needed (flat). */
1611 if (gpu_vbo_enabled() && (buffers->index_buf || !buffers->smooth))
1612 glGenBuffersARB(1, &buffers->vert_buf);
1614 buffers->tot_tri = tottri;
1616 buffers->mface = mface;
1617 buffers->face_indices = face_indices;
1618 buffers->totface = totface;
/* Refill the node's vertex VBO from multires grid data. For flat shading
 * the normal of each quad is recomputed and stored in the quad's last
 * vertex, since that is the vertex OpenGL uses for flat shading. */
1623 void GPU_update_grid_buffers(GPU_Buffers *buffers, CCGElem **grids,
1624 const DMFlagMat *grid_flag_mats, int *grid_indices,
1625 int totgrid, const CCGKey *key, int show_diffuse_color)
1627 VertexBufferFormat *vert_data;
1630 buffers->show_diffuse_color = show_diffuse_color;
1633 if (buffers->vert_buf) {
1634 int totvert = key->grid_area * totgrid;
1635 int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1636 const int has_mask = key->has_mask;
1637 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
/* Material diffuse from the first grid's flags (shared per node). */
1639 if (buffers->show_diffuse_color) {
1640 const DMFlagMat *flags = &grid_flag_mats[grid_indices[0]];
1642 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
1645 copy_v4_v4(buffers->diffuse_color, diffuse_color);
/* Orphan old storage, then map the whole multi-grid vertex range. */
1647 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1648 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1649 sizeof(VertexBufferFormat) * totvert,
1650 NULL, GL_STATIC_DRAW_ARB);
1651 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1653 for (i = 0; i < totgrid; ++i) {
1654 VertexBufferFormat *vd = vert_data;
1655 CCGElem *grid = grids[grid_indices[i]];
1657 for (y = 0; y < key->grid_size; y++) {
1658 for (x = 0; x < key->grid_size; x++) {
1659 CCGElem *elem = CCG_grid_elem(key, grid, x, y);
1661 copy_v3_v3(vd->co, CCG_elem_co(key, elem));
1663 normal_float_to_short_v3(vd->no, CCG_elem_no(key, elem));
1666 gpu_color_from_mask_copy(*CCG_elem_mask(key, elem),
1667 diffuse_color, vd->color);
1675 /* for flat shading, recalc normals and set the last vertex of
1676 * each quad in the index buffer to have the flat normal as
1677 * that is what opengl will use */
1678 for (j = 0; j < key->grid_size - 1; j++) {
1679 for (k = 0; k < key->grid_size - 1; k++) {
1680 CCGElem *elems[4] = {
1681 CCG_grid_elem(key, grid, k, j + 1),
1682 CCG_grid_elem(key, grid, k + 1, j + 1),
1683 CCG_grid_elem(key, grid, k + 1, j),
1684 CCG_grid_elem(key, grid, k, j)
1689 CCG_elem_co(key, elems[0]),
1690 CCG_elem_co(key, elems[1]),
1691 CCG_elem_co(key, elems[2]),
1692 CCG_elem_co(key, elems[3]));
1694 vd = vert_data + (j + 1) * key->grid_size + (k + 1);
1695 normal_float_to_short_v3(vd->no, fno);
1698 gpu_color_from_mask_quad_copy(key,
/* Advance to the next grid's slice of the mapped buffer. */
1710 vert_data += key->grid_area;
1712 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
/* Mapping failed: drop the VBO; drawing will use the legacy path. */
1715 glDeleteBuffersARB(1, &buffers->vert_buf);
1716 buffers->vert_buf = 0;
1718 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1721 buffers->grids = grids;
1722 buffers->grid_indices = grid_indices;
1723 buffers->totgrid = totgrid;
1724 buffers->grid_flag_mats = grid_flag_mats;
1725 buffers->gridkey = *key;
1727 buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1729 //printf("node updated %p\n", buffers);
1732 /* Returns the number of visible quads in the nodes' grids. */
1733 static int gpu_count_grid_quads(BLI_bitmap **grid_hidden,
1734 int *grid_indices, int totgrid,
1737 int gridarea = (gridsize - 1) * (gridsize - 1);
1738 int i, x, y, totquad;
1740 /* grid hidden layer is present, so have to check each grid for
1743 for (i = 0, totquad = 0; i < totgrid; i++) {
1744 const BLI_bitmap *gh = grid_hidden[grid_indices[i]];
1747 /* grid hidden are present, have to check each element */
1748 for (y = 0; y < gridsize - 1; y++) {
1749 for (x = 0; x < gridsize - 1; x++) {
1750 if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
/* No hidden bitmap for this grid: every quad is visible. */
1756 totquad += gridarea;
1762 /* Build the element array buffer of grid indices using either
1763 * unsigned shorts or unsigned ints. */
/* Macro (not a function) because the index element type varies. Expects
 * 'grid_hidden', 'grid_indices', 'totgrid' and 'gridsize' in the caller's
 * scope; on map failure it deletes 'buffer_' and zeroes it. No comments are
 * added inside the body to keep the backslash continuations intact. */
1764 #define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_) \
1770 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, \
1771 sizeof(type_) * (tot_quad_) * 4, NULL, \
1772 GL_STATIC_DRAW_ARB); \
1774 /* Fill the quad buffer */ \
1775 quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, \
1776 GL_WRITE_ONLY_ARB); \
1778 for (i = 0; i < totgrid; ++i) { \
1779 BLI_bitmap *gh = NULL; \
1781 gh = grid_hidden[(grid_indices)[i]]; \
1783 for (j = 0; j < gridsize - 1; ++j) { \
1784 for (k = 0; k < gridsize - 1; ++k) { \
1785 /* Skip hidden grid face */ \
1787 paint_is_grid_face_hidden(gh, \
1791 *(quad_data++) = offset + j * gridsize + k + 1; \
1792 *(quad_data++) = offset + j * gridsize + k; \
1793 *(quad_data++) = offset + (j + 1) * gridsize + k; \
1794 *(quad_data++) = offset + (j + 1) * gridsize + k + 1; \
1798 offset += gridsize * gridsize; \
1800 glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB); \
1803 glDeleteBuffersARB(1, &(buffer_)); \
1807 /* end FILL_QUAD_BUFFER */
/* Return a shared element buffer for a fully-visible grid of 'gridsize',
 * caching a single buffer across calls (static locals). Picks 16-bit
 * indices when they fit, 32-bit otherwise. Returns 0 when VBOs are
 * disabled. NOTE(review): function-level static cache is not thread-safe
 * and assumes all callers share one GL context. */
1809 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1811 static int prev_gridsize = -1;
1812 static GLenum prev_index_type = 0;
1813 static GLuint buffer = 0;
1814 static unsigned prev_totquad;
1816 /* used in the FILL_QUAD_BUFFER macro */
1817 BLI_bitmap * const *grid_hidden = NULL;
1818 int *grid_indices = NULL;
1821 /* VBO is disabled; delete the previous buffer (if it exists) and
1822 * return an invalid handle */
1823 if (!gpu_vbo_enabled()) {
1825 glDeleteBuffersARB(1, &buffer);
1829 /* VBO is already built */
1830 if (buffer && prev_gridsize == gridsize) {
1831 *index_type = prev_index_type;
1832 *totquad = prev_totquad;
/* (Re)build the cached buffer for this gridsize. */
1837 glGenBuffersARB(1, &buffer);
1839 *totquad = (gridsize - 1) * (gridsize - 1);
1841 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1843 if (gridsize * gridsize < USHRT_MAX) {
1844 *index_type = GL_UNSIGNED_SHORT;
1845 FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1848 *index_type = GL_UNSIGNED_INT;
1849 FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1852 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1855 prev_gridsize = gridsize;
1856 prev_index_type = *index_type;
1857 prev_totquad = *totquad;
/* Allocate a GPU_Buffers for a grid PBVH node. Fully-visible nodes share
 * the cached element buffer from gpu_get_grid_buffer(); nodes with hidden
 * faces get their own per-node index buffer. */
1861 GPU_Buffers *GPU_build_grid_buffers(int *grid_indices, int totgrid,
1862 BLI_bitmap **grid_hidden, int gridsize)
1864 GPU_Buffers *buffers;
1866 int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;
1868 buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1869 buffers->grid_hidden = grid_hidden;
1870 buffers->totgrid = totgrid;
1872 buffers->show_diffuse_color = FALSE;
1874 /* Count the number of quads */
1875 totquad = gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);
1877 if (totquad == fully_visible_totquad) {
1878 buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
1879 buffers->has_hidden = 0;
1881 else if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO)) {
/* Partially hidden node: build a private index buffer for it. */
1883 glGenBuffersARB(1, &buffers->index_buf);
1884 if (buffers->index_buf) {
1885 buffers->tot_quad = totquad;
1887 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1889 if (totgrid * gridsize * gridsize < USHRT_MAX) {
1890 buffers->index_type = GL_UNSIGNED_SHORT;
1891 FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
1894 buffers->index_type = GL_UNSIGNED_INT;
1895 FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
1898 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1901 buffers->has_hidden = 1;
1904 /* Build coord/normal VBO */
1905 if (buffers->index_buf)
1906 glGenBuffersARB(1, &buffers->vert_buf);
1911 #undef FILL_QUAD_BUFFER
1913 /* Output a BMVert into a VertexBufferFormat array
1915 * The vertex is skipped if hidden, otherwise the output goes into
1916 * index '*v_index' in the 'vert_data' array and '*v_index' is
1919 static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
1920 VertexBufferFormat *vert_data,
/* 'fno'/'fmask' override the vertex normal/mask for flat shading;
 * pass NULL to use the vertex's own data (parameter lines elided). */
1924 const int cd_vert_mask_offset)
1926 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
1927 VertexBufferFormat *vd = &vert_data[*v_index];
1929 /* TODO: should use material color */
1930 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1932 /* Set coord, normal, and mask */
1933 copy_v3_v3(vd->co, v->co);
1934 normal_float_to_short_v3(vd->no, fno ? fno : v->no);
1936 gpu_color_from_mask_copy(
1938 BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset),
1943 /* Assign index for use in the triangle index buffer */
1944 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
1950 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
1951 static int gpu_bmesh_vert_visible_count(GHash *bm_unique_verts,
1952 GHash *bm_other_verts)
1954 GHashIterator gh_iter;
/* Walk both vertex sets; the counter increment lines are elided here. */
1957 GHASH_ITER (gh_iter, bm_unique_verts) {
1958 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1959 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1962 GHASH_ITER (gh_iter, bm_other_verts) {
1963 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1964 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1971 /* Return the total number of visible faces */
1972 static int gpu_bmesh_face_visible_count(GHash *bm_faces)
1974 GHashIterator gh_iter;
1977 GHASH_ITER (gh_iter, bm_faces) {
1978 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
1980 if (!paint_is_bmesh_face_hidden(f))
1987 /* Creates a vertex buffer (coordinate, normal, color) and, if smooth
1988 * shading, an element index buffer. */
1989 void GPU_update_bmesh_buffers(GPU_Buffers *buffers,
1992 GHash *bm_unique_verts,
1993 GHash *bm_other_verts)
1995 VertexBufferFormat *vert_data;
1997 int tottri, totvert, maxvert = 0;
1999 /* TODO, make mask layer optional for bmesh buffer */
2000 const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
/* Bail out if the required GL buffer handles were never created. */
2002 if (!buffers->vert_buf || (buffers->smooth && !buffers->index_buf))
2005 /* Count visible triangles */
2006 tottri = gpu_bmesh_face_visible_count(bm_faces);
2008 if (buffers->smooth) {
2009 /* Count visible vertices */
2010 totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
/* Flat shading duplicates vertices: three per triangle. */
2013 totvert = tottri * 3;
2015 /* Initialize vertex buffer */
2016 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
2017 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
2018 sizeof(VertexBufferFormat) * totvert,
2019 NULL, GL_STATIC_DRAW_ARB);
2021 /* Fill vertex buffer */
2022 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
2024 GHashIterator gh_iter;
2027 if (buffers->smooth) {
2028 /* Vertices get an index assigned for use in the triangle
2030 bm->elem_index_dirty |= BM_VERT;
2032 GHASH_ITER (gh_iter, bm_unique_verts) {
2033 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2034 vert_data, &v_index, NULL, NULL,
2035 cd_vert_mask_offset);
2038 GHASH_ITER (gh_iter, bm_other_verts) {
2039 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2040 vert_data, &v_index, NULL, NULL,
2041 cd_vert_mask_offset);
/* Flat path: emit three buffer entries per visible triangle, all with
 * the face normal and the face-averaged mask value. */
2047 GHASH_ITER (gh_iter, bm_faces) {
2048 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2050 BLI_assert(f->len == 3);
2052 if (!paint_is_bmesh_face_hidden(f)) {
2057 // BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void**)v, 3);
2058 BM_face_as_array_vert_tri(f, v);
2060 /* Average mask value */
2061 for (i = 0; i < 3; i++) {
2062 fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
2066 for (i = 0; i < 3; i++) {
2067 gpu_bmesh_vert_to_buffer_copy(v[i], vert_data,
2068 &v_index, f->no, &fmask,
2069 cd_vert_mask_offset);
2074 buffers->tot_tri = tottri;
2077 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
2080 /* Memory map failed */
2081 glDeleteBuffersARB(1, &buffers->vert_buf);
2082 buffers->vert_buf = 0;
/* Smooth shading also needs a triangle index buffer; 16-bit indices
 * when the highest vertex index fits. */
2086 if (buffers->smooth) {
2087 const int use_short = (maxvert < USHRT_MAX);
2089 /* Initialize triangle index buffer */
2090 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2091 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
2093 sizeof(unsigned short) :
2094 sizeof(unsigned int)) * 3 * tottri,
2095 NULL, GL_STATIC_DRAW_ARB);
2097 /* Fill triangle index buffer */
2098 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
2100 GHashIterator gh_iter;
2102 GHASH_ITER (gh_iter, bm_faces) {
2103 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2105 if (!paint_is_bmesh_face_hidden(f)) {
2109 l_iter = l_first = BM_FACE_FIRST_LOOP(f);
2111 BMVert *v = l_iter->v;
2113 unsigned short *elem = tri_data;
2114 (*elem) = BM_elem_index_get(v);
2119 unsigned int *elem = tri_data;
2120 (*elem) = BM_elem_index_get(v);
2124 } while ((l_iter = l_iter->next) != l_first);
2128 glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
2130 buffers->tot_tri = tottri;
2131 buffers->index_type = (use_short ?
2136 /* Memory map failed */
2137 glDeleteBuffersARB(1, &buffers->index_buf);
2138 buffers->index_buf = 0;
/* Allocate a GPU_Buffers for dynamic-topology (BMesh) sculpting. Both GL
 * handles are created up front; GPU_update_bmesh_buffers() fills them. */
2143 GPU_Buffers *GPU_build_bmesh_buffers(int smooth_shading)
2145 GPU_Buffers *buffers;
2147 buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
2149 glGenBuffersARB(1, &buffers->index_buf);
2150 glGenBuffersARB(1, &buffers->vert_buf);
2151 buffers->use_bmesh = TRUE;
2152 buffers->smooth = smooth_shading;
/* Immediate-mode fallback for mesh nodes, used when VBOs are unavailable
 * or their allocation/mapping failed. */
2157 static void gpu_draw_buffers_legacy_mesh(GPU_Buffers *buffers)
2159 const MVert *mvert = buffers->mvert;
2161 const int has_mask = (buffers->vmask != NULL);
2162 const MFace *face = &buffers->mface[buffers->face_indices[0]];
2163 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2165 if (buffers->show_diffuse_color)
2166 GPU_material_diffuse_get(face->mat_nr + 1, diffuse_color);
2169 gpu_colors_enable(VBO_DISABLED);
2172 for (i = 0; i < buffers->totface; ++i) {
2173 MFace *f = buffers->mface + buffers->face_indices[i];
2174 int S = f->v4 ? 4 : 3;
2175 unsigned int *fv = &f->v1;
2177 if (paint_is_face_hidden(f, buffers->mvert))
2180 glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);
/* Smooth: per-vertex normal and mask color. */
2182 if (buffers->smooth) {
2183 for (j = 0; j < S; j++) {
2185 gpu_color_from_mask_set(buffers->vmask[fv[j]], diffuse_color);
2187 glNormal3sv(mvert[fv[j]].no);
2188 glVertex3fv(mvert[fv[j]].co);
2194 /* calculate face normal */
2196 normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
2197 mvert[fv[2]].co, mvert[fv[3]].co);
2200 normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
2206 /* calculate face mask color */
2207 fmask = (buffers->vmask[fv[0]] +
2208 buffers->vmask[fv[1]] +
2209 buffers->vmask[fv[2]]);
2211 fmask = (fmask + buffers->vmask[fv[3]]) * 0.25f;
2214 gpu_color_from_mask_set(fmask, diffuse_color);
2217 for (j = 0; j < S; j++)
2218 glVertex3fv(mvert[fv[j]].co);
2225 gpu_colors_disable(VBO_DISABLED);
/* Immediate-mode fallback for multires grid nodes. Nodes with a hidden
 * bitmap draw quad-by-quad; fully visible grids use quad strips. */
2229 static void gpu_draw_buffers_legacy_grids(GPU_Buffers *buffers)
2231 const CCGKey *key = &buffers->gridkey;
2232 int i, j, x, y, gridsize = buffers->gridkey.grid_size;
2233 const int has_mask = key->has_mask;
2234 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2235 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2237 if (buffers->show_diffuse_color)
2238 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2241 gpu_colors_enable(VBO_DISABLED);
2244 for (i = 0; i < buffers->totgrid; ++i) {
2245 int g = buffers->grid_indices[i];
2246 CCGElem *grid = buffers->grids[g];
2247 BLI_bitmap *gh = buffers->grid_hidden[g];
2249 /* TODO: could use strips with hiding as well */
/* Hidden-face path: iterate each quad so hidden ones can be skipped. */
2254 for (y = 0; y < gridsize - 1; y++) {
2255 for (x = 0; x < gridsize - 1; x++) {
2257 CCG_grid_elem(key, grid, x + 1, y + 1),
2258 CCG_grid_elem(key, grid, x + 1, y),
2259 CCG_grid_elem(key, grid, x, y),
2260 CCG_grid_elem(key, grid, x, y + 1)
2263 /* skip face if any of its corners are hidden */
2264 if (paint_is_grid_face_hidden(gh, gridsize, x, y))
2267 if (buffers->smooth) {
2268 for (j = 0; j < 4; j++) {
2270 gpu_color_from_mask_set(*CCG_elem_mask(key, e[j]), diffuse_color);
2272 glNormal3fv(CCG_elem_no(key, e[j]));
2273 glVertex3fv(CCG_elem_co(key, e[j]));
/* Flat: one normal (and one mask color) for the whole quad. */
2279 CCG_elem_co(key, e[0]),
2280 CCG_elem_co(key, e[1]),
2281 CCG_elem_co(key, e[2]),
2282 CCG_elem_co(key, e[3]));
2286 gpu_color_from_mask_quad_set(key, e[0], e[1], e[2], e[3], diffuse_color);
2289 for (j = 0; j < 4; j++)
2290 glVertex3fv(CCG_elem_co(key, e[j]));
/* Fully-visible smooth grid: quad strips with per-vertex normals. */
2297 else if (buffers->smooth) {
2298 for (y = 0; y < gridsize - 1; y++) {
2299 glBegin(GL_QUAD_STRIP);
2300 for (x = 0; x < gridsize; x++) {
2301 CCGElem *a = CCG_grid_elem(key, grid, x, y);
2302 CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2305 gpu_color_from_mask_set(*CCG_elem_mask(key, a), diffuse_color);
2307 glNormal3fv(CCG_elem_no(key, a));
2308 glVertex3fv(CCG_elem_co(key, a));
2310 gpu_color_from_mask_set(*CCG_elem_mask(key, b), diffuse_color);
2312 glNormal3fv(CCG_elem_no(key, b));
2313 glVertex3fv(CCG_elem_co(key, b));
/* Fully-visible flat grid: strips, recomputing the face normal from the
 * previous column's elements (c, d) as the strip advances. */
2319 for (y = 0; y < gridsize - 1; y++) {
2320 glBegin(GL_QUAD_STRIP);
2321 for (x = 0; x < gridsize; x++) {
2322 CCGElem *a = CCG_grid_elem(key, grid, x, y);
2323 CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2326 CCGElem *c = CCG_grid_elem(key, grid, x - 1, y);
2327 CCGElem *d = CCG_grid_elem(key, grid, x - 1, y + 1);
2331 CCG_elem_co(key, d),
2332 CCG_elem_co(key, b),
2333 CCG_elem_co(key, a),
2334 CCG_elem_co(key, c));
2338 gpu_color_from_mask_quad_set(key, a, b, c, d, diffuse_color);
2342 glVertex3fv(CCG_elem_co(key, a));
2343 glVertex3fv(CCG_elem_co(key, b));
2351 gpu_colors_disable(VBO_DISABLED);
/* Draw one PBVH node: set material from the node's first face/grid, then
 * draw via VBOs when available, else fall back to immediate mode. */
2355 void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial,
2358 /* sets material from the first face, to solve properly face would need to
2359 * be sorted in buckets by materials */
2361 if (buffers->totface) {
2362 const MFace *f = &buffers->mface[buffers->face_indices[0]];
2363 if (!setMaterial(f->mat_nr + 1, NULL))
2366 else if (buffers->totgrid) {
2367 const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2368 if (!setMaterial(f->mat_nr + 1, NULL))
/* BMesh nodes carry no material data; use the default material. */
2372 if (!setMaterial(1, NULL))
2377 glShadeModel((buffers->smooth || buffers->totface) ? GL_SMOOTH : GL_FLAT);
2379 if (buffers->vert_buf) {
2380 glEnableClientState(GL_VERTEX_ARRAY);
2382 glEnableClientState(GL_NORMAL_ARRAY);
2383 gpu_colors_enable(VBO_ENABLED);
2386 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
2388 if (buffers->index_buf)
2389 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2392 glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
/* Grid path: the shared index buffer covers one grid, so when faces are
 * hidden ('has_hidden') one draw covers everything; otherwise draw each
 * grid, advancing the attribute-pointer byte offset per grid. */
2394 if (buffers->tot_quad) {
2396 int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
2397 for (i = 0; i < last; i++) {
2398 glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2399 offset + offsetof(VertexBufferFormat, co));
2400 glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2401 offset + offsetof(VertexBufferFormat, no));
2402 glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2403 offset + offsetof(VertexBufferFormat, color));
2405 glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
2407 offset += buffers->gridkey.grid_area * sizeof(VertexBufferFormat);
/* Triangle path (mesh/BMesh): indexed when an element buffer exists
 * (smooth), plain arrays otherwise (flat). */
2411 int totelem = buffers->tot_tri * 3;
2413 glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2414 (void *)offsetof(VertexBufferFormat, co));
2415 glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2416 (void *)offsetof(VertexBufferFormat, no));
2417 glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2418 (void *)offsetof(VertexBufferFormat, color));
2420 if (buffers->index_buf)
2421 glDrawElements(GL_TRIANGLES, totelem, buffers->index_type, 0);
2423 glDrawArrays(GL_TRIANGLES, 0, totelem);
2427 glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
2429 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
2430 if (buffers->index_buf)
2431 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
2433 glDisableClientState(GL_VERTEX_ARRAY);
2435 glDisableClientState(GL_NORMAL_ARRAY);
2436 gpu_colors_disable(VBO_ENABLED);
2439 /* fallbacks if we are out of memory or VBO is disabled */
2440 else if (buffers->totface) {
2441 gpu_draw_buffers_legacy_mesh(buffers);
2443 else if (buffers->totgrid) {
2444 gpu_draw_buffers_legacy_grids(buffers);
/* Return non-zero when the node's material diffuse color (or the
 * show_diffuse_color flag itself) differs from what is cached in
 * 'buffers', i.e. the VBOs need rebuilding. */
2448 int GPU_buffers_diffuse_changed(GPU_Buffers *buffers, int show_diffuse_color)
2450 float diffuse_color[4];
2452 if (buffers->show_diffuse_color != show_diffuse_color)
2455 if (buffers->show_diffuse_color == FALSE)
/* Look up the current material diffuse from the first face or grid. */
2458 if (buffers->mface) {
2459 MFace *f = buffers->mface + buffers->face_indices[0];
2461 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
2464 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2466 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
/* Exact float comparison is intentional: the cached value was copied
 * verbatim from the same getter. */
2469 return diffuse_color[0] != buffers->diffuse_color[0] ||
2470 diffuse_color[1] != buffers->diffuse_color[1] ||
2471 diffuse_color[2] != buffers->diffuse_color[2];
2474 void GPU_free_buffers(GPU_Buffers *buffers)
2477 if (buffers->vert_buf)
2478 glDeleteBuffersARB(1, &buffers->vert_buf);
2479 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
2480 glDeleteBuffersARB(1, &buffers->index_buf);