Fix #36420: ends of curves - caps & twist not good
[blender-staging.git] / source / blender / gpu / intern / gpu_buffers.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2005 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): Brecht Van Lommel.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/gpu/intern/gpu_buffers.c
29  *  \ingroup gpu
30  */
31
32
33 #include <limits.h>
34 #include <stddef.h>
35 #include <string.h>
36
37 #include "GL/glew.h"
38
39 #include "MEM_guardedalloc.h"
40
41 #include "BLI_bitmap.h"
42 #include "BLI_math.h"
43 #include "BLI_utildefines.h"
44 #include "BLI_ghash.h"
45 #include "BLI_threads.h"
46
47 #include "DNA_meshdata_types.h"
48 #include "DNA_material_types.h"
49
50 #include "BKE_ccg.h"
51 #include "BKE_DerivedMesh.h"
52 #include "BKE_paint.h"
53 #include "BKE_subsurf.h"
54
55 #include "DNA_userdef_types.h"
56
57 #include "GPU_buffers.h"
58 #include "GPU_draw.h"
59
60 #include "bmesh.h"
61
62 typedef enum {
63         GPU_BUFFER_VERTEX_STATE = 1,
64         GPU_BUFFER_NORMAL_STATE = 2,
65         GPU_BUFFER_TEXCOORD_STATE = 4,
66         GPU_BUFFER_COLOR_STATE = 8,
67         GPU_BUFFER_ELEMENT_STATE = 16,
68 } GPUBufferState;
69
70 #define MAX_GPU_ATTRIB_DATA 32
71
72 /* material number is an 16-bit signed short and the range (assume material number is non-negative) */
73 #define MAX_MATERIALS MAXMAT
74
75 /* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
76 static int useVBOs = -1;
77 static GPUBufferState GLStates = 0;
78 static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
79
80 /* stores recently-deleted buffers so that new buffers won't have to
81  * be recreated as often
82  *
83  * only one instance of this pool is created, stored in
84  * gpu_buffer_pool
85  *
86  * note that the number of buffers in the pool is usually limited to
87  * MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
88  * when a GPUBuffer is released outside the main thread; due to OpenGL
89  * restrictions it cannot be immediately released
90  */
91 typedef struct GPUBufferPool {
92         /* number of allocated buffers stored */
93         int totbuf;
94         /* actual allocated length of the array */
95         int maxsize;
96         GPUBuffer **buffers;
97 } GPUBufferPool;
98 #define MAX_FREE_GPU_BUFFERS 8
99
100 /* create a new GPUBufferPool */
101 static GPUBufferPool *gpu_buffer_pool_new(void)
102 {
103         GPUBufferPool *pool;
104
105         /* enable VBOs if supported */
106         if (useVBOs == -1)
107                 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
108
109         pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer_Pool");
110
111         pool->maxsize = MAX_FREE_GPU_BUFFERS;
112         pool->buffers = MEM_callocN(sizeof(GPUBuffer *) * pool->maxsize,
113                                     "GPUBuffer.buffers");
114
115         return pool;
116 }
117
118 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
119 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
120 {
121         int i;
122
123         if (!pool || index < 0 || index >= pool->totbuf)
124                 return;
125
126         /* shift entries down, overwriting the buffer at `index' */
127         for (i = index; i < pool->totbuf - 1; i++)
128                 pool->buffers[i] = pool->buffers[i + 1];
129
130         /* clear the last entry */
131         if (pool->totbuf > 0)
132                 pool->buffers[pool->totbuf - 1] = NULL;
133
134         pool->totbuf--;
135 }
136
137 /* delete the last entry in the pool */
138 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
139 {
140         GPUBuffer *last;
141
142         if (pool->totbuf <= 0)
143                 return;
144
145         /* get the last entry */
146         if (!(last = pool->buffers[pool->totbuf - 1]))
147                 return;
148
149         /* delete the buffer's data */
150         if (useVBOs)
151                 glDeleteBuffersARB(1, &last->id);
152         else
153                 MEM_freeN(last->pointer);
154
155         /* delete the buffer and remove from pool */
156         MEM_freeN(last);
157         pool->totbuf--;
158         pool->buffers[pool->totbuf] = NULL;
159 }
160
161 /* free a GPUBufferPool; also frees the data in the pool's
162  * GPUBuffers */
163 static void gpu_buffer_pool_free(GPUBufferPool *pool)
164 {
165         if (!pool)
166                 return;
167         
168         while (pool->totbuf)
169                 gpu_buffer_pool_delete_last(pool);
170
171         MEM_freeN(pool->buffers);
172         MEM_freeN(pool);
173 }
174
175 static void gpu_buffer_pool_free_unused(GPUBufferPool *pool)
176 {
177         if (!pool)
178                 return;
179         
180         while (pool->totbuf)
181                 gpu_buffer_pool_delete_last(pool);
182 }
183
184 static GPUBufferPool *gpu_buffer_pool = NULL;
185 static GPUBufferPool *gpu_get_global_buffer_pool(void)
186 {
187         /* initialize the pool */
188         if (!gpu_buffer_pool)
189                 gpu_buffer_pool = gpu_buffer_pool_new();
190
191         return gpu_buffer_pool;
192 }
193
194 void GPU_global_buffer_pool_free(void)
195 {
196         gpu_buffer_pool_free(gpu_buffer_pool);
197         gpu_buffer_pool = NULL;
198 }
199
200 void GPU_global_buffer_pool_free_unused(void)
201 {
202         gpu_buffer_pool_free_unused(gpu_buffer_pool);
203 }
204
205 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
206  * pool if possible, otherwise creates a new one */
207 GPUBuffer *GPU_buffer_alloc(int size)
208 {
209         GPUBufferPool *pool;
210         GPUBuffer *buf;
211         int i, bufsize, bestfit = -1;
212
213         /* bad case, leads to leak of buf since buf->pointer will allocate
214          * NULL, leading to return without cleanup. In any case better detect early
215          * psy-fi */
216         if (size == 0)
217                 return NULL;
218
219         pool = gpu_get_global_buffer_pool();
220
221         /* not sure if this buffer pool code has been profiled much,
222          * seems to me that the graphics driver and system memory
223          * management might do this stuff anyway. --nicholas
224          */
225
226         /* check the global buffer pool for a recently-deleted buffer
227          * that is at least as big as the request, but not more than
228          * twice as big */
229         for (i = 0; i < pool->totbuf; i++) {
230                 bufsize = pool->buffers[i]->size;
231
232                 /* check for an exact size match */
233                 if (bufsize == size) {
234                         bestfit = i;
235                         break;
236                 }
237                 /* smaller buffers won't fit data and buffers at least
238                  * twice as big are a waste of memory */
239                 else if (bufsize > size && size > (bufsize / 2)) {
240                         /* is it closer to the required size than the
241                          * last appropriate buffer found. try to save
242                          * memory */
243                         if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
244                                 bestfit = i;
245                         }
246                 }
247         }
248
249         /* if an acceptable buffer was found in the pool, remove it
250          * from the pool and return it */
251         if (bestfit != -1) {
252                 buf = pool->buffers[bestfit];
253                 gpu_buffer_pool_remove_index(pool, bestfit);
254                 return buf;
255         }
256
257         /* no acceptable buffer found in the pool, create a new one */
258         buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
259         buf->size = size;
260
261         if (useVBOs == 1) {
262                 /* create a new VBO and initialize it to the requested
263                  * size */
264                 glGenBuffersARB(1, &buf->id);
265                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
266                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
267                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
268         }
269         else {
270                 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
271                 
272                 /* purpose of this seems to be dealing with
273                  * out-of-memory errors? looks a bit iffy to me
274                  * though, at least on Linux I expect malloc() would
275                  * just overcommit. --nicholas */
276                 while (!buf->pointer && pool->totbuf > 0) {
277                         gpu_buffer_pool_delete_last(pool);
278                         buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
279                 }
280                 if (!buf->pointer)
281                         return NULL;
282         }
283
284         return buf;
285 }
286
287 /* release a GPUBuffer; does not free the actual buffer or its data,
288  * but rather moves it to the pool of recently-freed buffers for
289  * possible re-use*/
290 void GPU_buffer_free(GPUBuffer *buffer)
291 {
292         GPUBufferPool *pool;
293         int i;
294
295         if (!buffer)
296                 return;
297
298         pool = gpu_get_global_buffer_pool();
299
300         /* free the last used buffer in the queue if no more space, but only
301          * if we are in the main thread. for e.g. rendering or baking it can
302          * happen that we are in other thread and can't call OpenGL, in that
303          * case cleanup will be done GPU_buffer_pool_free_unused */
304         if (BLI_thread_is_main()) {
305                 /* in main thread, safe to decrease size of pool back
306                  * down to MAX_FREE_GPU_BUFFERS */
307                 while (pool->totbuf >= MAX_FREE_GPU_BUFFERS)
308                         gpu_buffer_pool_delete_last(pool);
309         }
310         else {
311                 /* outside of main thread, can't safely delete the
312                  * buffer, so increase pool size */
313                 if (pool->maxsize == pool->totbuf) {
314                         pool->maxsize += MAX_FREE_GPU_BUFFERS;
315                         pool->buffers = MEM_reallocN(pool->buffers,
316                                                      sizeof(GPUBuffer *) * pool->maxsize);
317                 }
318         }
319
320         /* shift pool entries up by one */
321         for (i = pool->totbuf; i > 0; i--)
322                 pool->buffers[i] = pool->buffers[i - 1];
323
324         /* insert the buffer into the beginning of the pool */
325         pool->buffers[0] = buffer;
326         pool->totbuf++;
327 }
328
329 typedef struct GPUVertPointLink {
330         struct GPUVertPointLink *next;
331         /* -1 means uninitialized */
332         int point_index;
333 } GPUVertPointLink;
334
335 /* add a new point to the list of points related to a particular
336  * vertex */
337 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
338 {
339         GPUVertPointLink *lnk;
340
341         lnk = &gdo->vert_points[vert_index];
342
343         /* if first link is in use, add a new link at the end */
344         if (lnk->point_index != -1) {
345                 /* get last link */
346                 for (; lnk->next; lnk = lnk->next) ;
347
348                 /* add a new link from the pool */
349                 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
350                 gdo->vert_points_usage++;
351         }
352
353         lnk->point_index = point_index;
354 }
355
356 /* update the vert_points and triangle_to_mface fields with a new
357  * triangle */
358 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
359                                         int base_point_index,
360                                         int face_index,
361                                         int v1, int v2, int v3)
362 {
363         int i, v[3] = {v1, v2, v3};
364         for (i = 0; i < 3; i++)
365                 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
366         gdo->triangle_to_mface[base_point_index / 3] = face_index;
367 }
368
369 /* for each vertex, build a list of points related to it; these lists
370  * are stored in an array sized to the number of vertices */
371 static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
372 {
373         GPUBufferMaterial *mat;
374         int i, mat_orig_to_new[MAX_MATERIALS];
375
376         /* allocate the array and space for links */
377         gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
378                                        "GPUDrawObject.vert_points");
379         gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
380                                            "GPUDrawObject.vert_points_mem");
381         gdo->vert_points_usage = 0;
382
383         /* build a map from the original material indices to the new
384          * GPUBufferMaterial indices */
385         for (i = 0; i < gdo->totmaterial; i++)
386                 mat_orig_to_new[gdo->materials[i].mat_nr] = i;
387
388         /* -1 indicates the link is not yet used */
389         for (i = 0; i < gdo->totvert; i++)
390                 gdo->vert_points[i].point_index = -1;
391
392         for (i = 0; i < totface; i++, f++) {
393                 mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];
394
395                 /* add triangle */
396                 gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
397                                             i, f->v1, f->v2, f->v3);
398                 mat->totpoint += 3;
399
400                 /* add second triangle for quads */
401                 if (f->v4) {
402                         gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
403                                                     i, f->v3, f->v4, f->v1);
404                         mat->totpoint += 3;
405                 }
406         }
407
408         /* map any unused vertices to loose points */
409         for (i = 0; i < gdo->totvert; i++) {
410                 if (gdo->vert_points[i].point_index == -1) {
411                         gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
412                         gdo->tot_loose_point++;
413                 }
414         }
415 }
416
417 /* see GPUDrawObject's structure definition for a description of the
418  * data being initialized here */
419 GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
420 {
421         GPUDrawObject *gdo;
422         MFace *mface;
423         int points_per_mat[MAX_MATERIALS];
424         int i, curmat, curpoint, totface;
425
426         mface = dm->getTessFaceArray(dm);
427         totface = dm->getNumTessFaces(dm);
428
429         /* get the number of points used by each material, treating
430          * each quad as two triangles */
431         memset(points_per_mat, 0, sizeof(int) * MAX_MATERIALS);
432         for (i = 0; i < totface; i++)
433                 points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;
434
435         /* create the GPUDrawObject */
436         gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
437         gdo->totvert = dm->getNumVerts(dm);
438         gdo->totedge = dm->getNumEdges(dm);
439
440         /* count the number of materials used by this DerivedMesh */
441         for (i = 0; i < MAX_MATERIALS; i++) {
442                 if (points_per_mat[i] > 0)
443                         gdo->totmaterial++;
444         }
445
446         /* allocate an array of materials used by this DerivedMesh */
447         gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
448                                      "GPUDrawObject.materials");
449
450         /* initialize the materials array */
451         for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
452                 if (points_per_mat[i] > 0) {
453                         gdo->materials[curmat].start = curpoint;
454                         gdo->materials[curmat].totpoint = 0;
455                         gdo->materials[curmat].mat_nr = i;
456
457                         curpoint += points_per_mat[i];
458                         curmat++;
459                 }
460         }
461
462         /* store total number of points used for triangles */
463         gdo->tot_triangle_point = curpoint;
464
465         gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
466                                              "GPUDrawObject.triangle_to_mface");
467
468         gpu_drawobject_init_vert_points(gdo, mface, totface);
469
470         return gdo;
471 }
472
473 void GPU_drawobject_free(DerivedMesh *dm)
474 {
475         GPUDrawObject *gdo;
476
477         if (!dm || !(gdo = dm->drawObject))
478                 return;
479
480         MEM_freeN(gdo->materials);
481         MEM_freeN(gdo->triangle_to_mface);
482         MEM_freeN(gdo->vert_points);
483         MEM_freeN(gdo->vert_points_mem);
484         GPU_buffer_free(gdo->points);
485         GPU_buffer_free(gdo->normals);
486         GPU_buffer_free(gdo->uv);
487         GPU_buffer_free(gdo->colors);
488         GPU_buffer_free(gdo->edges);
489         GPU_buffer_free(gdo->uvedges);
490
491         MEM_freeN(gdo);
492         dm->drawObject = NULL;
493 }
494
495 typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
496                                   int *mat_orig_to_new, void *user_data);
497
498 static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
499                                    int vector_size, int size, GLenum target,
500                                    void *user, GPUBufferCopyFunc copy_f)
501 {
502         GPUBufferPool *pool;
503         GPUBuffer *buffer;
504         float *varray;
505         int mat_orig_to_new[MAX_MATERIALS];
506         int *cur_index_per_mat;
507         int i;
508         int success;
509         GLboolean uploaded;
510
511         pool = gpu_get_global_buffer_pool();
512
513         /* alloc a GPUBuffer; fall back to legacy mode on failure */
514         if (!(buffer = GPU_buffer_alloc(size)))
515                 dm->drawObject->legacy = 1;
516
517         /* nothing to do for legacy mode */
518         if (dm->drawObject->legacy)
519                 return NULL;
520
521         cur_index_per_mat = MEM_mallocN(sizeof(int) * object->totmaterial,
522                                         "GPU_buffer_setup.cur_index_per_mat");
523         for (i = 0; i < object->totmaterial; i++) {
524                 /* for each material, the current index to copy data to */
525                 cur_index_per_mat[i] = object->materials[i].start * vector_size;
526
527                 /* map from original material index to new
528                  * GPUBufferMaterial index */
529                 mat_orig_to_new[object->materials[i].mat_nr] = i;
530         }
531
532         if (useVBOs) {
533                 success = 0;
534
535                 while (!success) {
536                         /* bind the buffer and discard previous data,
537                          * avoids stalling gpu */
538                         glBindBufferARB(target, buffer->id);
539                         glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);
540
541                         /* attempt to map the buffer */
542                         if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
543                                 /* failed to map the buffer; delete it */
544                                 GPU_buffer_free(buffer);
545                                 gpu_buffer_pool_delete_last(pool);
546                                 buffer = NULL;
547
548                                 /* try freeing an entry from the pool
549                                  * and reallocating the buffer */
550                                 if (pool->totbuf > 0) {
551                                         gpu_buffer_pool_delete_last(pool);
552                                         buffer = GPU_buffer_alloc(size);
553                                 }
554
555                                 /* allocation still failed; fall back
556                                  * to legacy mode */
557                                 if (!buffer) {
558                                         dm->drawObject->legacy = 1;
559                                         success = 1;
560                                 }
561                         }
562                         else {
563                                 success = 1;
564                         }
565                 }
566
567                 /* check legacy fallback didn't happen */
568                 if (dm->drawObject->legacy == 0) {
569                         uploaded = GL_FALSE;
570                         /* attempt to upload the data to the VBO */
571                         while (uploaded == GL_FALSE) {
572                                 (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
573                                 /* glUnmapBuffer returns GL_FALSE if
574                                  * the data store is corrupted; retry
575                                  * in that case */
576                                 uploaded = glUnmapBufferARB(target);
577                         }
578                 }
579                 glBindBufferARB(target, 0);
580         }
581         else {
582                 /* VBO not supported, use vertex array fallback */
583                 if (buffer->pointer) {
584                         varray = buffer->pointer;
585                         (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
586                 }
587                 else {
588                         dm->drawObject->legacy = 1;
589                 }
590         }
591
592         MEM_freeN(cur_index_per_mat);
593
594         return buffer;
595 }
596
597 static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
598 {
599         MVert *mvert;
600         MFace *f;
601         int i, j, start, totface;
602
603         mvert = dm->getVertArray(dm);
604         f = dm->getTessFaceArray(dm);
605
606         totface = dm->getNumTessFaces(dm);
607         for (i = 0; i < totface; i++, f++) {
608                 start = index[mat_orig_to_new[f->mat_nr]];
609
610                 /* v1 v2 v3 */
611                 copy_v3_v3(&varray[start], mvert[f->v1].co);
612                 copy_v3_v3(&varray[start + 3], mvert[f->v2].co);
613                 copy_v3_v3(&varray[start + 6], mvert[f->v3].co);
614                 index[mat_orig_to_new[f->mat_nr]] += 9;
615
616                 if (f->v4) {
617                         /* v3 v4 v1 */
618                         copy_v3_v3(&varray[start + 9], mvert[f->v3].co);
619                         copy_v3_v3(&varray[start + 12], mvert[f->v4].co);
620                         copy_v3_v3(&varray[start + 15], mvert[f->v1].co);
621                         index[mat_orig_to_new[f->mat_nr]] += 9;
622                 }
623         }
624
625         /* copy loose points */
626         j = dm->drawObject->tot_triangle_point * 3;
627         for (i = 0; i < dm->drawObject->totvert; i++) {
628                 if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
629                         copy_v3_v3(&varray[j], mvert[i].co);
630                         j += 3;
631                 }
632         }
633 }
634
635 static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
636 {
637         int i, totface;
638         int start;
639         float f_no[3];
640
641         float *nors = dm->getTessFaceDataArray(dm, CD_NORMAL);
642         MVert *mvert = dm->getVertArray(dm);
643         MFace *f = dm->getTessFaceArray(dm);
644
645         totface = dm->getNumTessFaces(dm);
646         for (i = 0; i < totface; i++, f++) {
647                 const int smoothnormal = (f->flag & ME_SMOOTH);
648
649                 start = index[mat_orig_to_new[f->mat_nr]];
650                 index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;
651
652                 if (smoothnormal) {
653                         /* copy vertex normal */
654                         normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
655                         normal_short_to_float_v3(&varray[start + 3], mvert[f->v2].no);
656                         normal_short_to_float_v3(&varray[start + 6], mvert[f->v3].no);
657
658                         if (f->v4) {
659                                 normal_short_to_float_v3(&varray[start + 9], mvert[f->v3].no);
660                                 normal_short_to_float_v3(&varray[start + 12], mvert[f->v4].no);
661                                 normal_short_to_float_v3(&varray[start + 15], mvert[f->v1].no);
662                         }
663                 }
664                 else if (nors) {
665                         /* copy cached face normal */
666                         copy_v3_v3(&varray[start], &nors[i * 3]);
667                         copy_v3_v3(&varray[start + 3], &nors[i * 3]);
668                         copy_v3_v3(&varray[start + 6], &nors[i * 3]);
669
670                         if (f->v4) {
671                                 copy_v3_v3(&varray[start + 9], &nors[i * 3]);
672                                 copy_v3_v3(&varray[start + 12], &nors[i * 3]);
673                                 copy_v3_v3(&varray[start + 15], &nors[i * 3]);
674                         }
675                 }
676                 else {
677                         /* calculate face normal */
678                         if (f->v4)
679                                 normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
680                         else
681                                 normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);
682
683                         copy_v3_v3(&varray[start], f_no);
684                         copy_v3_v3(&varray[start + 3], f_no);
685                         copy_v3_v3(&varray[start + 6], f_no);
686
687                         if (f->v4) {
688                                 copy_v3_v3(&varray[start + 9], f_no);
689                                 copy_v3_v3(&varray[start + 12], f_no);
690                                 copy_v3_v3(&varray[start + 15], f_no);
691                         }
692                 }
693         }
694 }
695
696 static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
697 {
698         int start;
699         int i, totface;
700
701         MTFace *mtface;
702         MFace *f;
703
704         if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
705                 return;
706         f = dm->getTessFaceArray(dm);
707                 
708         totface = dm->getNumTessFaces(dm);
709         for (i = 0; i < totface; i++, f++) {
710                 start = index[mat_orig_to_new[f->mat_nr]];
711
712                 /* v1 v2 v3 */
713                 copy_v2_v2(&varray[start], mtface[i].uv[0]);
714                 copy_v2_v2(&varray[start + 2], mtface[i].uv[1]);
715                 copy_v2_v2(&varray[start + 4], mtface[i].uv[2]);
716                 index[mat_orig_to_new[f->mat_nr]] += 6;
717
718                 if (f->v4) {
719                         /* v3 v4 v1 */
720                         copy_v2_v2(&varray[start + 6], mtface[i].uv[2]);
721                         copy_v2_v2(&varray[start + 8], mtface[i].uv[3]);
722                         copy_v2_v2(&varray[start + 10], mtface[i].uv[0]);
723                         index[mat_orig_to_new[f->mat_nr]] += 6;
724                 }
725         }
726 }
727
728 static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
729 {
730         v[0] = col[3];
731         v[1] = col[2];
732         v[2] = col[1];
733 }
734
735 /* treat varray_ as an array of MCol, four MCol's per face */
736 static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
737 {
738         int i, totface;
739         unsigned char *varray = (unsigned char *)varray_;
740         unsigned char *mcol = (unsigned char *)user;
741         MFace *f = dm->getTessFaceArray(dm);
742
743         totface = dm->getNumTessFaces(dm);
744         for (i = 0; i < totface; i++, f++) {
745                 int start = index[mat_orig_to_new[f->mat_nr]];
746
747                 /* v1 v2 v3 */
748                 copy_mcol_uc3(&varray[start], &mcol[i * 16]);
749                 copy_mcol_uc3(&varray[start + 3], &mcol[i * 16 + 4]);
750                 copy_mcol_uc3(&varray[start + 6], &mcol[i * 16 + 8]);
751                 index[mat_orig_to_new[f->mat_nr]] += 9;
752
753                 if (f->v4) {
754                         /* v3 v4 v1 */
755                         copy_mcol_uc3(&varray[start + 9], &mcol[i * 16 + 8]);
756                         copy_mcol_uc3(&varray[start + 12], &mcol[i * 16 + 12]);
757                         copy_mcol_uc3(&varray[start + 15], &mcol[i * 16]);
758                         index[mat_orig_to_new[f->mat_nr]] += 9;
759                 }
760         }
761 }
762
763 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
764 {
765         MEdge *medge;
766         unsigned int *varray = (unsigned int *)varray_;
767         int i, totedge;
768  
769         medge = dm->getEdgeArray(dm);
770         totedge = dm->getNumEdges(dm);
771
772         for (i = 0; i < totedge; i++, medge++) {
773                 varray[i * 2] = dm->drawObject->vert_points[medge->v1].point_index;
774                 varray[i * 2 + 1] = dm->drawObject->vert_points[medge->v2].point_index;
775         }
776 }
777
778 static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
779 {
780         MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
781         int i, j = 0;
782
783         if (!tf)
784                 return;
785
786         for (i = 0; i < dm->numTessFaceData; i++, tf++) {
787                 MFace mf;
788                 dm->getTessFace(dm, i, &mf);
789
790                 copy_v2_v2(&varray[j], tf->uv[0]);
791                 copy_v2_v2(&varray[j + 2], tf->uv[1]);
792
793                 copy_v2_v2(&varray[j + 4], tf->uv[1]);
794                 copy_v2_v2(&varray[j + 6], tf->uv[2]);
795
796                 if (!mf.v4) {
797                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
798                         copy_v2_v2(&varray[j + 10], tf->uv[0]);
799                         j += 12;
800                 }
801                 else {
802                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
803                         copy_v2_v2(&varray[j + 10], tf->uv[3]);
804
805                         copy_v2_v2(&varray[j + 12], tf->uv[3]);
806                         copy_v2_v2(&varray[j + 14], tf->uv[0]);
807                         j += 16;
808                 }
809         }
810 }
811
812 typedef enum {
813         GPU_BUFFER_VERTEX = 0,
814         GPU_BUFFER_NORMAL,
815         GPU_BUFFER_COLOR,
816         GPU_BUFFER_UV,
817         GPU_BUFFER_EDGE,
818         GPU_BUFFER_UVEDGE,
819 } GPUBufferType;
820
821 typedef struct {
822         GPUBufferCopyFunc copy;
823         GLenum gl_buffer_type;
824         int vector_size;
825 } GPUBufferTypeSettings;
826
827 const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
828         {GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
829         {GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
830         {GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
831         {GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
832         {GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
833         {GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
834 };
835
836 /* get the GPUDrawObject buffer associated with a type */
837 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
838 {
839         switch (type) {
840                 case GPU_BUFFER_VERTEX:
841                         return &gdo->points;
842                 case GPU_BUFFER_NORMAL:
843                         return &gdo->normals;
844                 case GPU_BUFFER_COLOR:
845                         return &gdo->colors;
846                 case GPU_BUFFER_UV:
847                         return &gdo->uv;
848                 case GPU_BUFFER_EDGE:
849                         return &gdo->edges;
850                 case GPU_BUFFER_UVEDGE:
851                         return &gdo->uvedges;
852                 default:
853                         return NULL;
854         }
855 }
856
857 /* get the amount of space to allocate for a buffer of a particular type */
858 static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
859 {
860         switch (type) {
861                 case GPU_BUFFER_VERTEX:
862                         return sizeof(float) * 3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
863                 case GPU_BUFFER_NORMAL:
864                         return sizeof(float) * 3 * dm->drawObject->tot_triangle_point;
865                 case GPU_BUFFER_COLOR:
866                         return sizeof(char) * 3 * dm->drawObject->tot_triangle_point;
867                 case GPU_BUFFER_UV:
868                         return sizeof(float) * 2 * dm->drawObject->tot_triangle_point;
869                 case GPU_BUFFER_EDGE:
870                         return sizeof(int) * 2 * dm->drawObject->totedge;
871                 case GPU_BUFFER_UVEDGE:
872                         /* each face gets 3 points, 3 edges per triangle, and
873                          * each edge has its own, non-shared coords, so each
874                          * tri corner needs minimum of 4 floats, quads used
875                          * less so here we can over allocate and assume all
876                          * tris. */
877                         return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
878                 default:
879                         return -1;
880         }
881 }
882
883 /* call gpu_buffer_setup with settings for a particular type of buffer */
884 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
885 {
886         const GPUBufferTypeSettings *ts;
887         void *user_data = NULL;
888         GPUBuffer *buf;
889
890         ts = &gpu_buffer_type_settings[type];
891
892         /* special handling for MCol and UV buffers */
893         if (type == GPU_BUFFER_COLOR) {
894                 if (!(user_data = DM_get_tessface_data_layer(dm, dm->drawObject->colType)))
895                         return NULL;
896         }
897         else if (type == GPU_BUFFER_UV) {
898                 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
899                         return NULL;
900         }
901
902         buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
903                                gpu_buffer_size_from_type(dm, type),
904                                ts->gl_buffer_type, user_data, ts->copy);
905
906         return buf;
907 }
908
909 /* get the buffer of `type', initializing the GPUDrawObject and
910  * buffer if needed */
911 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
912 {
913         GPUBuffer **buf;
914
915         if (!dm->drawObject)
916                 dm->drawObject = GPU_drawobject_new(dm);
917
918         buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
919         if (!(*buf))
920                 *buf = gpu_buffer_setup_type(dm, type);
921
922         return *buf;
923 }
924
925 void GPU_vertex_setup(DerivedMesh *dm)
926 {
927         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
928                 return;
929
930         glEnableClientState(GL_VERTEX_ARRAY);
931         if (useVBOs) {
932                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
933                 glVertexPointer(3, GL_FLOAT, 0, 0);
934         }
935         else {
936                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
937         }
938         
939         GLStates |= GPU_BUFFER_VERTEX_STATE;
940 }
941
942 void GPU_normal_setup(DerivedMesh *dm)
943 {
944         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
945                 return;
946
947         glEnableClientState(GL_NORMAL_ARRAY);
948         if (useVBOs) {
949                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
950                 glNormalPointer(GL_FLOAT, 0, 0);
951         }
952         else {
953                 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
954         }
955
956         GLStates |= GPU_BUFFER_NORMAL_STATE;
957 }
958
959 void GPU_uv_setup(DerivedMesh *dm)
960 {
961         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
962                 return;
963
964         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
965         if (useVBOs) {
966                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
967                 glTexCoordPointer(2, GL_FLOAT, 0, 0);
968         }
969         else {
970                 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
971         }
972
973         GLStates |= GPU_BUFFER_TEXCOORD_STATE;
974 }
975
976 void GPU_color_setup(DerivedMesh *dm, int colType)
977 {
978         if (!dm->drawObject) {
979                 /* XXX Not really nice, but we need a valid gpu draw object to set the colType...
980                  *     Else we would have to add a new param to gpu_buffer_setup_common. */
981                 dm->drawObject = GPU_drawobject_new(dm);
982                 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
983                 dm->drawObject->colType = colType;
984         }
985         /* In paint mode, dm may stay the same during stroke, however we still want to update colors!
986          * Also check in case we changed color type (i.e. which MCol cdlayer we use). */
987         else if ((dm->dirty & DM_DIRTY_MCOL_UPDATE_DRAW) || (colType != dm->drawObject->colType)) {
988                 GPUBuffer **buf = gpu_drawobject_buffer_from_type(dm->drawObject, GPU_BUFFER_COLOR);
989                 /* XXX Freeing this buffer is a bit stupid, as geometry has not changed, size should remain the same.
990                  *     Not sure though it would be worth defining a sort of gpu_buffer_update func - nor whether
991                  *     it is even possible ! */
992                 GPU_buffer_free(*buf);
993                 *buf = NULL;
994                 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
995                 dm->drawObject->colType = colType;
996         }
997
998         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
999                 return;
1000
1001         glEnableClientState(GL_COLOR_ARRAY);
1002         if (useVBOs) {
1003                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
1004                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
1005         }
1006         else {
1007                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
1008         }
1009
1010         GLStates |= GPU_BUFFER_COLOR_STATE;
1011 }
1012
1013 void GPU_edge_setup(DerivedMesh *dm)
1014 {
1015         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1016                 return;
1017
1018         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1019                 return;
1020
1021         glEnableClientState(GL_VERTEX_ARRAY);
1022         if (useVBOs) {
1023                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1024                 glVertexPointer(3, GL_FLOAT, 0, 0);
1025         }
1026         else {
1027                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1028         }
1029         
1030         GLStates |= GPU_BUFFER_VERTEX_STATE;
1031
1032         if (useVBOs)
1033                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1034
1035         GLStates |= GPU_BUFFER_ELEMENT_STATE;
1036 }
1037
1038 void GPU_uvedge_setup(DerivedMesh *dm)
1039 {
1040         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1041                 return;
1042
1043         glEnableClientState(GL_VERTEX_ARRAY);
1044         if (useVBOs) {
1045                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1046                 glVertexPointer(2, GL_FLOAT, 0, 0);
1047         }
1048         else {
1049                 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1050         }
1051         
1052         GLStates |= GPU_BUFFER_VERTEX_STATE;
1053 }
1054
1055 static int GPU_typesize(int type)
1056 {
1057         switch (type) {
1058                 case GL_FLOAT:
1059                         return sizeof(float);
1060                 case GL_INT:
1061                         return sizeof(int);
1062                 case GL_UNSIGNED_INT:
1063                         return sizeof(unsigned int);
1064                 case GL_BYTE:
1065                         return sizeof(char);
1066                 case GL_UNSIGNED_BYTE:
1067                         return sizeof(unsigned char);
1068                 default:
1069                         return 0;
1070         }
1071 }
1072
1073 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1074 {
1075         int i, elementsize = 0;
1076
1077         for (i = 0; i < numdata; i++) {
1078                 int typesize = GPU_typesize(data[i].type);
1079                 if (typesize != 0)
1080                         elementsize += typesize * data[i].size;
1081         }
1082         return elementsize;
1083 }
1084
1085 void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
1086 {
1087         int i;
1088         int elementsize;
1089         intptr_t offset = 0;
1090
1091         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1092                 if (attribData[i].index != -1) {
1093                         glDisableVertexAttribArrayARB(attribData[i].index);
1094                 }
1095                 else
1096                         break;
1097         }
1098         elementsize = GPU_attrib_element_size(data, numdata);
1099
1100         if (useVBOs) {
1101                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1102                 for (i = 0; i < numdata; i++) {
1103                         glEnableVertexAttribArrayARB(data[i].index);
1104                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1105                                                  GL_FALSE, elementsize, (void *)offset);
1106                         offset += data[i].size * GPU_typesize(data[i].type);
1107
1108                         attribData[i].index = data[i].index;
1109                         attribData[i].size = data[i].size;
1110                         attribData[i].type = data[i].type;
1111                 }
1112                 attribData[numdata].index = -1;
1113         }
1114         else {
1115                 for (i = 0; i < numdata; i++) {
1116                         glEnableVertexAttribArrayARB(data[i].index);
1117                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1118                                                  GL_FALSE, elementsize, (char *)buffer->pointer + offset);
1119                         offset += data[i].size * GPU_typesize(data[i].type);
1120                 }
1121         }
1122 }
1123
1124
1125 void GPU_buffer_unbind(void)
1126 {
1127         int i;
1128
1129         if (GLStates & GPU_BUFFER_VERTEX_STATE)
1130                 glDisableClientState(GL_VERTEX_ARRAY);
1131         if (GLStates & GPU_BUFFER_NORMAL_STATE)
1132                 glDisableClientState(GL_NORMAL_ARRAY);
1133         if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1134                 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1135         if (GLStates & GPU_BUFFER_COLOR_STATE)
1136                 glDisableClientState(GL_COLOR_ARRAY);
1137         if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1138                 if (useVBOs) {
1139                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1140                 }
1141         }
1142         GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1143                       GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1144                       GPU_BUFFER_ELEMENT_STATE);
1145
1146         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1147                 if (attribData[i].index != -1) {
1148                         glDisableVertexAttribArrayARB(attribData[i].index);
1149                 }
1150                 else
1151                         break;
1152         }
1153
1154         if (useVBOs)
1155                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1156 }
1157
1158 void GPU_color_switch(int mode)
1159 {
1160         if (mode) {
1161                 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1162                         glEnableClientState(GL_COLOR_ARRAY);
1163                 GLStates |= GPU_BUFFER_COLOR_STATE;
1164         }
1165         else {
1166                 if (GLStates & GPU_BUFFER_COLOR_STATE)
1167                         glDisableClientState(GL_COLOR_ARRAY);
1168                 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1169         }
1170 }
1171
1172 /* return 1 if drawing should be done using old immediate-mode
1173  * code, 0 otherwise */
1174 int GPU_buffer_legacy(DerivedMesh *dm)
1175 {
1176         int test = (U.gameflags & USER_DISABLE_VBO);
1177         if (test)
1178                 return 1;
1179
1180         if (dm->drawObject == 0)
1181                 dm->drawObject = GPU_drawobject_new(dm);
1182         return dm->drawObject->legacy;
1183 }
1184
1185 void *GPU_buffer_lock(GPUBuffer *buffer)
1186 {
1187         float *varray;
1188
1189         if (!buffer)
1190                 return 0;
1191
1192         if (useVBOs) {
1193                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1194                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1195                 return varray;
1196         }
1197         else {
1198                 return buffer->pointer;
1199         }
1200 }
1201
1202 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1203 {
1204         float *varray;
1205
1206         if (!buffer)
1207                 return 0;
1208
1209         if (useVBOs) {
1210                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1211                 /* discard previous data, avoid stalling gpu */
1212                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1213                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1214                 return varray;
1215         }
1216         else {
1217                 return buffer->pointer;
1218         }
1219 }
1220
1221 void GPU_buffer_unlock(GPUBuffer *buffer)
1222 {
1223         if (useVBOs) {
1224                 if (buffer) {
1225                         /* note: this operation can fail, could return
1226                          * an error code from this function? */
1227                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1228                 }
1229                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1230         }
1231 }
1232
1233 /* used for drawing edges */
1234 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1235 {
1236         glDrawElements(mode, count, GL_UNSIGNED_INT,
1237                        (useVBOs ?
1238                         (void *)(start * sizeof(unsigned int)) :
1239                         ((int *)elements->pointer) + start));
1240 }
1241
1242
1243 /* XXX: the rest of the code in this file is used for optimized PBVH
1244  * drawing and doesn't interact at all with the buffer code above */
1245
1246 /* Return false if VBO is either unavailable or disabled by the user,
1247  * true otherwise */
1248 static int gpu_vbo_enabled(void)
1249 {
1250         return (GLEW_ARB_vertex_buffer_object &&
1251                 !(U.gameflags & USER_DISABLE_VBO));
1252 }
1253
1254 /* Convenience struct for building the VBO. */
1255 typedef struct {
1256         float co[3];
1257         short no[3];
1258
1259         /* inserting this to align the 'color' field to a four-byte
1260          * boundary; drastically increases viewport performance on my
1261          * drivers (Gallium/Radeon) --nicholasbishop */
1262         char pad[2];
1263         
1264         unsigned char color[3];
1265 } VertexBufferFormat;
1266
1267 struct GPU_Buffers {
1268         /* opengl buffer handles */
1269         GLuint vert_buf, index_buf;
1270         GLenum index_type;
1271
1272         /* mesh pointers in case buffer allocation fails */
1273         MFace *mface;
1274         MVert *mvert;
1275         int *face_indices;
1276         int totface;
1277         const float *vmask;
1278
1279         /* grid pointers */
1280         CCGKey gridkey;
1281         CCGElem **grids;
1282         const DMFlagMat *grid_flag_mats;
1283         BLI_bitmap * const *grid_hidden;
1284         int *grid_indices;
1285         int totgrid;
1286         int has_hidden;
1287
1288         int use_bmesh;
1289
1290         unsigned int tot_tri, tot_quad;
1291
1292         /* The PBVH ensures that either all faces in the node are
1293          * smooth-shaded or all faces are flat-shaded */
1294         int smooth;
1295
1296         int show_diffuse_color;
1297         float diffuse_color[4];
1298 };
1299 typedef enum {
1300         VBO_ENABLED,
1301         VBO_DISABLED
1302 } VBO_State;
1303
1304 static void gpu_colors_enable(VBO_State vbo_state)
1305 {
1306         glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
1307         glEnable(GL_COLOR_MATERIAL);
1308         if (vbo_state == VBO_ENABLED)
1309                 glEnableClientState(GL_COLOR_ARRAY);
1310 }
1311
1312 static void gpu_colors_disable(VBO_State vbo_state)
1313 {
1314         glDisable(GL_COLOR_MATERIAL);
1315         if (vbo_state == VBO_ENABLED)
1316                 glDisableClientState(GL_COLOR_ARRAY);
1317 }
1318
1319 static float gpu_color_from_mask(float mask)
1320 {
1321         return 1.0f - mask * 0.75f;
1322 }
1323
1324 static void gpu_color_from_mask_copy(float mask, const float diffuse_color[4], unsigned char out[3])
1325 {
1326         float mask_color;
1327
1328         mask_color = gpu_color_from_mask(mask) * 255.0f;
1329
1330         out[0] = diffuse_color[0] * mask_color;
1331         out[1] = diffuse_color[1] * mask_color;
1332         out[2] = diffuse_color[2] * mask_color;
1333 }
1334
1335 static void gpu_color_from_mask_set(float mask, float diffuse_color[4])
1336 {
1337         float color = gpu_color_from_mask(mask);
1338         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1339 }
1340
1341 static float gpu_color_from_mask_quad(const CCGKey *key,
1342                                       CCGElem *a, CCGElem *b,
1343                                       CCGElem *c, CCGElem *d)
1344 {
1345         return gpu_color_from_mask((*CCG_elem_mask(key, a) +
1346                                     *CCG_elem_mask(key, b) +
1347                                     *CCG_elem_mask(key, c) +
1348                                     *CCG_elem_mask(key, d)) * 0.25f);
1349 }
1350
1351 static void gpu_color_from_mask_quad_copy(const CCGKey *key,
1352                                           CCGElem *a, CCGElem *b,
1353                                           CCGElem *c, CCGElem *d,
1354                                           const float *diffuse_color,
1355                                           unsigned char out[3])
1356 {
1357         float mask_color =
1358             gpu_color_from_mask((*CCG_elem_mask(key, a) +
1359                                  *CCG_elem_mask(key, b) +
1360                                  *CCG_elem_mask(key, c) +
1361                                  *CCG_elem_mask(key, d)) * 0.25f) * 255.0f;
1362
1363         out[0] = diffuse_color[0] * mask_color;
1364         out[1] = diffuse_color[1] * mask_color;
1365         out[2] = diffuse_color[2] * mask_color;
1366 }
1367
1368 static void gpu_color_from_mask_quad_set(const CCGKey *key,
1369                                          CCGElem *a, CCGElem *b,
1370                                          CCGElem *c, CCGElem *d,
1371                                          float diffuse_color[4])
1372 {
1373         float color = gpu_color_from_mask_quad(key, a, b, c, d);
1374         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1375 }
1376
1377 void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
1378                              int *vert_indices, int totvert, const float *vmask,
1379                              int (*face_vert_indices)[4], int show_diffuse_color)
1380 {
1381         VertexBufferFormat *vert_data;
1382         int i, j, k;
1383
1384         buffers->vmask = vmask;
1385         buffers->show_diffuse_color = show_diffuse_color;
1386
1387         if (buffers->vert_buf) {
1388                 int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));
1389                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1390
1391                 if (buffers->show_diffuse_color) {
1392                         MFace *f = buffers->mface + buffers->face_indices[0];
1393
1394                         GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
1395                 }
1396
1397                 copy_v4_v4(buffers->diffuse_color, diffuse_color);
1398
1399                 /* Build VBO */
1400                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1401                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1402                                                 sizeof(VertexBufferFormat) * totelem,
1403                                                 NULL, GL_STATIC_DRAW_ARB);
1404
1405                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1406
1407                 if (vert_data) {
1408                         /* Vertex data is shared if smooth-shaded, but separate
1409                          * copies are made for flat shading because normals
1410                          * shouldn't be shared. */
1411                         if (buffers->smooth) {
1412                                 for (i = 0; i < totvert; ++i) {
1413                                         MVert *v = mvert + vert_indices[i];
1414                                         VertexBufferFormat *out = vert_data + i;
1415
1416                                         copy_v3_v3(out->co, v->co);
1417                                         memcpy(out->no, v->no, sizeof(short) * 3);
1418                                 }
1419
1420 #define UPDATE_VERTEX(face, vertex, index, diffuse_color) \
1421                                 { \
1422                                         VertexBufferFormat *out = vert_data + face_vert_indices[face][index]; \
1423                                         if (vmask) \
1424                                                 gpu_color_from_mask_copy(vmask[vertex], diffuse_color, out->color); \
1425                                         else \
1426                                                 rgb_float_to_uchar(out->color, diffuse_color); \
1427                                 } (void)0
1428
1429                                 for (i = 0; i < buffers->totface; i++) {
1430                                         MFace *f = buffers->mface + buffers->face_indices[i];
1431
1432                                         UPDATE_VERTEX(i, f->v1, 0, diffuse_color);
1433                                         UPDATE_VERTEX(i, f->v2, 1, diffuse_color);
1434                                         UPDATE_VERTEX(i, f->v3, 2, diffuse_color);
1435                                         if (f->v4)
1436                                                 UPDATE_VERTEX(i, f->v4, 3, diffuse_color);
1437                                 }
1438 #undef UPDATE_VERTEX
1439                         }
1440                         else {
1441                                 for (i = 0; i < buffers->totface; ++i) {
1442                                         const MFace *f = &buffers->mface[buffers->face_indices[i]];
1443                                         const unsigned int *fv = &f->v1;
1444                                         const int vi[2][3] = {{0, 1, 2}, {3, 0, 2}};
1445                                         float fno[3];
1446                                         short no[3];
1447
1448                                         float fmask;
1449
1450                                         if (paint_is_face_hidden(f, mvert))
1451                                                 continue;
1452
1453                                         /* Face normal and mask */
1454                                         if (f->v4) {
1455                                                 normal_quad_v3(fno,
1456                                                                            mvert[fv[0]].co,
1457                                                                            mvert[fv[1]].co,
1458                                                                            mvert[fv[2]].co,
1459                                                                            mvert[fv[3]].co);
1460                                                 if (vmask) {
1461                                                         fmask = (vmask[fv[0]] +
1462                                                                          vmask[fv[1]] +
1463                                                                          vmask[fv[2]] +
1464                                                                          vmask[fv[3]]) * 0.25f;
1465                                                 }
1466                                         }
1467                                         else {
1468                                                 normal_tri_v3(fno,
1469                                                                           mvert[fv[0]].co,
1470                                                                           mvert[fv[1]].co,
1471                                                                           mvert[fv[2]].co);
1472                                                 if (vmask) {
1473                                                         fmask = (vmask[fv[0]] +
1474                                                                          vmask[fv[1]] +
1475                                                                          vmask[fv[2]]) / 3.0f;
1476                                                 }
1477                                         }
1478                                         normal_float_to_short_v3(no, fno);
1479
1480                                         for (j = 0; j < (f->v4 ? 2 : 1); j++) {
1481                                                 for (k = 0; k < 3; k++) {
1482                                                         const MVert *v = &mvert[fv[vi[j][k]]];
1483                                                         VertexBufferFormat *out = vert_data;
1484
1485                                                         copy_v3_v3(out->co, v->co);
1486                                                         memcpy(out->no, no, sizeof(short) * 3);
1487
1488                                                         if (vmask)
1489                                                                 gpu_color_from_mask_copy(fmask, diffuse_color, out->color);
1490                                                         else
1491                                                                 rgb_float_to_uchar(out->color, diffuse_color);
1492
1493                                                         vert_data++;
1494                                                 }
1495                                         }
1496                                 }
1497                         }
1498
1499                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1500                 }
1501                 else {
1502                         glDeleteBuffersARB(1, &buffers->vert_buf);
1503                         buffers->vert_buf = 0;
1504                 }
1505
1506                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1507         }
1508
1509         buffers->mvert = mvert;
1510 }
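/* Minimal sketch of the buffer-update idiom used above (hypothetical handle
 * and size; the real code writes VertexBufferFormat records): allocate the
 * store with a NULL data pointer, map it, fill it, unmap it, and drop the VBO
 * if mapping fails so drawing falls back to the legacy paths. */
#if 0
static void example_vbo_update(GLuint *vbo, size_t size)
{
	void *data;

	glBindBufferARB(GL_ARRAY_BUFFER_ARB, *vbo);
	glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);

	data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
	if (data) {
		memset(data, 0, size);  /* stand-in for filling vertex records */
		glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
	}
	else {
		/* mapping failed: delete the buffer so it isn't used half-filled */
		glDeleteBuffersARB(1, vbo);
		*vbo = 0;
	}
	glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
}
#endif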
1511
1512 GPU_Buffers *GPU_build_mesh_buffers(int (*face_vert_indices)[4],
1513                                     MFace *mface, MVert *mvert,
1514                                     int *face_indices,
1515                                     int totface)
1516 {
1517         GPU_Buffers *buffers;
1518         unsigned short *tri_data;
1519         int i, j, k, tottri;
1520
1521         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1522         buffers->index_type = GL_UNSIGNED_SHORT;
1523         buffers->smooth = mface[face_indices[0]].flag & ME_SMOOTH;
1524
1525         buffers->show_diffuse_color = FALSE;
1526
1527         /* Count the number of visible triangles */
1528         for (i = 0, tottri = 0; i < totface; ++i) {
1529                 const MFace *f = &mface[face_indices[i]];
1530                 if (!paint_is_face_hidden(f, mvert))
1531                         tottri += f->v4 ? 2 : 1;
1532         }
1533
1534         /* An element index buffer is used for smooth shading, but flat
1535          * shading requires separate vertex normals, so an index buffer
1536          * can't be used there (see the storage sketch after this function). */
1537         if (gpu_vbo_enabled() && buffers->smooth)
1538                 glGenBuffersARB(1, &buffers->index_buf);
1539
1540         if (buffers->index_buf) {
1541                 /* Generate index buffer object */
1542                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1543                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
1544                                 sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);
1545
1546                 /* Fill the triangle buffer */
1547                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1548                 if (tri_data) {
1549                         for (i = 0; i < totface; ++i) {
1550                                 const MFace *f = mface + face_indices[i];
1551                                 int v[3];
1552
1553                                 /* Skip hidden faces */
1554                                 if (paint_is_face_hidden(f, mvert))
1555                                         continue;
1556
1557                                 v[0] = 0;
1558                                 v[1] = 1;
1559                                 v[2] = 2;
1560
1561                                 for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
1562                                         for (k = 0; k < 3; ++k) {
1563                                                 *tri_data = face_vert_indices[i][v[k]];
1564                                                 tri_data++;
1565                                         }
1566                                         v[0] = 3;
1567                                         v[1] = 0;
1568                                         v[2] = 2;
1569                                 }
1570                         }
1571                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
1572                 }
1573                 else {
1574                         glDeleteBuffersARB(1, &buffers->index_buf);
1575                         buffers->index_buf = 0;
1576                 }
1577
1578                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1579         }
1580
1581         if (gpu_vbo_enabled() && (buffers->index_buf || !buffers->smooth))
1582                 glGenBuffersARB(1, &buffers->vert_buf);
1583
1584         buffers->tot_tri = tottri;
1585
1586         buffers->mface = mface;
1587         buffers->face_indices = face_indices;
1588         buffers->totface = totface;
1589
1590         return buffers;
1591 }
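/* Storage sketch for the smooth/flat split above, for a single quad face that
 * is drawn as two triangles (illustrative numbers only): */
#if 0
const int smooth_verts = 4, smooth_indices = 6; /* shared verts + index buffer, glDrawElements */
const int flat_verts   = 6, flat_indices   = 0; /* verts duplicated per corner, glDrawArrays   */
#endif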
1592
1593 void GPU_update_grid_buffers(GPU_Buffers *buffers, CCGElem **grids,
1594                              const DMFlagMat *grid_flag_mats, int *grid_indices,
1595                              int totgrid, const CCGKey *key, int show_diffuse_color)
1596 {
1597         VertexBufferFormat *vert_data;
1598         int i, j, k, x, y;
1599
1600         buffers->show_diffuse_color = show_diffuse_color;
1601
1602         /* Build VBO */
1603         if (buffers->vert_buf) {
1604                 int totvert = key->grid_area * totgrid;
1605                 int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1606                 const int has_mask = key->has_mask;
1607                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1608
1609                 if (buffers->show_diffuse_color) {
1610                         const DMFlagMat *flags = &grid_flag_mats[grid_indices[0]];
1611
1612                         GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
1613                 }
1614
1615                 copy_v4_v4(buffers->diffuse_color, diffuse_color);
1616
1617                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1618                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1619                                 sizeof(VertexBufferFormat) * totvert,
1620                                 NULL, GL_STATIC_DRAW_ARB);
1621                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1622                 if (vert_data) {
1623                         for (i = 0; i < totgrid; ++i) {
1624                                 VertexBufferFormat *vd = vert_data;
1625                                 CCGElem *grid = grids[grid_indices[i]];
1626
1627                                 for (y = 0; y < key->grid_size; y++) {
1628                                         for (x = 0; x < key->grid_size; x++) {
1629                                                 CCGElem *elem = CCG_grid_elem(key, grid, x, y);
1630                                                 
1631                                                 copy_v3_v3(vd->co, CCG_elem_co(key, elem));
1632                                                 if (smooth) {
1633                                                         normal_float_to_short_v3(vd->no, CCG_elem_no(key, elem));
1634
1635                                                         if (has_mask) {
1636                                                                 gpu_color_from_mask_copy(*CCG_elem_mask(key, elem),
1637                                                                                          diffuse_color, vd->color);
1638                                                         }
1639                                                 }
1640                                                 vd++;
1641                                         }
1642                                 }
1643                                 
1644                                 if (!smooth) {
1645                                         /* for flat shading, recalculate the normals and store the flat normal
1646                                          * in the last vertex of each quad in the index buffer, since that is
1647                                          * the (provoking) vertex OpenGL uses for the face; see the sketch below */
1648                                         for (j = 0; j < key->grid_size - 1; j++) {
1649                                                 for (k = 0; k < key->grid_size - 1; k++) {
1650                                                         CCGElem *elems[4] = {
1651                                                                 CCG_grid_elem(key, grid, k, j + 1),
1652                                                                 CCG_grid_elem(key, grid, k + 1, j + 1),
1653                                                                 CCG_grid_elem(key, grid, k + 1, j),
1654                                                                 CCG_grid_elem(key, grid, k, j)
1655                                                         };
1656                                                         float fno[3];
1657
1658                                                         normal_quad_v3(fno,
1659                                                                        CCG_elem_co(key, elems[0]),
1660                                                                        CCG_elem_co(key, elems[1]),
1661                                                                        CCG_elem_co(key, elems[2]),
1662                                                                        CCG_elem_co(key, elems[3]));
1663
1664                                                         vd = vert_data + (j + 1) * key->grid_size + (k + 1);
1665                                                         normal_float_to_short_v3(vd->no, fno);
1666
1667                                                         if (has_mask) {
1668                                                                 gpu_color_from_mask_quad_copy(key,
1669                                                                                               elems[0],
1670                                                                                               elems[1],
1671                                                                                               elems[2],
1672                                                                                               elems[3],
1673                                                                                               diffuse_color,
1674                                                                                               vd->color);
1675                                                         }
1676                                                 }
1677                                         }
1678                                 }
1679
1680                                 vert_data += key->grid_area;
1681                         }
1682                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1683                 }
1684                 else {
1685                         glDeleteBuffersARB(1, &buffers->vert_buf);
1686                         buffers->vert_buf = 0;
1687                 }
1688                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1689         }
1690
1691         buffers->grids = grids;
1692         buffers->grid_indices = grid_indices;
1693         buffers->totgrid = totgrid;
1694         buffers->grid_flag_mats = grid_flag_mats;
1695         buffers->gridkey = *key;
1696
1697         buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1698
1699         //printf("node updated %p\n", buffers);
1700 }
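/* Sketch (assumes the quad winding emitted by FILL_QUAD_BUFFER below): with
 * glShadeModel(GL_FLAT), the last vertex of each quad supplies the normal and
 * color for the whole face, so the flat-shading path above only needs to write
 * the recomputed face normal into that one vertex. */
#if 0
static int example_flat_provoking_vert(int gridsize, int j, int k)
{
	/* matches 'vd = vert_data + (j + 1) * key->grid_size + (k + 1)' above */
	return (j + 1) * gridsize + (k + 1);
}
#endif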
1701
1702 /* Returns the number of visible quads in the nodes' grids. */
1703 static int gpu_count_grid_quads(BLI_bitmap **grid_hidden,
1704                                 int *grid_indices, int totgrid,
1705                                 int gridsize)
1706 {
1707         int gridarea = (gridsize - 1) * (gridsize - 1);
1708         int i, x, y, totquad;
1709
1710         /* a grid-hidden layer may be present, so each grid has to be
1711          * checked for visibility */
1712
1713         for (i = 0, totquad = 0; i < totgrid; i++) {
1714                 const BLI_bitmap *gh = grid_hidden[grid_indices[i]];
1715
1716                 if (gh) {
1717                         /* grid hidden layer is present, have to check each face */
1718                         for (y = 0; y < gridsize - 1; y++) {
1719                                 for (x = 0; x < gridsize - 1; x++) {
1720                                         if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
1721                                                 totquad++;
1722                                 }
1723                         }
1724                 }
1725                 else
1726                         totquad += gridarea;
1727         }
1728
1729         return totquad;
1730 }
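/* Worked example for the count above (hypothetical numbers): with gridsize 3,
 * each fully visible grid contributes (3 - 1) * (3 - 1) = 4 quads, while a
 * grid whose hidden bitmap masks one face contributes 3; for totgrid = 2 that
 * gives totquad = 4 + 3 = 7. */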
1731
1732 /* Build the element array buffer of grid indices using either
1733  * unsigned shorts or unsigned ints. */
1734 #define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_)                     \
1735         {                                                                   \
1736                 type_ *quad_data;                                               \
1737                 int offset = 0;                                                 \
1738                 int i, j, k;                                                    \
1739                                                                                 \
1740                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,                    \
1741                                 sizeof(type_) * (tot_quad_) * 4, NULL,          \
1742                                 GL_STATIC_DRAW_ARB);                            \
1743                                                                                 \
1744                 /* Fill the quad buffer */                                      \
1745                 quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB,         \
1746                                            GL_WRITE_ONLY_ARB);                  \
1747                 if (quad_data) {                                                \
1748                         for (i = 0; i < totgrid; ++i) {                             \
1749                                 BLI_bitmap *gh = NULL;                                  \
1750                                 if (grid_hidden)                                        \
1751                                         gh = grid_hidden[(grid_indices)[i]];                \
1752                                                                                                                                                 \
1753                                 for (j = 0; j < gridsize - 1; ++j) {                    \
1754                                         for (k = 0; k < gridsize - 1; ++k) {                \
1755                                                 /* Skip hidden grid face */                     \
1756                                                 if (gh &&                                       \
1757                                                     paint_is_grid_face_hidden(gh,               \
1758                                                                               gridsize, k, j))  \
1759                                                         continue;                                   \
1760                                                                                                                                                 \
1761                                                 *(quad_data++) = offset + j * gridsize + k + 1; \
1762                                                 *(quad_data++) = offset + j * gridsize + k;     \
1763                                                 *(quad_data++) = offset + (j + 1) * gridsize + k; \
1764                                                 *(quad_data++) = offset + (j + 1) * gridsize + k + 1; \
1765                                         }                                                   \
1766                                 }                                                       \
1767                                                                                                                                                 \
1768                                 offset += gridsize * gridsize;                          \
1769                         }                                                           \
1770                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);              \
1771                 }                                                               \
1772                 else {                                                          \
1773                         glDeleteBuffersARB(1, &(buffer_));                          \
1774                         (buffer_) = 0;                                              \
1775                 }                                                               \
1776         } (void)0
1777 /* end FILL_QUAD_BUFFER */
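/* Sketch of the indexing scheme used by FILL_QUAD_BUFFER: within one grid the
 * vertex at (x, y) lives at 'offset + y * gridsize + x', and the quad at
 * (k, j) emits its corners in the order (k + 1, j), (k, j), (k, j + 1),
 * (k + 1, j + 1). For example, with gridsize = 3 and offset = 0, the quad at
 * (0, 0) emits indices 1, 0, 3, 4. */
#if 0
static void example_quad_indices(int gridsize, int offset, int j, int k, int out[4])
{
	out[0] = offset + j * gridsize + k + 1;
	out[1] = offset + j * gridsize + k;
	out[2] = offset + (j + 1) * gridsize + k;
	out[3] = offset + (j + 1) * gridsize + k + 1;
}
#endif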
1778
1779 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1780 {
1781         static int prev_gridsize = -1;
1782         static GLenum prev_index_type = 0;
1783         static GLuint buffer = 0;
1784         static unsigned prev_totquad;
1785
1786         /* used in the FILL_QUAD_BUFFER macro */
1787         BLI_bitmap * const *grid_hidden = NULL;
1788         int *grid_indices = NULL;
1789         int totgrid = 1;
1790
1791         /* VBO is disabled; delete the previous buffer (if it exists) and
1792          * return an invalid handle */
1793         if (!gpu_vbo_enabled()) {
1794                 if (buffer) {
1795                         glDeleteBuffersARB(1, &buffer);
                        /* reset so a stale handle isn't returned if VBOs are re-enabled */
                        buffer = 0;
                }
1796                 return 0;
1797         }
1798
1799         /* VBO is already built */
1800         if (buffer && prev_gridsize == gridsize) {
1801                 *index_type = prev_index_type;
1802                 *totquad = prev_totquad;
1803                 return buffer;
1804         }
1805
1806         /* Build new VBO */
1807         glGenBuffersARB(1, &buffer);
1808         if (buffer) {
1809                 *totquad = (gridsize - 1) * (gridsize - 1);
1810
1811                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1812
1813                 if (gridsize * gridsize < USHRT_MAX) {
1814                         *index_type = GL_UNSIGNED_SHORT;
1815                         FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1816                 }
1817                 else {
1818                         *index_type = GL_UNSIGNED_INT;
1819                         FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1820                 }
1821
1822                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1823         }
1824
1825         prev_gridsize = gridsize;
1826         prev_index_type = *index_type;
1827         prev_totquad = *totquad;
1828         return buffer;
1829 }
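/* Sketch of the index-type decision used above and in GPU_build_grid_buffers
 * below: 16-bit indices are only usable while every vertex index stays below
 * USHRT_MAX, otherwise 32-bit indices are required. */
#if 0
static GLenum example_pick_index_type(int max_vert_count)
{
	return (max_vert_count < USHRT_MAX) ? GL_UNSIGNED_SHORT : GL_UNSIGNED_INT;
}
#endif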
1830
1831 GPU_Buffers *GPU_build_grid_buffers(int *grid_indices, int totgrid,
1832                                     BLI_bitmap **grid_hidden, int gridsize)
1833 {
1834         GPU_Buffers *buffers;
1835         int totquad;
1836         int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;
1837
1838         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1839         buffers->grid_hidden = grid_hidden;
1840         buffers->totgrid = totgrid;
1841
1842         buffers->show_diffuse_color = FALSE;
1843
1844         /* Count the number of quads */
1845         totquad = gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);
1846
1847         if (totquad == fully_visible_totquad) {
1848                 buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
1849                 buffers->has_hidden = 0;
1850         }
1851         else if (gpu_vbo_enabled()) {
1852                 /* Build new VBO */
1853                 glGenBuffersARB(1, &buffers->index_buf);
1854                 if (buffers->index_buf) {
1855                         buffers->tot_quad = totquad;
1856
1857                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1858
1859                         if (totgrid * gridsize * gridsize < USHRT_MAX) {
1860                                 buffers->index_type = GL_UNSIGNED_SHORT;
1861                                 FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
1862                         }
1863                         else {
1864                                 buffers->index_type = GL_UNSIGNED_INT;
1865                                 FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
1866                         }
1867
1868                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1869                 }
1870
1871                 buffers->has_hidden = 1;
1872         }
1873
1874         /* Build coord/normal/color VBO */
1875         if (buffers->index_buf)
1876                 glGenBuffersARB(1, &buffers->vert_buf);
1877
1878         return buffers;
1879 }
1880
1881 #undef FILL_QUAD_BUFFER
1882
1883 /* Output a BMVert into a VertexBufferFormat array
1884  *
1885  * The vertex is skipped if hidden, otherwise the output goes into
1886  * index '*v_index' in the 'vert_data' array and '*v_index' is
1887  * incremented.
1888  */
1889 static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
1890                                           VertexBufferFormat *vert_data,
1891                                           int *v_index,
1892                                           const float fno[3],
1893                                           const float *fmask,
1894                                           const int cd_vert_mask_offset)
1895 {
1896         if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
1897                 VertexBufferFormat *vd = &vert_data[*v_index];
1898
1899                 /* TODO: should use material color */
1900                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1901
1902                 /* Set coord, normal, and mask */
1903                 copy_v3_v3(vd->co, v->co);
1904                 normal_float_to_short_v3(vd->no, fno ? fno : v->no);
1905
1906                 gpu_color_from_mask_copy(
1907                         fmask ? *fmask :
1908                                 BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset),
1909                         diffuse_color,
1910                         vd->color);
1911                 
1912
1913                 /* Assign index for use in the triangle index buffer */
1914                 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
1915
1916                 (*v_index)++;
1917         }
1918 }
1919
1920 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
1921 static int gpu_bmesh_vert_visible_count(GHash *bm_unique_verts,
1922                                                                                 GHash *bm_other_verts)
1923 {
1924         GHashIterator gh_iter;
1925         int totvert = 0;
1926
1927         GHASH_ITER (gh_iter, bm_unique_verts) {
1928                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1929                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1930                         totvert++;
1931         }
1932         GHASH_ITER (gh_iter, bm_other_verts) {
1933                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1934                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1935                         totvert++;
1936         }
1937
1938         return totvert;
1939 }
1940
1941 /* Return the total number of visible faces */
1942 static int gpu_bmesh_face_visible_count(GHash *bm_faces)
1943 {
1944         GHashIterator gh_iter;
1945         int totface = 0;
1946
1947         GHASH_ITER (gh_iter, bm_faces) {
1948                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
1949
1950                 if (!paint_is_bmesh_face_hidden(f))
1951                         totface++;
1952         }
1953
1954         return totface;
1955 }
1956
1957 /* Creates a vertex buffer (coordinate, normal, color) and, if smooth
1958  * shading, an element index buffer. */
1959 void GPU_update_bmesh_buffers(GPU_Buffers *buffers,
1960                                                           BMesh *bm,
1961                                                           GHash *bm_faces,
1962                                                           GHash *bm_unique_verts,
1963                                                           GHash *bm_other_verts)
1964 {
1965         VertexBufferFormat *vert_data;
1966         void *tri_data;
1967         int tottri, totvert, maxvert = 0;
1968
1969         /* TODO, make mask layer optional for bmesh buffer */
1970         const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
1971
1972         if (!buffers->vert_buf || (buffers->smooth && !buffers->index_buf))
1973                 return;
1974
1975         /* Count visible triangles */
1976         tottri = gpu_bmesh_face_visible_count(bm_faces);
1977
1978         if (buffers->smooth) {
1979                 /* Count visible vertices */
1980                 totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
1981         }
1982         else
1983                 totvert = tottri * 3;
1984
1985         /* Initialize vertex buffer */
1986         glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1987         glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1988                                         sizeof(VertexBufferFormat) * totvert,
1989                                         NULL, GL_STATIC_DRAW_ARB);
1990
1991         /* Fill vertex buffer */
1992         vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1993         if (vert_data) {
1994                 GHashIterator gh_iter;
1995                 int v_index = 0;
1996
1997                 if (buffers->smooth) {
1998                         /* Vertices get an index assigned for use in the triangle
1999                          * index buffer */
2000                         bm->elem_index_dirty |= BM_VERT;
2001
2002                         GHASH_ITER (gh_iter, bm_unique_verts) {
2003                                 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2004                                                               vert_data, &v_index, NULL, NULL,
2005                                                               cd_vert_mask_offset);
2006                         }
2007
2008                         GHASH_ITER (gh_iter, bm_other_verts) {
2009                                 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2010                                                               vert_data, &v_index, NULL, NULL,
2011                                                               cd_vert_mask_offset);
2012                         }
2013
2014                         maxvert = v_index;
2015                 }
2016                 else {
2017                         GHASH_ITER (gh_iter, bm_faces) {
2018                                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2019
2020                                 BLI_assert(f->len == 3);
2021
2022                                 if (!paint_is_bmesh_face_hidden(f)) {
2023                                         BMVert *v[3];
2024                                         float fmask = 0;
2025                                         int i;
2026
2027                                         // BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void**)v, 3);
2028                                         BM_face_as_array_vert_tri(f, v);
2029
2030                                         /* Average mask value */
2031                                         for (i = 0; i < 3; i++) {
2032                                                 fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
2033                                         }
2034                                         fmask /= 3.0f;
2035                                         
2036                                         for (i = 0; i < 3; i++) {
2037                                                 gpu_bmesh_vert_to_buffer_copy(v[i], vert_data,
2038                                                                               &v_index, f->no, &fmask,
2039                                                                               cd_vert_mask_offset);
2040                                         }
2041                                 }
2042                         }
2043
2044                         buffers->tot_tri = tottri;
2045                 }
2046
2047                 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
2048         }
2049         else {
2050                 /* Memory map failed */
2051                 glDeleteBuffersARB(1, &buffers->vert_buf);
2052                 buffers->vert_buf = 0;
2053                 return;
2054         }
2055
2056         if (buffers->smooth) {
2057                 const int use_short = (maxvert < USHRT_MAX);
2058
2059                 /* Initialize triangle index buffer */
2060                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2061                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
2062                                                 (use_short ?
2063                                                  sizeof(unsigned short) :
2064                                                  sizeof(unsigned int)) * 3 * tottri,
2065                                                 NULL, GL_STATIC_DRAW_ARB);
2066
2067                 /* Fill triangle index buffer */
2068                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
2069                 if (tri_data) {
2070                         GHashIterator gh_iter;
2071
2072                         GHASH_ITER (gh_iter, bm_faces) {
2073                                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2074
2075                                 if (!paint_is_bmesh_face_hidden(f)) {
2076                                         BMLoop *l_iter;
2077                                         BMLoop *l_first;
2078
2079                                         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
2080                                         do {
2081                                                 BMVert *v = l_iter->v;
2082                                                 if (use_short) {
2083                                                         unsigned short *elem = tri_data;
2084                                                         (*elem) = BM_elem_index_get(v);
2085                                                         elem++;
2086                                                         tri_data = elem;
2087                                                 }
2088                                                 else {
2089                                                         unsigned int *elem = tri_data;
2090                                                         (*elem) = BM_elem_index_get(v);
2091                                                         elem++;
2092                                                         tri_data = elem;
2093                                                 }
2094                                         } while ((l_iter = l_iter->next) != l_first);
2095                                 }
2096                         }
2097
2098                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
2099
2100                         buffers->tot_tri = tottri;
2101                         buffers->index_type = (use_short ?
2102                                                                    GL_UNSIGNED_SHORT :
2103                                                                    GL_UNSIGNED_INT);
2104                 }
2105                 else {
2106                         /* Memory map failed */
2107                         glDeleteBuffersARB(1, &buffers->index_buf);
2108                         buffers->index_buf = 0;
2109                 }
2110         }
2111 }
2112
2113 GPU_Buffers *GPU_build_bmesh_buffers(int smooth_shading)
2114 {
2115         GPU_Buffers *buffers;
2116
2117         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
2118         if (smooth_shading)
2119                 glGenBuffersARB(1, &buffers->index_buf);
2120         glGenBuffersARB(1, &buffers->vert_buf);
2121         buffers->use_bmesh = TRUE;
2122         buffers->smooth = smooth_shading;
2123
2124         return buffers;
2125 }
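/* Hypothetical caller sketch (the real callers live in the sculpt/PBVH
 * drawing code; variable names here are made up): build once, refresh after
 * topology or paint changes, draw every frame, free on exit. */
#if 0
static void example_bmesh_buffer_lifecycle(BMesh *bm, GHash *faces,
                                           GHash *unique_verts, GHash *other_verts)
{
	GPU_Buffers *buffers = GPU_build_bmesh_buffers(TRUE);

	GPU_update_bmesh_buffers(buffers, bm, faces, unique_verts, other_verts);
	GPU_draw_buffers(buffers, NULL, FALSE);
	GPU_free_buffers(buffers);
}
#endif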
2126
2127 static void gpu_draw_buffers_legacy_mesh(GPU_Buffers *buffers)
2128 {
2129         const MVert *mvert = buffers->mvert;
2130         int i, j;
2131         const int has_mask = (buffers->vmask != NULL);
2132         const MFace *face = &buffers->mface[buffers->face_indices[0]];
2133         float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2134
2135         if (buffers->show_diffuse_color)
2136                 GPU_material_diffuse_get(face->mat_nr + 1, diffuse_color);
2137
2138         if (has_mask) {
2139                 gpu_colors_enable(VBO_DISABLED);
2140         }
2141
2142         for (i = 0; i < buffers->totface; ++i) {
2143                 MFace *f = buffers->mface + buffers->face_indices[i];
2144                 int S = f->v4 ? 4 : 3;
2145                 unsigned int *fv = &f->v1;
2146
2147                 if (paint_is_face_hidden(f, buffers->mvert))
2148                         continue;
2149
2150                 glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);
2151
2152                 if (buffers->smooth) {
2153                         for (j = 0; j < S; j++) {
2154                                 if (has_mask) {
2155                                         gpu_color_from_mask_set(buffers->vmask[fv[j]], diffuse_color);
2156                                 }
2157                                 glNormal3sv(mvert[fv[j]].no);
2158                                 glVertex3fv(mvert[fv[j]].co);
2159                         }
2160                 }
2161                 else {
2162                         float fno[3];
2163
2164                         /* calculate face normal */
2165                         if (f->v4) {
2166                                 normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
2167                                                mvert[fv[2]].co, mvert[fv[3]].co);
2168                         }
2169                         else
2170                                 normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
2171                         glNormal3fv(fno);
2172
2173                         if (has_mask) {
2174                                 float fmask;
2175
2176                                 /* calculate face mask color */
2177                                 fmask = (buffers->vmask[fv[0]] +
2178                                          buffers->vmask[fv[1]] +
2179                                          buffers->vmask[fv[2]]);
2180                                 if (f->v4)
2181                                         fmask = (fmask + buffers->vmask[fv[3]]) * 0.25f;
2182                                 else
2183                                         fmask /= 3.0f;
2184                                 gpu_color_from_mask_set(fmask, diffuse_color);
2185                         }
2186                         
2187                         for (j = 0; j < S; j++)
2188                                 glVertex3fv(mvert[fv[j]].co);
2189                 }
2190                 
2191                 glEnd();
2192         }
2193
2194         if (has_mask) {
2195                 gpu_colors_disable(VBO_DISABLED);
2196         }
2197 }
2198
2199 static void gpu_draw_buffers_legacy_grids(GPU_Buffers *buffers)
2200 {
2201         const CCGKey *key = &buffers->gridkey;
2202         int i, j, x, y, gridsize = buffers->gridkey.grid_size;
2203         const int has_mask = key->has_mask;
2204         const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2205         float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2206
2207         if (buffers->show_diffuse_color)
2208                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2209
2210         if (has_mask) {
2211                 gpu_colors_enable(VBO_DISABLED);
2212         }
2213
2214         for (i = 0; i < buffers->totgrid; ++i) {
2215                 int g = buffers->grid_indices[i];
2216                 CCGElem *grid = buffers->grids[g];
2217                 BLI_bitmap *gh = buffers->grid_hidden[g];
2218
2219                 /* TODO: could use strips with hiding as well */
2220
2221                 if (gh) {
2222                         glBegin(GL_QUADS);
2223                         
2224                         for (y = 0; y < gridsize - 1; y++) {
2225                                 for (x = 0; x < gridsize - 1; x++) {
2226                                         CCGElem *e[4] = {
2227                                                 CCG_grid_elem(key, grid, x + 1, y + 1),
2228                                                 CCG_grid_elem(key, grid, x + 1, y),
2229                                                 CCG_grid_elem(key, grid, x, y),
2230                                                 CCG_grid_elem(key, grid, x, y + 1)
2231                                         };
2232
2233                                         /* skip face if any of its corners are hidden */
2234                                         if (paint_is_grid_face_hidden(gh, gridsize, x, y))
2235                                                 continue;
2236
2237                                         if (buffers->smooth) {
2238                                                 for (j = 0; j < 4; j++) {
2239                                                         if (has_mask) {
2240                                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, e[j]), diffuse_color);
2241                                                         }
2242                                                         glNormal3fv(CCG_elem_no(key, e[j]));
2243                                                         glVertex3fv(CCG_elem_co(key, e[j]));
2244                                                 }
2245                                         }
2246                                         else {
2247                                                 float fno[3];
2248                                                 normal_quad_v3(fno,
2249                                                                CCG_elem_co(key, e[0]),
2250                                                                CCG_elem_co(key, e[1]),
2251                                                                CCG_elem_co(key, e[2]),
2252                                                                CCG_elem_co(key, e[3]));
2253                                                 glNormal3fv(fno);
2254
2255                                                 if (has_mask) {
2256                                                         gpu_color_from_mask_quad_set(key, e[0], e[1], e[2], e[3], diffuse_color);
2257                                                 }
2258
2259                                                 for (j = 0; j < 4; j++)
2260                                                         glVertex3fv(CCG_elem_co(key, e[j]));
2261                                         }
2262                                 }
2263                         }
2264
2265                         glEnd();
2266                 }
2267                 else if (buffers->smooth) {
2268                         for (y = 0; y < gridsize - 1; y++) {
2269                                 glBegin(GL_QUAD_STRIP);
2270                                 for (x = 0; x < gridsize; x++) {
2271                                         CCGElem *a = CCG_grid_elem(key, grid, x, y);
2272                                         CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2273
2274                                         if (has_mask) {
2275                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, a), diffuse_color);
2276                                         }
2277                                         glNormal3fv(CCG_elem_no(key, a));
2278                                         glVertex3fv(CCG_elem_co(key, a));
2279                                         if (has_mask) {
2280                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, b), diffuse_color);
2281                                         }
2282                                         glNormal3fv(CCG_elem_no(key, b));
2283                                         glVertex3fv(CCG_elem_co(key, b));
2284                                 }
2285                                 glEnd();
2286                         }
2287                 }
2288                 else {
2289                         for (y = 0; y < gridsize - 1; y++) {
2290                                 glBegin(GL_QUAD_STRIP);
2291                                 for (x = 0; x < gridsize; x++) {
2292                                         CCGElem *a = CCG_grid_elem(key, grid, x, y);
2293                                         CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2294
2295                                         if (x > 0) {
2296                                                 CCGElem *c = CCG_grid_elem(key, grid, x - 1, y);
2297                                                 CCGElem *d = CCG_grid_elem(key, grid, x - 1, y + 1);
2298
2299                                                 float fno[3];
2300                                                 normal_quad_v3(fno,
2301                                                                CCG_elem_co(key, d),
2302                                                                CCG_elem_co(key, b),
2303                                                                CCG_elem_co(key, a),
2304                                                                CCG_elem_co(key, c));
2305                                                 glNormal3fv(fno);
2306
2307                                                 if (has_mask) {
2308                                                         gpu_color_from_mask_quad_set(key, a, b, c, d, diffuse_color);
2309                                                 }
2310                                         }
2311
2312                                         glVertex3fv(CCG_elem_co(key, a));
2313                                         glVertex3fv(CCG_elem_co(key, b));
2314                                 }
2315                                 glEnd();
2316                         }
2317                 }
2318         }
2319
2320         if (has_mask) {
2321                 gpu_colors_disable(VBO_DISABLED);
2322         }
2323 }
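/* Sketch of the strip layout used by the legacy grid path above: one
 * GL_QUAD_STRIP row over 'gridsize' columns issues 2 * gridsize vertices and
 * yields (gridsize - 1) quads; each column pair after the first completes a
 * quad, which is why the flat-shaded branch only issues the face normal once
 * x > 0. */
#if 0
static int example_strip_verts(int gridsize) { return 2 * gridsize; }
static int example_strip_quads(int gridsize) { return gridsize - 1; }
#endif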
2324
2325 void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial,
2326                                           int wireframe)
2327 {
2328         /* sets material from the first face; to solve this properly, faces would
2329          * need to be sorted into buckets by material */
2330         if (setMaterial) {
2331                 if (buffers->totface) {
2332                         const MFace *f = &buffers->mface[buffers->face_indices[0]];
2333                         if (!setMaterial(f->mat_nr + 1, NULL))
2334                                 return;
2335                 }
2336                 else if (buffers->totgrid) {
2337                         const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2338                         if (!setMaterial(f->mat_nr + 1, NULL))
2339                                 return;
2340                 }
2341                 else {
2342                         if (!setMaterial(1, NULL))
2343                                 return;
2344                 }
2345         }
2346
2347         glShadeModel((buffers->smooth || buffers->totface) ? GL_SMOOTH : GL_FLAT);
2348
2349         if (buffers->vert_buf) {
2350                 glEnableClientState(GL_VERTEX_ARRAY);
2351                 if (!wireframe) {
2352                         glEnableClientState(GL_NORMAL_ARRAY);
2353                         gpu_colors_enable(VBO_ENABLED);
2354                 }
2355
2356                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
2357
2358                 if (buffers->index_buf)
2359                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2360
2361                 if (wireframe)
2362                         glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
2363
2364                 if (buffers->tot_quad) {
2365                         char *offset = 0;
2366                         int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
2367                         for (i = 0; i < last; i++) {
2368                                 glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2369                                                 offset + offsetof(VertexBufferFormat, co));
2370                                 glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2371                                                 offset + offsetof(VertexBufferFormat, no));
2372                                 glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2373                                                offset + offsetof(VertexBufferFormat, color));
2374                                 
2375                                 glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
2376
2377                                 offset += buffers->gridkey.grid_area * sizeof(VertexBufferFormat);
2378                         }
2379                 }
2380                 else {
2381                         int totelem = buffers->tot_tri * 3;
2382
2383                         glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2384                                         (void *)offsetof(VertexBufferFormat, co));
2385                         glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2386                                         (void *)offsetof(VertexBufferFormat, no));
2387                         glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2388                                        (void *)offsetof(VertexBufferFormat, color));
2389
2390                         if (buffers->index_buf)
2391                                 glDrawElements(GL_TRIANGLES, totelem, buffers->index_type, 0);
2392                         else
2393                                 glDrawArrays(GL_TRIANGLES, 0, totelem);
2394                 }
2395
2396                 if (wireframe)
2397                         glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
2398
2399                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
2400                 if (buffers->index_buf)
2401                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
2402
2403                 glDisableClientState(GL_VERTEX_ARRAY);
2404                 if (!wireframe) {
2405                         glDisableClientState(GL_NORMAL_ARRAY);
2406                         gpu_colors_disable(VBO_ENABLED);
2407                 }
2408         }
2409         /* fall back to legacy drawing if we are out of memory or VBOs are disabled */
2410         else if (buffers->totface) {
2411                 gpu_draw_buffers_legacy_mesh(buffers);
2412         }
2413         else if (buffers->totgrid) {
2414                 gpu_draw_buffers_legacy_grids(buffers);
2415         }
2416 }
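/* Standalone sketch of the interleaved layout consumed above. The field types
 * are inferred from the gl*Pointer calls; the real struct is
 * VertexBufferFormat, defined earlier in this file, and may contain more. */
#if 0
typedef struct ExampleVert {
	float co[3];              /* GL_FLOAT position */
	short no[3];              /* GL_SHORT normal */
	unsigned char color[3];   /* GL_UNSIGNED_BYTE color */
} ExampleVert;

static void example_bind_pointers(void)
{
	const GLsizei stride = sizeof(ExampleVert);

	glVertexPointer(3, GL_FLOAT, stride, (void *)offsetof(ExampleVert, co));
	glNormalPointer(GL_SHORT, stride, (void *)offsetof(ExampleVert, no));
	glColorPointer(3, GL_UNSIGNED_BYTE, stride, (void *)offsetof(ExampleVert, color));
}
#endif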
2417
2418 int GPU_buffers_diffuse_changed(GPU_Buffers *buffers, int show_diffuse_color)
2419 {
2420         float diffuse_color[4];
2421
2422         if (buffers->show_diffuse_color != show_diffuse_color)
2423                 return TRUE;
2424
2425         if (buffers->show_diffuse_color == FALSE)
2426                 return FALSE;
2427
2428         if (buffers->mface) {
2429                 MFace *f = buffers->mface + buffers->face_indices[0];
2430
2431                 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
2432         }
2433         else {
2434                 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2435
2436                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2437         }
2438
2439         return diffuse_color[0] != buffers->diffuse_color[0] ||
2440                diffuse_color[1] != buffers->diffuse_color[1] ||
2441                diffuse_color[2] != buffers->diffuse_color[2];
2442 }
2443
2444 void GPU_free_buffers(GPU_Buffers *buffers)
2445 {
2446         if (buffers) {
2447                 if (buffers->vert_buf)
2448                         glDeleteBuffersARB(1, &buffers->vert_buf);
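		/* only free the index buffer when this node owns it; when tot_tri and
		 * has_hidden are both unset it is presumably the shared buffer from
		 * gpu_get_grid_buffer(), which must stay alive */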
2449                 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
2450                         glDeleteBuffersARB(1, &buffers->index_buf);
2451
2452                 MEM_freeN(buffers);
2453         }
2454 }