Docs: doxygen file descriptions for BLF, GPU and WM
[blender.git] / source / blender / gpu / intern / gpu_buffers.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2005 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): Brecht Van Lommel.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/gpu/intern/gpu_buffers.c
29  *  \ingroup gpu
30  *
31  * Mesh drawing using OpenGL VBO (Vertex Buffer Objects),
32  * with fall-back to vertex arrays.
33  */
34
35 #include <limits.h>
36 #include <stddef.h>
37 #include <string.h>
38
39 #include "GL/glew.h"
40
41 #include "MEM_guardedalloc.h"
42
43 #include "BLI_bitmap.h"
44 #include "BLI_math.h"
45 #include "BLI_utildefines.h"
46 #include "BLI_ghash.h"
47 #include "BLI_threads.h"
48
49 #include "DNA_meshdata_types.h"
50 #include "DNA_material_types.h"
51
52 #include "BKE_ccg.h"
53 #include "BKE_DerivedMesh.h"
54 #include "BKE_paint.h"
55 #include "BKE_subsurf.h"
56
57 #include "DNA_userdef_types.h"
58
59 #include "GPU_buffers.h"
60 #include "GPU_draw.h"
61
62 #include "bmesh.h"
63
/* bit flags tracking which GL vertex-array states this module has enabled */
typedef enum {
	GPU_BUFFER_VERTEX_STATE   = (1 << 0),
	GPU_BUFFER_NORMAL_STATE   = (1 << 1),
	GPU_BUFFER_TEXCOORD_STATE = (1 << 2),
	GPU_BUFFER_COLOR_STATE    = (1 << 3),
	GPU_BUFFER_ELEMENT_STATE  = (1 << 4),
} GPUBufferState;
71
/* maximum number of generic vertex attributes tracked in attribData */
#define MAX_GPU_ATTRIB_DATA 32

/* material number is a 16-bit signed short; assuming the material
 * number is non-negative, MAXMAT bounds the usable range */
#define MAX_MATERIALS MAXMAT

/* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
static int useVBOs = -1;
/* GPUBufferState flags for the client states currently enabled */
static GPUBufferState GLStates = 0;
/* attribute slots in use; index -1 marks an unused slot */
static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };

/* guards the shared buffer pool against concurrent alloc/free */
static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
83
/* stores recently-deleted buffers so that new buffers won't have to
 * be recreated as often
 *
 * only one instance of this pool is created, stored in
 * gpu_buffer_pool
 *
 * note that the number of buffers in the pool is usually limited to
 * MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
 * when a GPUBuffer is released outside the main thread; due to OpenGL
 * restrictions it cannot be immediately released
 */
typedef struct GPUBufferPool {
	/* number of allocated buffers stored */
	int totbuf;
	/* number of PBVH buffer ids queued for deferred glDeleteBuffersARB
	 * (flushed by gpu_buffer_pool_free_unused) */
	int totpbvhbufids;
	/* actual allocated length of the arrays */
	int maxsize;
	int maxpbvhsize;
	/* most recently freed buffer sits at index 0 */
	GPUBuffer **buffers;
	GLuint *pbvhbufids;
} GPUBufferPool;
#define MAX_FREE_GPU_BUFFERS 8
#define MAX_FREE_GPU_BUFF_IDS 100
107
108 /* create a new GPUBufferPool */
109 static GPUBufferPool *gpu_buffer_pool_new(void)
110 {
111         GPUBufferPool *pool;
112
113         /* enable VBOs if supported */
114         if (useVBOs == -1)
115                 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
116
117         pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer_Pool");
118
119         pool->maxsize = MAX_FREE_GPU_BUFFERS;
120         pool->maxpbvhsize = MAX_FREE_GPU_BUFF_IDS;
121         pool->buffers = MEM_callocN(sizeof(*pool->buffers) * pool->maxsize,
122                                                                 "GPUBufferPool.buffers");
123         pool->pbvhbufids = MEM_callocN(sizeof(*pool->pbvhbufids) * pool->maxpbvhsize,
124                                                                 "GPUBufferPool.pbvhbuffers");
125         return pool;
126 }
127
128 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
129 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
130 {
131         int i;
132
133         if (!pool || index < 0 || index >= pool->totbuf)
134                 return;
135
136         /* shift entries down, overwriting the buffer at `index' */
137         for (i = index; i < pool->totbuf - 1; i++)
138                 pool->buffers[i] = pool->buffers[i + 1];
139
140         /* clear the last entry */
141         if (pool->totbuf > 0)
142                 pool->buffers[pool->totbuf - 1] = NULL;
143
144         pool->totbuf--;
145 }
146
147 /* delete the last entry in the pool */
148 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
149 {
150         GPUBuffer *last;
151
152         if (pool->totbuf <= 0)
153                 return;
154
155         /* get the last entry */
156         if (!(last = pool->buffers[pool->totbuf - 1]))
157                 return;
158
159         /* delete the buffer's data */
160         if (useVBOs)
161                 glDeleteBuffersARB(1, &last->id);
162         else
163                 MEM_freeN(last->pointer);
164
165         /* delete the buffer and remove from pool */
166         MEM_freeN(last);
167         pool->totbuf--;
168         pool->buffers[pool->totbuf] = NULL;
169 }
170
171 /* free a GPUBufferPool; also frees the data in the pool's
172  * GPUBuffers */
173 static void gpu_buffer_pool_free(GPUBufferPool *pool)
174 {
175         if (!pool)
176                 return;
177         
178         while (pool->totbuf)
179                 gpu_buffer_pool_delete_last(pool);
180
181         MEM_freeN(pool->buffers);
182         MEM_freeN(pool->pbvhbufids);
183         MEM_freeN(pool);
184 }
185
186 static void gpu_buffer_pool_free_unused(GPUBufferPool *pool)
187 {
188         if (!pool)
189                 return;
190         
191         while (pool->totbuf)
192                 gpu_buffer_pool_delete_last(pool);
193
194         glDeleteBuffersARB(pool->totpbvhbufids, pool->pbvhbufids);
195         pool->totpbvhbufids = 0;
196 }
197
/* lazily-created singleton pool shared by this module */
static GPUBufferPool *gpu_buffer_pool = NULL;
static GPUBufferPool *gpu_get_global_buffer_pool(void)
{
	/* initialize the pool on first use */
	if (!gpu_buffer_pool)
		gpu_buffer_pool = gpu_buffer_pool_new();

	return gpu_buffer_pool;
}
207
/* destroy the global pool and everything it caches; deletes GL buffer
 * objects, so presumably requires a current GL context -- confirm callers */
void GPU_global_buffer_pool_free(void)
{
	gpu_buffer_pool_free(gpu_buffer_pool);
	gpu_buffer_pool = NULL;
}
213
/* drop cached buffers and flush deferred PBVH buffer-id deletions
 * accumulated while other threads could not call OpenGL */
void GPU_global_buffer_pool_free_unused(void)
{
	gpu_buffer_pool_free_unused(gpu_buffer_pool);
}
218
219 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
220  * pool if possible, otherwise creates a new one
221  *
222  * Thread-unsafe version for internal usage only.
223  */
224 static GPUBuffer *gpu_buffer_alloc_intern(int size)
225 {
226         GPUBufferPool *pool;
227         GPUBuffer *buf;
228         int i, bufsize, bestfit = -1;
229
230         /* bad case, leads to leak of buf since buf->pointer will allocate
231          * NULL, leading to return without cleanup. In any case better detect early
232          * psy-fi */
233         if (size == 0)
234                 return NULL;
235
236         pool = gpu_get_global_buffer_pool();
237
238         /* not sure if this buffer pool code has been profiled much,
239          * seems to me that the graphics driver and system memory
240          * management might do this stuff anyway. --nicholas
241          */
242
243         /* check the global buffer pool for a recently-deleted buffer
244          * that is at least as big as the request, but not more than
245          * twice as big */
246         for (i = 0; i < pool->totbuf; i++) {
247                 bufsize = pool->buffers[i]->size;
248
249                 /* check for an exact size match */
250                 if (bufsize == size) {
251                         bestfit = i;
252                         break;
253                 }
254                 /* smaller buffers won't fit data and buffers at least
255                  * twice as big are a waste of memory */
256                 else if (bufsize > size && size > (bufsize / 2)) {
257                         /* is it closer to the required size than the
258                          * last appropriate buffer found. try to save
259                          * memory */
260                         if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
261                                 bestfit = i;
262                         }
263                 }
264         }
265
266         /* if an acceptable buffer was found in the pool, remove it
267          * from the pool and return it */
268         if (bestfit != -1) {
269                 buf = pool->buffers[bestfit];
270                 gpu_buffer_pool_remove_index(pool, bestfit);
271                 return buf;
272         }
273
274         /* no acceptable buffer found in the pool, create a new one */
275         buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
276         buf->size = size;
277
278         if (useVBOs == 1) {
279                 /* create a new VBO and initialize it to the requested
280                  * size */
281                 glGenBuffersARB(1, &buf->id);
282                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
283                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
284                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
285         }
286         else {
287                 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
288                 
289                 /* purpose of this seems to be dealing with
290                  * out-of-memory errors? looks a bit iffy to me
291                  * though, at least on Linux I expect malloc() would
292                  * just overcommit. --nicholas */
293                 while (!buf->pointer && pool->totbuf > 0) {
294                         gpu_buffer_pool_delete_last(pool);
295                         buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
296                 }
297                 if (!buf->pointer)
298                         return NULL;
299         }
300
301         return buf;
302 }
303
304 /* Same as above, but safe for threading. */
305 GPUBuffer *GPU_buffer_alloc(int size)
306 {
307         GPUBuffer *buffer;
308
309         if (size == 0) {
310                 /* Early out, no lock needed in this case. */
311                 return NULL;
312         }
313
314         BLI_mutex_lock(&buffer_mutex);
315         buffer = gpu_buffer_alloc_intern(size);
316         BLI_mutex_unlock(&buffer_mutex);
317
318         return buffer;
319 }
320
/* release a GPUBuffer; does not free the actual buffer or its data,
 * but rather moves it to the pool of recently-freed buffers for
 * possible re-use
 *
 * Thread-unsafe version for internal usage only.
 */
static void gpu_buffer_free_intern(GPUBuffer *buffer)
{
	GPUBufferPool *pool;
	int i;

	if (!buffer)
		return;

	pool = gpu_get_global_buffer_pool();

	/* free the last used buffer in the queue if no more space, but only
	 * if we are in the main thread. for e.g. rendering or baking it can
	 * happen that we are in other thread and can't call OpenGL, in that
	 * case cleanup will be done GPU_buffer_pool_free_unused */
	if (BLI_thread_is_main()) {
		/* in main thread, safe to decrease size of pool back
		 * down to MAX_FREE_GPU_BUFFERS */
		while (pool->totbuf >= MAX_FREE_GPU_BUFFERS)
			gpu_buffer_pool_delete_last(pool);
	}
	else {
		/* outside of main thread, can't safely delete the
		 * buffer, so increase pool size */
		if (pool->maxsize == pool->totbuf) {
			pool->maxsize += MAX_FREE_GPU_BUFFERS;
			pool->buffers = MEM_reallocN(pool->buffers,
			                             sizeof(GPUBuffer *) * pool->maxsize);
		}
	}

	/* shift pool entries up by one; the eviction/grow logic above
	 * guarantees slot `totbuf' exists */
	for (i = pool->totbuf; i > 0; i--)
		pool->buffers[i] = pool->buffers[i - 1];

	/* insert the buffer into the beginning of the pool, so the most
	 * recently freed buffer is found first by the allocator */
	pool->buffers[0] = buffer;
	pool->totbuf++;
}
365
366 /* Same as above, but safe for threading. */
367 void GPU_buffer_free(GPUBuffer *buffer)
368 {
369         if (!buffer) {
370                 /* Early output, no need to lock in this case, */
371                 return;
372         }
373
374         BLI_mutex_lock(&buffer_mutex);
375         gpu_buffer_free_intern(buffer);
376         BLI_mutex_unlock(&buffer_mutex);
377 }
378
/* linked-list node mapping one mesh vertex to one point in the
 * triangle-point array; extra links come from the preallocated
 * vert_points_mem pool rather than individual allocations */
typedef struct GPUVertPointLink {
	struct GPUVertPointLink *next;
	/* -1 means uninitialized */
	int point_index;
} GPUVertPointLink;
384
385 /* add a new point to the list of points related to a particular
386  * vertex */
387 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
388 {
389         GPUVertPointLink *lnk;
390
391         lnk = &gdo->vert_points[vert_index];
392
393         /* if first link is in use, add a new link at the end */
394         if (lnk->point_index != -1) {
395                 /* get last link */
396                 for (; lnk->next; lnk = lnk->next) ;
397
398                 /* add a new link from the pool */
399                 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
400                 gdo->vert_points_usage++;
401         }
402
403         lnk->point_index = point_index;
404 }
405
406 /* update the vert_points and triangle_to_mface fields with a new
407  * triangle */
408 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
409                                         int base_point_index,
410                                         int face_index,
411                                         int v1, int v2, int v3)
412 {
413         int i, v[3] = {v1, v2, v3};
414         for (i = 0; i < 3; i++)
415                 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
416         gdo->triangle_to_mface[base_point_index / 3] = face_index;
417 }
418
/* for each vertex, build a list of points related to it; these lists
 * are stored in an array sized to the number of vertices */
static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
{
	GPUBufferMaterial *mat;
	int i, mat_orig_to_new[MAX_MATERIALS];

	/* allocate the array and space for links */
	gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
	                               "GPUDrawObject.vert_points");
	gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
	                                   "GPUDrawObject.vert_points_mem");
	gdo->vert_points_usage = 0;

	/* build a map from the original material indices to the new
	 * GPUBufferMaterial indices; only entries for materials present
	 * in gdo->materials are written, so faces are assumed to use
	 * only those material numbers */
	for (i = 0; i < gdo->totmaterial; i++)
		mat_orig_to_new[gdo->materials[i].mat_nr] = i;

	/* -1 indicates the link is not yet used */
	for (i = 0; i < gdo->totvert; i++)
		gdo->vert_points[i].point_index = -1;

	for (i = 0; i < totface; i++, f++) {
		mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];

		/* add triangle */
		gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
		                            i, f->v1, f->v2, f->v3);
		mat->totpoint += 3;

		/* add second triangle for quads */
		if (f->v4) {
			gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
			                            i, f->v3, f->v4, f->v1);
			mat->totpoint += 3;
		}
	}

	/* map any unused vertices to loose points, numbered after all
	 * the triangle points */
	for (i = 0; i < gdo->totvert; i++) {
		if (gdo->vert_points[i].point_index == -1) {
			gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
			gdo->tot_loose_point++;
		}
	}
}
466
/* see GPUDrawObject's structure definition for a description of the
 * data being initialized here */
GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
{
	GPUDrawObject *gdo;
	MFace *mface;
	int points_per_mat[MAX_MATERIALS];
	int i, curmat, curpoint, totface;

	mface = dm->getTessFaceArray(dm);
	totface = dm->getNumTessFaces(dm);

	/* get the number of points used by each material, treating
	 * each quad as two triangles.
	 * NOTE(review): mat_nr is used unchecked as an index here --
	 * assumes 0 <= mat_nr < MAX_MATERIALS for all faces; confirm
	 * callers guarantee this */
	memset(points_per_mat, 0, sizeof(int) * MAX_MATERIALS);
	for (i = 0; i < totface; i++)
		points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;

	/* create the GPUDrawObject */
	gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
	gdo->totvert = dm->getNumVerts(dm);
	gdo->totedge = dm->getNumEdges(dm);

	/* count the number of materials used by this DerivedMesh */
	for (i = 0; i < MAX_MATERIALS; i++) {
		if (points_per_mat[i] > 0)
			gdo->totmaterial++;
	}

	/* allocate an array of materials used by this DerivedMesh */
	gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
	                             "GPUDrawObject.materials");

	/* initialize the materials array: each used material gets a
	 * contiguous point range starting at `start'; totpoint is reset
	 * here and filled in by gpu_drawobject_init_vert_points() */
	for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
		if (points_per_mat[i] > 0) {
			gdo->materials[curmat].start = curpoint;
			gdo->materials[curmat].totpoint = 0;
			gdo->materials[curmat].mat_nr = i;

			curpoint += points_per_mat[i];
			curmat++;
		}
	}

	/* store total number of points used for triangles */
	gdo->tot_triangle_point = curpoint;

	gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
	                                     "GPUDrawObject.triangle_to_mface");

	gpu_drawobject_init_vert_points(gdo, mface, totface);

	return gdo;
}
522
523 void GPU_drawobject_free(DerivedMesh *dm)
524 {
525         GPUDrawObject *gdo;
526
527         if (!dm || !(gdo = dm->drawObject))
528                 return;
529
530         MEM_freeN(gdo->materials);
531         MEM_freeN(gdo->triangle_to_mface);
532         MEM_freeN(gdo->vert_points);
533         MEM_freeN(gdo->vert_points_mem);
534         GPU_buffer_free(gdo->points);
535         GPU_buffer_free(gdo->normals);
536         GPU_buffer_free(gdo->uv);
537         GPU_buffer_free(gdo->colors);
538         GPU_buffer_free(gdo->edges);
539         GPU_buffer_free(gdo->uvedges);
540
541         MEM_freeN(gdo);
542         dm->drawObject = NULL;
543 }
544
/* callback used by gpu_buffer_setup to fill the (mapped) buffer;
 * `index' holds each material's current write offset and is advanced
 * by the callback as it copies */
typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
                                  int *mat_orig_to_new, void *user_data);
547
/* allocate a GPUBuffer of `size' bytes and fill it via `copy_f';
 * on any allocation or mapping failure, sets dm->drawObject->legacy
 * and returns NULL so drawing falls back to non-buffered mode.
 * `vector_size' is the number of floats per point, used to convert
 * each material's start point into a float offset. */
static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
                                   int vector_size, int size, GLenum target,
                                   void *user, GPUBufferCopyFunc copy_f)
{
	GPUBufferPool *pool;
	GPUBuffer *buffer;
	float *varray;
	int mat_orig_to_new[MAX_MATERIALS];
	int *cur_index_per_mat;
	int i;
	int success;
	GLboolean uploaded;

	pool = gpu_get_global_buffer_pool();

	BLI_mutex_lock(&buffer_mutex);

	/* alloc a GPUBuffer; fall back to legacy mode on failure */
	if (!(buffer = gpu_buffer_alloc_intern(size)))
		dm->drawObject->legacy = 1;

	/* nothing to do for legacy mode */
	if (dm->drawObject->legacy) {
		BLI_mutex_unlock(&buffer_mutex);
		return NULL;
	}

	cur_index_per_mat = MEM_mallocN(sizeof(int) * object->totmaterial,
	                                "GPU_buffer_setup.cur_index_per_mat");
	for (i = 0; i < object->totmaterial; i++) {
		/* for each material, the current index to copy data to */
		cur_index_per_mat[i] = object->materials[i].start * vector_size;

		/* map from original material index to new
		 * GPUBufferMaterial index */
		mat_orig_to_new[object->materials[i].mat_nr] = i;
	}

	if (useVBOs) {
		success = 0;

		while (!success) {
			/* bind the buffer and discard previous data,
			 * avoids stalling gpu */
			glBindBufferARB(target, buffer->id);
			glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);

			/* attempt to map the buffer */
			if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
				/* failed to map the buffer; delete it
				 * (free_intern pools it, delete_last then
				 * destroys that pooled entry) */
				gpu_buffer_free_intern(buffer);
				gpu_buffer_pool_delete_last(pool);
				buffer = NULL;

				/* try freeing an entry from the pool
				 * and reallocating the buffer */
				if (pool->totbuf > 0) {
					gpu_buffer_pool_delete_last(pool);
					buffer = gpu_buffer_alloc_intern(size);
				}

				/* allocation still failed; fall back
				 * to legacy mode */
				if (!buffer) {
					dm->drawObject->legacy = 1;
					success = 1;
				}
			}
			else {
				success = 1;
			}
		}

		/* check legacy fallback didn't happen */
		if (dm->drawObject->legacy == 0) {
			uploaded = GL_FALSE;
			/* attempt to upload the data to the VBO */
			while (uploaded == GL_FALSE) {
				(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
				/* glUnmapBuffer returns GL_FALSE if
				 * the data store is corrupted; retry
				 * in that case */
				uploaded = glUnmapBufferARB(target);
			}
		}
		glBindBufferARB(target, 0);
	}
	else {
		/* VBO not supported, use vertex array fallback */
		if (buffer->pointer) {
			varray = buffer->pointer;
			(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
		}
		else {
			dm->drawObject->legacy = 1;
		}
	}

	MEM_freeN(cur_index_per_mat);

	BLI_mutex_unlock(&buffer_mutex);

	return buffer;
}
652
/* copy vertex coordinates into varray: 9 floats (3 corners * xyz) per
 * triangle, 18 per quad; loose points are appended after all triangle
 * points */
static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	MVert *mvert;
	MFace *f;
	int i, j, start, totface;

	mvert = dm->getVertArray(dm);
	f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		/* current float offset for this face's material */
		start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_v3_v3(&varray[start], mvert[f->v1].co);
		copy_v3_v3(&varray[start + 3], mvert[f->v2].co);
		copy_v3_v3(&varray[start + 6], mvert[f->v3].co);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		if (f->v4) {
			/* v3 v4 v1 */
			copy_v3_v3(&varray[start + 9], mvert[f->v3].co);
			copy_v3_v3(&varray[start + 12], mvert[f->v4].co);
			copy_v3_v3(&varray[start + 15], mvert[f->v1].co);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}

	/* copy loose points; vertices whose point_index lies past the
	 * triangle range were marked loose in init_vert_points */
	j = dm->drawObject->tot_triangle_point * 3;
	for (i = 0; i < dm->drawObject->totvert; i++) {
		if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
			copy_v3_v3(&varray[j], mvert[i].co);
			j += 3;
		}
	}
}
690
/* copy per-corner normals into varray; prefers smooth vertex normals,
 * then a cached CD_NORMAL face normal (replicated to every corner),
 * then a face normal computed on the fly */
static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	int i, totface;
	int start;
	float f_no[3];

	float *nors = dm->getTessFaceDataArray(dm, CD_NORMAL);
	MVert *mvert = dm->getVertArray(dm);
	MFace *f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		const int smoothnormal = (f->flag & ME_SMOOTH);

		/* 9 floats per triangle, 18 per quad */
		start = index[mat_orig_to_new[f->mat_nr]];
		index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;

		if (smoothnormal) {
			/* copy vertex normal */
			normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
			normal_short_to_float_v3(&varray[start + 3], mvert[f->v2].no);
			normal_short_to_float_v3(&varray[start + 6], mvert[f->v3].no);

			if (f->v4) {
				normal_short_to_float_v3(&varray[start + 9], mvert[f->v3].no);
				normal_short_to_float_v3(&varray[start + 12], mvert[f->v4].no);
				normal_short_to_float_v3(&varray[start + 15], mvert[f->v1].no);
			}
		}
		else if (nors) {
			/* copy cached face normal to every corner */
			copy_v3_v3(&varray[start], &nors[i * 3]);
			copy_v3_v3(&varray[start + 3], &nors[i * 3]);
			copy_v3_v3(&varray[start + 6], &nors[i * 3]);

			if (f->v4) {
				copy_v3_v3(&varray[start + 9], &nors[i * 3]);
				copy_v3_v3(&varray[start + 12], &nors[i * 3]);
				copy_v3_v3(&varray[start + 15], &nors[i * 3]);
			}
		}
		else {
			/* calculate face normal */
			if (f->v4)
				normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
			else
				normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);

			copy_v3_v3(&varray[start], f_no);
			copy_v3_v3(&varray[start + 3], f_no);
			copy_v3_v3(&varray[start + 6], f_no);

			if (f->v4) {
				copy_v3_v3(&varray[start + 9], f_no);
				copy_v3_v3(&varray[start + 12], f_no);
				copy_v3_v3(&varray[start + 15], f_no);
			}
		}
	}
}
751
/* copy UV coordinates into varray: 6 floats (3 corners * uv) per
 * triangle, 12 per quad; does nothing when the mesh has no MTFACE
 * layer (buffer contents are then left as allocated) */
static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	int start;
	int i, totface;

	MTFace *mtface;
	MFace *f;

	if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
		return;
	f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_v2_v2(&varray[start], mtface[i].uv[0]);
		copy_v2_v2(&varray[start + 2], mtface[i].uv[1]);
		copy_v2_v2(&varray[start + 4], mtface[i].uv[2]);
		index[mat_orig_to_new[f->mat_nr]] += 6;

		if (f->v4) {
			/* v3 v4 v1 */
			copy_v2_v2(&varray[start + 6], mtface[i].uv[2]);
			copy_v2_v2(&varray[start + 8], mtface[i].uv[3]);
			copy_v2_v2(&varray[start + 10], mtface[i].uv[0]);
			index[mat_orig_to_new[f->mat_nr]] += 6;
		}
	}
}
783
/* write the last three bytes of `col' into `v' in reverse order;
 * col[0] is deliberately skipped (presumably MCol's alpha byte --
 * confirm against DNA_meshdata_types.h) */
static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
{
	int i;

	for (i = 0; i < 3; i++)
		v[i] = col[3 - i];
}
790
/* treat varray_ as an array of MCol, four MCol's per face;
 * `user' points at the raw MCol bytes (16 bytes per face = 4 corners
 * * 4 bytes); 3 bytes are written per output corner */
static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
{
	int i, totface;
	unsigned char *varray = (unsigned char *)varray_;
	unsigned char *mcol = (unsigned char *)user;
	MFace *f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		int start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_mcol_uc3(&varray[start], &mcol[i * 16]);
		copy_mcol_uc3(&varray[start + 3], &mcol[i * 16 + 4]);
		copy_mcol_uc3(&varray[start + 6], &mcol[i * 16 + 8]);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		if (f->v4) {
			/* v3 v4 v1 */
			copy_mcol_uc3(&varray[start + 9], &mcol[i * 16 + 8]);
			copy_mcol_uc3(&varray[start + 12], &mcol[i * 16 + 12]);
			copy_mcol_uc3(&varray[start + 15], &mcol[i * 16]);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}
}
818
819 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
820 {
821         MEdge *medge;
822         unsigned int *varray = (unsigned int *)varray_;
823         int i, totedge;
824  
825         medge = dm->getEdgeArray(dm);
826         totedge = dm->getNumEdges(dm);
827
828         for (i = 0; i < totedge; i++, medge++) {
829                 varray[i * 2] = dm->drawObject->vert_points[medge->v1].point_index;
830                 varray[i * 2 + 1] = dm->drawObject->vert_points[medge->v2].point_index;
831         }
832 }
833
834 static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
835 {
836         MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
837         int i, j = 0;
838
839         if (!tf)
840                 return;
841
842         for (i = 0; i < dm->numTessFaceData; i++, tf++) {
843                 MFace mf;
844                 dm->getTessFace(dm, i, &mf);
845
846                 copy_v2_v2(&varray[j], tf->uv[0]);
847                 copy_v2_v2(&varray[j + 2], tf->uv[1]);
848
849                 copy_v2_v2(&varray[j + 4], tf->uv[1]);
850                 copy_v2_v2(&varray[j + 6], tf->uv[2]);
851
852                 if (!mf.v4) {
853                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
854                         copy_v2_v2(&varray[j + 10], tf->uv[0]);
855                         j += 12;
856                 }
857                 else {
858                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
859                         copy_v2_v2(&varray[j + 10], tf->uv[3]);
860
861                         copy_v2_v2(&varray[j + 12], tf->uv[3]);
862                         copy_v2_v2(&varray[j + 14], tf->uv[0]);
863                         j += 16;
864                 }
865         }
866 }
867
/* The kinds of buffers a GPUDrawObject can hold.  Used as an index
 * into gpu_buffer_type_settings[], so the order here must match the
 * order of that table. */
typedef enum {
	GPU_BUFFER_VERTEX = 0,
	GPU_BUFFER_NORMAL,
	GPU_BUFFER_COLOR,
	GPU_BUFFER_UV,
	GPU_BUFFER_EDGE,
	GPU_BUFFER_UVEDGE,
} GPUBufferType;
876
/* Per-buffer-type settings: how to fill it and how to bind it. */
typedef struct {
	GPUBufferCopyFunc copy;  /* function used to copy mesh data into the buffer */
	GLenum gl_buffer_type;   /* GL binding target (array vs. element buffer) */
	int vector_size;         /* number of components stored per point */
} GPUBufferTypeSettings;
882
/* One entry per GPUBufferType, in the same order as the enum above. */
const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
	{GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},          /* GPU_BUFFER_VERTEX */
	{GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},          /* GPU_BUFFER_NORMAL */
	{GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},            /* GPU_BUFFER_COLOR */
	{GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},              /* GPU_BUFFER_UV */
	{GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},    /* GPU_BUFFER_EDGE */
	{GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}   /* GPU_BUFFER_UVEDGE */
};
891
892 /* get the GPUDrawObject buffer associated with a type */
893 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
894 {
895         switch (type) {
896                 case GPU_BUFFER_VERTEX:
897                         return &gdo->points;
898                 case GPU_BUFFER_NORMAL:
899                         return &gdo->normals;
900                 case GPU_BUFFER_COLOR:
901                         return &gdo->colors;
902                 case GPU_BUFFER_UV:
903                         return &gdo->uv;
904                 case GPU_BUFFER_EDGE:
905                         return &gdo->edges;
906                 case GPU_BUFFER_UVEDGE:
907                         return &gdo->uvedges;
908                 default:
909                         return NULL;
910         }
911 }
912
913 /* get the amount of space to allocate for a buffer of a particular type */
914 static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
915 {
916         switch (type) {
917                 case GPU_BUFFER_VERTEX:
918                         return sizeof(float) * 3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
919                 case GPU_BUFFER_NORMAL:
920                         return sizeof(float) * 3 * dm->drawObject->tot_triangle_point;
921                 case GPU_BUFFER_COLOR:
922                         return sizeof(char) * 3 * dm->drawObject->tot_triangle_point;
923                 case GPU_BUFFER_UV:
924                         return sizeof(float) * 2 * dm->drawObject->tot_triangle_point;
925                 case GPU_BUFFER_EDGE:
926                         return sizeof(int) * 2 * dm->drawObject->totedge;
927                 case GPU_BUFFER_UVEDGE:
928                         /* each face gets 3 points, 3 edges per triangle, and
929                          * each edge has its own, non-shared coords, so each
930                          * tri corner needs minimum of 4 floats, quads used
931                          * less so here we can over allocate and assume all
932                          * tris. */
933                         return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
934                 default:
935                         return -1;
936         }
937 }
938
939 /* call gpu_buffer_setup with settings for a particular type of buffer */
940 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
941 {
942         const GPUBufferTypeSettings *ts;
943         void *user_data = NULL;
944         GPUBuffer *buf;
945
946         ts = &gpu_buffer_type_settings[type];
947
948         /* special handling for MCol and UV buffers */
949         if (type == GPU_BUFFER_COLOR) {
950                 if (!(user_data = DM_get_tessface_data_layer(dm, dm->drawObject->colType)))
951                         return NULL;
952         }
953         else if (type == GPU_BUFFER_UV) {
954                 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
955                         return NULL;
956         }
957
958         buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
959                                gpu_buffer_size_from_type(dm, type),
960                                ts->gl_buffer_type, user_data, ts->copy);
961
962         return buf;
963 }
964
965 /* get the buffer of `type', initializing the GPUDrawObject and
966  * buffer if needed */
967 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
968 {
969         GPUBuffer **buf;
970
971         if (!dm->drawObject)
972                 dm->drawObject = GPU_drawobject_new(dm);
973
974         buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
975         if (!(*buf))
976                 *buf = gpu_buffer_setup_type(dm, type);
977
978         return *buf;
979 }
980
981 void GPU_vertex_setup(DerivedMesh *dm)
982 {
983         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
984                 return;
985
986         glEnableClientState(GL_VERTEX_ARRAY);
987         if (useVBOs) {
988                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
989                 glVertexPointer(3, GL_FLOAT, 0, 0);
990         }
991         else {
992                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
993         }
994         
995         GLStates |= GPU_BUFFER_VERTEX_STATE;
996 }
997
998 void GPU_normal_setup(DerivedMesh *dm)
999 {
1000         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
1001                 return;
1002
1003         glEnableClientState(GL_NORMAL_ARRAY);
1004         if (useVBOs) {
1005                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
1006                 glNormalPointer(GL_FLOAT, 0, 0);
1007         }
1008         else {
1009                 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
1010         }
1011
1012         GLStates |= GPU_BUFFER_NORMAL_STATE;
1013 }
1014
1015 void GPU_uv_setup(DerivedMesh *dm)
1016 {
1017         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
1018                 return;
1019
1020         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1021         if (useVBOs) {
1022                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
1023                 glTexCoordPointer(2, GL_FLOAT, 0, 0);
1024         }
1025         else {
1026                 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
1027         }
1028
1029         GLStates |= GPU_BUFFER_TEXCOORD_STATE;
1030 }
1031
/* Bind the vertex-color buffer for drawing, (re)building it when the
 * requested color layer (colType) changed or the mesh's colors were
 * flagged dirty by a paint stroke. */
void GPU_color_setup(DerivedMesh *dm, int colType)
{
	if (!dm->drawObject) {
		/* XXX Not really nice, but we need a valid gpu draw object to set the colType...
		 *     Else we would have to add a new param to gpu_buffer_setup_common. */
		dm->drawObject = GPU_drawobject_new(dm);
		dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
		dm->drawObject->colType = colType;
	}
	/* In paint mode, dm may stay the same during stroke, however we still want to update colors!
	 * Also check in case we changed color type (i.e. which MCol cdlayer we use). */
	else if ((dm->dirty & DM_DIRTY_MCOL_UPDATE_DRAW) || (colType != dm->drawObject->colType)) {
		GPUBuffer **buf = gpu_drawobject_buffer_from_type(dm->drawObject, GPU_BUFFER_COLOR);
		/* XXX Freeing this buffer is a bit stupid, as geometry has not changed, size should remain the same.
		 *     Not sure though it would be worth defining a sort of gpu_buffer_update func - nor whether
		 *     it is even possible ! */
		GPU_buffer_free(*buf);
		*buf = NULL;
		dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
		dm->drawObject->colType = colType;
	}

	/* (re)create the color buffer if it was freed above or never built */
	if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
		return;

	glEnableClientState(GL_COLOR_ARRAY);
	if (useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
	}
	else {
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
	}

	GLStates |= GPU_BUFFER_COLOR_STATE;
}
1068
1069 void GPU_edge_setup(DerivedMesh *dm)
1070 {
1071         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1072                 return;
1073
1074         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1075                 return;
1076
1077         glEnableClientState(GL_VERTEX_ARRAY);
1078         if (useVBOs) {
1079                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1080                 glVertexPointer(3, GL_FLOAT, 0, 0);
1081         }
1082         else {
1083                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1084         }
1085         
1086         GLStates |= GPU_BUFFER_VERTEX_STATE;
1087
1088         if (useVBOs)
1089                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1090
1091         GLStates |= GPU_BUFFER_ELEMENT_STATE;
1092 }
1093
1094 void GPU_uvedge_setup(DerivedMesh *dm)
1095 {
1096         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1097                 return;
1098
1099         glEnableClientState(GL_VERTEX_ARRAY);
1100         if (useVBOs) {
1101                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1102                 glVertexPointer(2, GL_FLOAT, 0, 0);
1103         }
1104         else {
1105                 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1106         }
1107         
1108         GLStates |= GPU_BUFFER_VERTEX_STATE;
1109 }
1110
1111 static int GPU_typesize(int type)
1112 {
1113         switch (type) {
1114                 case GL_FLOAT:
1115                         return sizeof(float);
1116                 case GL_INT:
1117                         return sizeof(int);
1118                 case GL_UNSIGNED_INT:
1119                         return sizeof(unsigned int);
1120                 case GL_BYTE:
1121                         return sizeof(char);
1122                 case GL_UNSIGNED_BYTE:
1123                         return sizeof(unsigned char);
1124                 default:
1125                         return 0;
1126         }
1127 }
1128
1129 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1130 {
1131         int i, elementsize = 0;
1132
1133         for (i = 0; i < numdata; i++) {
1134                 int typesize = GPU_typesize(data[i].type);
1135                 if (typesize != 0)
1136                         elementsize += typesize * data[i].size;
1137         }
1138         return elementsize;
1139 }
1140
/* Set up generic vertex attribute arrays interleaved inside `buffer'.
 * Previously enabled attributes (tracked in the global attribData
 * array, terminated by index == -1) are disabled first.  Each entry of
 * `data' is bound at its running byte offset within one element of
 * size `elementsize'. */
void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
{
	int i;
	int elementsize;
	intptr_t offset = 0;

	/* disable whatever attributes a previous call left enabled */
	for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
		if (attribData[i].index != -1) {
			glDisableVertexAttribArrayARB(attribData[i].index);
		}
		else
			break;
	}
	elementsize = GPU_attrib_element_size(data, numdata);

	if (useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
		for (i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			/* offset is relative to the bound VBO's start */
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
			                         GL_FALSE, elementsize, (void *)offset);
			offset += data[i].size * GPU_typesize(data[i].type);

			/* remember what was enabled so the next call can disable it */
			attribData[i].index = data[i].index;
			attribData[i].size = data[i].size;
			attribData[i].type = data[i].type;
		}
		attribData[numdata].index = -1;
	}
	else {
		/* client-memory fallback: pointer arithmetic from buffer->pointer.
		 * NOTE(review): this path does not record into attribData like the
		 * VBO path does — presumably intentional, but worth confirming. */
		for (i = 0; i < numdata; i++) {
			glEnableVertexAttribArrayARB(data[i].index);
			glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
			                         GL_FALSE, elementsize, (char *)buffer->pointer + offset);
			offset += data[i].size * GPU_typesize(data[i].type);
		}
	}
}
1179
1180
1181 void GPU_buffer_unbind(void)
1182 {
1183         int i;
1184
1185         if (GLStates & GPU_BUFFER_VERTEX_STATE)
1186                 glDisableClientState(GL_VERTEX_ARRAY);
1187         if (GLStates & GPU_BUFFER_NORMAL_STATE)
1188                 glDisableClientState(GL_NORMAL_ARRAY);
1189         if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1190                 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1191         if (GLStates & GPU_BUFFER_COLOR_STATE)
1192                 glDisableClientState(GL_COLOR_ARRAY);
1193         if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1194                 if (useVBOs) {
1195                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1196                 }
1197         }
1198         GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1199                       GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1200                       GPU_BUFFER_ELEMENT_STATE);
1201
1202         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1203                 if (attribData[i].index != -1) {
1204                         glDisableVertexAttribArrayARB(attribData[i].index);
1205                 }
1206                 else
1207                         break;
1208         }
1209
1210         if (useVBOs)
1211                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1212 }
1213
1214 void GPU_color_switch(int mode)
1215 {
1216         if (mode) {
1217                 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1218                         glEnableClientState(GL_COLOR_ARRAY);
1219                 GLStates |= GPU_BUFFER_COLOR_STATE;
1220         }
1221         else {
1222                 if (GLStates & GPU_BUFFER_COLOR_STATE)
1223                         glDisableClientState(GL_COLOR_ARRAY);
1224                 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1225         }
1226 }
1227
1228 /* return 1 if drawing should be done using old immediate-mode
1229  * code, 0 otherwise */
1230 int GPU_buffer_legacy(DerivedMesh *dm)
1231 {
1232         int test = (U.gameflags & USER_DISABLE_VBO);
1233         if (test)
1234                 return 1;
1235
1236         if (dm->drawObject == 0)
1237                 dm->drawObject = GPU_drawobject_new(dm);
1238         return dm->drawObject->legacy;
1239 }
1240
1241 void *GPU_buffer_lock(GPUBuffer *buffer)
1242 {
1243         float *varray;
1244
1245         if (!buffer)
1246                 return 0;
1247
1248         if (useVBOs) {
1249                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1250                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1251                 return varray;
1252         }
1253         else {
1254                 return buffer->pointer;
1255         }
1256 }
1257
1258 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1259 {
1260         float *varray;
1261
1262         if (!buffer)
1263                 return 0;
1264
1265         if (useVBOs) {
1266                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1267                 /* discard previous data, avoid stalling gpu */
1268                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1269                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1270                 return varray;
1271         }
1272         else {
1273                 return buffer->pointer;
1274         }
1275 }
1276
1277 void GPU_buffer_unlock(GPUBuffer *buffer)
1278 {
1279         if (useVBOs) {
1280                 if (buffer) {
1281                         /* note: this operation can fail, could return
1282                          * an error code from this function? */
1283                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1284                 }
1285                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1286         }
1287 }
1288
1289 /* used for drawing edges */
1290 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1291 {
1292         glDrawElements(mode, count, GL_UNSIGNED_INT,
1293                        (useVBOs ?
1294                         (void *)(start * sizeof(unsigned int)) :
1295                         ((int *)elements->pointer) + start));
1296 }
1297
1298
1299 /* XXX: the rest of the code in this file is used for optimized PBVH
1300  * drawing and doesn't interact at all with the buffer code above */
1301
1302 /* Return false if VBO is either unavailable or disabled by the user,
1303  * true otherwise */
1304 static int gpu_vbo_enabled(void)
1305 {
1306         return (GLEW_ARB_vertex_buffer_object &&
1307                 !(U.gameflags & USER_DISABLE_VBO));
1308 }
1309
/* Convenience struct for building the VBO. */
typedef struct {
	float co[3];   /* vertex coordinate */
	short no[3];   /* vertex normal, short-encoded */

	/* inserting this to align the 'color' field to a four-byte
	 * boundary; drastically increases viewport performance on my
	 * drivers (Gallium/Radeon) --nicholasbishop */
	char pad[2];

	unsigned char color[3];  /* rgb vertex color (mask/diffuse) */
} VertexBufferFormat;
1322
/* Per-PBVH-node GPU buffer state for sculpt-mode drawing. */
struct GPU_PBVH_Buffers {
	/* opengl buffer handles */
	GLuint vert_buf, index_buf;
	GLenum index_type;  /* GL type of the index buffer entries */

	/* mesh pointers in case buffer allocation fails */
	MFace *mface;
	MVert *mvert;
	int *face_indices;     /* faces of the node, as indices into mface */
	int totface;
	const float *vmask;    /* per-vertex paint mask, may be NULL */

	/* grid pointers */
	CCGKey gridkey;
	CCGElem **grids;
	const DMFlagMat *grid_flag_mats;
	BLI_bitmap * const *grid_hidden;
	int *grid_indices;     /* grids of the node, as indices into grids */
	int totgrid;
	int has_hidden;

	int use_bmesh;  /* set when the node draws from a BMesh log */

	unsigned int tot_tri, tot_quad;

	/* The PBVH ensures that either all faces in the node are
	 * smooth-shaded or all faces are flat-shaded */
	int smooth;

	int show_diffuse_color;    /* modulate colors with material diffuse */
	float diffuse_color[4];    /* diffuse color last baked into the VBO */
};
/* Whether the color helpers below should also toggle the
 * GL_COLOR_ARRAY client state (only needed when drawing from VBOs). */
typedef enum {
	VBO_ENABLED,
	VBO_DISABLED
} VBO_State;
1359
1360 static void gpu_colors_enable(VBO_State vbo_state)
1361 {
1362         glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
1363         glEnable(GL_COLOR_MATERIAL);
1364         if (vbo_state == VBO_ENABLED)
1365                 glEnableClientState(GL_COLOR_ARRAY);
1366 }
1367
1368 static void gpu_colors_disable(VBO_State vbo_state)
1369 {
1370         glDisable(GL_COLOR_MATERIAL);
1371         if (vbo_state == VBO_ENABLED)
1372                 glDisableClientState(GL_COLOR_ARRAY);
1373 }
1374
/* map a mask value in [0, 1] to a brightness factor: unmasked (0.0)
 * stays full brightness, fully masked (1.0) darkens to 0.25 */
static float gpu_color_from_mask(float mask)
{
	return 1.0f - 0.75f * mask;
}
1379
/* write a mask-darkened diffuse color into `out' as bytes */
static void gpu_color_from_mask_copy(float mask, const float diffuse_color[4], unsigned char out[3])
{
	const float mask_color = gpu_color_from_mask(mask) * 255.0f;
	int i;

	for (i = 0; i < 3; i++)
		out[i] = diffuse_color[i] * mask_color;
}
1390
/* submit a mask-darkened diffuse color with glColor3f */
static void gpu_color_from_mask_set(float mask, float diffuse_color[4])
{
	const float factor = gpu_color_from_mask(mask);

	glColor3f(diffuse_color[0] * factor,
	          diffuse_color[1] * factor,
	          diffuse_color[2] * factor);
}
1396
1397 static float gpu_color_from_mask_quad(const CCGKey *key,
1398                                       CCGElem *a, CCGElem *b,
1399                                       CCGElem *c, CCGElem *d)
1400 {
1401         return gpu_color_from_mask((*CCG_elem_mask(key, a) +
1402                                     *CCG_elem_mask(key, b) +
1403                                     *CCG_elem_mask(key, c) +
1404                                     *CCG_elem_mask(key, d)) * 0.25f);
1405 }
1406
1407 static void gpu_color_from_mask_quad_copy(const CCGKey *key,
1408                                           CCGElem *a, CCGElem *b,
1409                                           CCGElem *c, CCGElem *d,
1410                                           const float *diffuse_color,
1411                                           unsigned char out[3])
1412 {
1413         float mask_color =
1414             gpu_color_from_mask((*CCG_elem_mask(key, a) +
1415                                  *CCG_elem_mask(key, b) +
1416                                  *CCG_elem_mask(key, c) +
1417                                  *CCG_elem_mask(key, d)) * 0.25f) * 255.0f;
1418
1419         out[0] = diffuse_color[0] * mask_color;
1420         out[1] = diffuse_color[1] * mask_color;
1421         out[2] = diffuse_color[2] * mask_color;
1422 }
1423
1424 static void gpu_color_from_mask_quad_set(const CCGKey *key,
1425                                          CCGElem *a, CCGElem *b,
1426                                          CCGElem *c, CCGElem *d,
1427                                          float diffuse_color[4])
1428 {
1429         float color = gpu_color_from_mask_quad(key, a, b, c, d);
1430         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1431 }
1432
/* Refill the vertex VBO of a regular-mesh PBVH node.
 *
 * Smooth shading writes one shared entry per vertex (totvert entries,
 * drawn indexed); flat shading writes three entries per triangle so
 * each triangle carries its own face normal.  Colors are the paint
 * mask darkening applied to the material diffuse color (or the default
 * gray when show_diffuse_color is off).  If mapping the VBO fails the
 * buffer is deleted, leaving the mesh pointers stored in `buffers' as
 * the fallback drawing path. */
void GPU_update_mesh_pbvh_buffers(GPU_PBVH_Buffers *buffers, MVert *mvert,
                             int *vert_indices, int totvert, const float *vmask,
                             int (*face_vert_indices)[4], int show_diffuse_color)
{
	VertexBufferFormat *vert_data;
	int i, j, k;

	buffers->vmask = vmask;
	buffers->show_diffuse_color = show_diffuse_color;

	if (buffers->vert_buf) {
		/* flat shading needs 3 separate entries per triangle */
		int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));
		float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

		if (buffers->show_diffuse_color) {
			/* take the material of the node's first face; the node is
			 * assumed uniform enough for display purposes */
			MFace *f = buffers->mface + buffers->face_indices[0];

			GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
		}

		copy_v4_v4(buffers->diffuse_color, diffuse_color);

		/* Build VBO */
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
		                sizeof(VertexBufferFormat) * totelem,
		                NULL, GL_STATIC_DRAW_ARB);

		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);

		if (vert_data) {
			/* Vertex data is shared if smooth-shaded, but separate
			 * copies are made for flat shading because normals
			 * shouldn't be shared. */
			if (buffers->smooth) {
				/* coordinates and normals, one entry per vertex */
				for (i = 0; i < totvert; ++i) {
					MVert *v = mvert + vert_indices[i];
					VertexBufferFormat *out = vert_data + i;

					copy_v3_v3(out->co, v->co);
					memcpy(out->no, v->no, sizeof(short) * 3);
				}

/* write the color of face corner `index' (mask-darkened diffuse, or
 * plain diffuse when there is no mask layer) */
#define UPDATE_VERTEX(face, vertex, index, diffuse_color) \
				{ \
					VertexBufferFormat *out = vert_data + face_vert_indices[face][index]; \
					if (vmask) \
						gpu_color_from_mask_copy(vmask[vertex], diffuse_color, out->color); \
					else \
						rgb_float_to_uchar(out->color, diffuse_color); \
				} (void)0

				for (i = 0; i < buffers->totface; i++) {
					MFace *f = buffers->mface + buffers->face_indices[i];

					UPDATE_VERTEX(i, f->v1, 0, diffuse_color);
					UPDATE_VERTEX(i, f->v2, 1, diffuse_color);
					UPDATE_VERTEX(i, f->v3, 2, diffuse_color);
					if (f->v4)
						UPDATE_VERTEX(i, f->v4, 3, diffuse_color);
				}
#undef UPDATE_VERTEX
			}
			else {
				/* flat shading: emit each visible face as 1 or 2 triangles */
				for (i = 0; i < buffers->totface; ++i) {
					const MFace *f = &buffers->mface[buffers->face_indices[i]];
					/* treat v1..v4 as an array; relies on MFace's field layout */
					const unsigned int *fv = &f->v1;
					/* quad split into triangles (0,1,2) and (3,0,2) */
					const int vi[2][3] = {{0, 1, 2}, {3, 0, 2}};
					float fno[3];
					short no[3];

					float fmask;

					if (paint_is_face_hidden(f, mvert))
						continue;

					/* Face normal and mask */
					if (f->v4) {
						normal_quad_v3(fno,
						               mvert[fv[0]].co,
						               mvert[fv[1]].co,
						               mvert[fv[2]].co,
						               mvert[fv[3]].co);
						if (vmask) {
							fmask = (vmask[fv[0]] +
							         vmask[fv[1]] +
							         vmask[fv[2]] +
							         vmask[fv[3]]) * 0.25f;
						}
					}
					else {
						normal_tri_v3(fno,
						              mvert[fv[0]].co,
						              mvert[fv[1]].co,
						              mvert[fv[2]].co);
						if (vmask) {
							fmask = (vmask[fv[0]] +
							         vmask[fv[1]] +
							         vmask[fv[2]]) / 3.0f;
						}
					}
					normal_float_to_short_v3(no, fno);

					/* one or two triangles, all corners share the face
					 * normal and the face-averaged mask color */
					for (j = 0; j < (f->v4 ? 2 : 1); j++) {
						for (k = 0; k < 3; k++) {
							const MVert *v = &mvert[fv[vi[j][k]]];
							VertexBufferFormat *out = vert_data;

							copy_v3_v3(out->co, v->co);
							memcpy(out->no, no, sizeof(short) * 3);

							if (vmask)
								gpu_color_from_mask_copy(fmask, diffuse_color, out->color);
							else
								rgb_float_to_uchar(out->color, diffuse_color);

							vert_data++;
						}
					}
				}
			}

			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			/* mapping failed: drop the VBO so drawing falls back to the
			 * mesh pointers kept in `buffers' */
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	buffers->mvert = mvert;
}
1567
1568 GPU_PBVH_Buffers *GPU_build_pbvh_mesh_buffers(int (*face_vert_indices)[4],
1569                                     MFace *mface, MVert *mvert,
1570                                     int *face_indices,
1571                                     int totface)
1572 {
1573         GPU_PBVH_Buffers *buffers;
1574         unsigned short *tri_data;
1575         int i, j, k, tottri;
1576
1577         buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
1578         buffers->index_type = GL_UNSIGNED_SHORT;
1579         buffers->smooth = mface[face_indices[0]].flag & ME_SMOOTH;
1580
1581         buffers->show_diffuse_color = FALSE;
1582
1583         /* Count the number of visible triangles */
1584         for (i = 0, tottri = 0; i < totface; ++i) {
1585                 const MFace *f = &mface[face_indices[i]];
1586                 if (!paint_is_face_hidden(f, mvert))
1587                         tottri += f->v4 ? 2 : 1;
1588         }
1589
1590         /* An element index buffer is used for smooth shading, but flat
1591          * shading requires separate vertex normals so an index buffer is
1592          * can't be used there. */
1593         if (gpu_vbo_enabled() && buffers->smooth)
1594                 glGenBuffersARB(1, &buffers->index_buf);
1595
1596         if (buffers->index_buf) {
1597                 /* Generate index buffer object */
1598                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1599                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
1600                                 sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);
1601
1602                 /* Fill the triangle buffer */
1603                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1604                 if (tri_data) {
1605                         for (i = 0; i < totface; ++i) {
1606                                 const MFace *f = mface + face_indices[i];
1607                                 int v[3];
1608
1609                                 /* Skip hidden faces */
1610                                 if (paint_is_face_hidden(f, mvert))
1611                                         continue;
1612
1613                                 v[0] = 0;
1614                                 v[1] = 1;
1615                                 v[2] = 2;
1616
1617                                 for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
1618                                         for (k = 0; k < 3; ++k) {
1619                                                 *tri_data = face_vert_indices[i][v[k]];
1620                                                 tri_data++;
1621                                         }
1622                                         v[0] = 3;
1623                                         v[1] = 0;
1624                                         v[2] = 2;
1625                                 }
1626                         }
1627                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
1628                 }
1629                 else {
1630                         glDeleteBuffersARB(1, &buffers->index_buf);
1631                         buffers->index_buf = 0;
1632                 }
1633
1634                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1635         }
1636
1637         if (gpu_vbo_enabled() && (buffers->index_buf || !buffers->smooth))
1638                 glGenBuffersARB(1, &buffers->vert_buf);
1639
1640         buffers->tot_tri = tottri;
1641
1642         buffers->mface = mface;
1643         buffers->face_indices = face_indices;
1644         buffers->totface = totface;
1645
1646         return buffers;
1647 }
1648
/* Refill the vertex VBO for a node's multires grids.
 *
 * Writes one VertexBufferFormat entry (coord, normal, mask color) per
 * grid element.  With smooth shading, normals and mask colors come
 * from each element; with flat shading a second pass recomputes a
 * face normal per quad and stores it (and the quad's mask color) on
 * the quad's last vertex, since that is the vertex OpenGL uses for
 * flat shading.  Does nothing if no vertex VBO exists.  The grids,
 * grid_indices, grid_flag_mats and key pointers/values are cached on
 * the buffers struct for later drawing. */
void GPU_update_grid_pbvh_buffers(GPU_PBVH_Buffers *buffers, CCGElem **grids,
                             const DMFlagMat *grid_flag_mats, int *grid_indices,
                             int totgrid, const CCGKey *key, int show_diffuse_color)
{
	VertexBufferFormat *vert_data;
	int i, j, k, x, y;

	buffers->show_diffuse_color = show_diffuse_color;

	/* Build VBO */
	if (buffers->vert_buf) {
		int totvert = key->grid_area * totgrid;
		/* smooth/material flags are taken from the first grid only */
		int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
		const int has_mask = key->has_mask;
		float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

		if (buffers->show_diffuse_color) {
			const DMFlagMat *flags = &grid_flag_mats[grid_indices[0]];

			GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
		}

		copy_v4_v4(buffers->diffuse_color, diffuse_color);

		/* Orphan the old data, then map the fresh store for writing. */
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
		                sizeof(VertexBufferFormat) * totvert,
		                NULL, GL_STATIC_DRAW_ARB);
		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if (vert_data) {
			for (i = 0; i < totgrid; ++i) {
				/* vd walks the current grid; vert_data stays at the
				 * grid's start for the flat-shading pass below */
				VertexBufferFormat *vd = vert_data;
				CCGElem *grid = grids[grid_indices[i]];

				for (y = 0; y < key->grid_size; y++) {
					for (x = 0; x < key->grid_size; x++) {
						CCGElem *elem = CCG_grid_elem(key, grid, x, y);
						
						copy_v3_v3(vd->co, CCG_elem_co(key, elem));
						if (smooth) {
							normal_float_to_short_v3(vd->no, CCG_elem_no(key, elem));

							if (has_mask) {
								gpu_color_from_mask_copy(*CCG_elem_mask(key, elem),
								                         diffuse_color, vd->color);
							}
						}
						vd++;
					}
				}
				
				if (!smooth) {
					/* for flat shading, recalc normals and set the last vertex of
					 * each quad in the index buffer to have the flat normal as
					 * that is what opengl will use */
					for (j = 0; j < key->grid_size - 1; j++) {
						for (k = 0; k < key->grid_size - 1; k++) {
							CCGElem *elems[4] = {
								CCG_grid_elem(key, grid, k, j + 1),
								CCG_grid_elem(key, grid, k + 1, j + 1),
								CCG_grid_elem(key, grid, k + 1, j),
								CCG_grid_elem(key, grid, k, j)
							};
							float fno[3];

							normal_quad_v3(fno,
							               CCG_elem_co(key, elems[0]),
							               CCG_elem_co(key, elems[1]),
							               CCG_elem_co(key, elems[2]),
							               CCG_elem_co(key, elems[3]));

							/* (j + 1, k + 1) is the quad's last vertex */
							vd = vert_data + (j + 1) * key->grid_size + (k + 1);
							normal_float_to_short_v3(vd->no, fno);

							if (has_mask) {
								gpu_color_from_mask_quad_copy(key,
								                              elems[0],
								                              elems[1],
								                              elems[2],
								                              elems[3],
								                              diffuse_color,
								                              vd->color);
							}
						}
					}
				}

				vert_data += key->grid_area;
			}
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			/* Mapping failed: drop the VBO so drawing falls back. */
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	buffers->grids = grids;
	buffers->grid_indices = grid_indices;
	buffers->totgrid = totgrid;
	buffers->grid_flag_mats = grid_flag_mats;
	buffers->gridkey = *key;

	buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

	//printf("node updated %p\n", buffers);
}
1757
1758 /* Returns the number of visible quads in the nodes' grids. */
1759 static int gpu_count_grid_quads(BLI_bitmap **grid_hidden,
1760                                 int *grid_indices, int totgrid,
1761                                 int gridsize)
1762 {
1763         int gridarea = (gridsize - 1) * (gridsize - 1);
1764         int i, x, y, totquad;
1765
1766         /* grid hidden layer is present, so have to check each grid for
1767          * visibility */
1768
1769         for (i = 0, totquad = 0; i < totgrid; i++) {
1770                 const BLI_bitmap *gh = grid_hidden[grid_indices[i]];
1771
1772                 if (gh) {
1773                         /* grid hidden are present, have to check each element */
1774                         for (y = 0; y < gridsize - 1; y++) {
1775                                 for (x = 0; x < gridsize - 1; x++) {
1776                                         if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
1777                                                 totquad++;
1778                                 }
1779                         }
1780                 }
1781                 else
1782                         totquad += gridarea;
1783         }
1784
1785         return totquad;
1786 }
1787
/* Build the element array buffer of grid indices using either
 * unsigned shorts or unsigned ints.
 *
 * This is a statement macro (rather than a function) so the same body
 * can emit either index type.  It relies on these names being in the
 * expanding scope: 'grid_hidden' (may be NULL), 'grid_indices',
 * 'totgrid' and 'gridsize'.  The element array buffer 'buffer_' must
 * already be bound; four indices per visible quad are written via
 * glMapBufferARB, and on map failure 'buffer_' is deleted and set
 * to 0. */
#define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_)                     \
	{                                                                   \
		type_ *quad_data;                                               \
		int offset = 0;                                                 \
		int i, j, k;                                                    \
		                                                                \
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,                    \
		                sizeof(type_) * (tot_quad_) * 4, NULL,          \
		                GL_STATIC_DRAW_ARB);                            \
		                                                                \
		/* Fill the quad buffer */                                      \
		quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB,         \
		                           GL_WRITE_ONLY_ARB);                  \
		if (quad_data) {                                                \
			for (i = 0; i < totgrid; ++i) {                             \
				BLI_bitmap *gh = NULL;                                  \
				if (grid_hidden)                                        \
					gh = grid_hidden[(grid_indices)[i]];                \
																		\
				for (j = 0; j < gridsize - 1; ++j) {                    \
					for (k = 0; k < gridsize - 1; ++k) {                \
						/* Skip hidden grid face */                     \
						if (gh &&                                       \
						    paint_is_grid_face_hidden(gh,               \
						                              gridsize, k, j))  \
							continue;                                   \
																		\
						*(quad_data++) = offset + j * gridsize + k + 1; \
						*(quad_data++) = offset + j * gridsize + k;     \
						*(quad_data++) = offset + (j + 1) * gridsize + k; \
						*(quad_data++) = offset + (j + 1) * gridsize + k + 1; \
					}                                                   \
				}                                                       \
																		\
				offset += gridsize * gridsize;                          \
			}                                                           \
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);              \
		}                                                               \
		else {                                                          \
			glDeleteBuffersARB(1, &(buffer_));                          \
			(buffer_) = 0;                                              \
		}                                                               \
	} (void)0
/* end FILL_QUAD_BUFFER */
1834
1835 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1836 {
1837         static int prev_gridsize = -1;
1838         static GLenum prev_index_type = 0;
1839         static GLuint buffer = 0;
1840         static unsigned prev_totquad;
1841
1842         /* used in the FILL_QUAD_BUFFER macro */
1843         BLI_bitmap * const *grid_hidden = NULL;
1844         int *grid_indices = NULL;
1845         int totgrid = 1;
1846
1847         /* VBO is disabled; delete the previous buffer (if it exists) and
1848          * return an invalid handle */
1849         if (!gpu_vbo_enabled()) {
1850                 if (buffer)
1851                         glDeleteBuffersARB(1, &buffer);
1852                 return 0;
1853         }
1854
1855         /* VBO is already built */
1856         if (buffer && prev_gridsize == gridsize) {
1857                 *index_type = prev_index_type;
1858                 *totquad = prev_totquad;
1859                 return buffer;
1860         }
1861
1862         /* Build new VBO */
1863         glGenBuffersARB(1, &buffer);
1864         if (buffer) {
1865                 *totquad = (gridsize - 1) * (gridsize - 1);
1866
1867                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1868
1869                 if (gridsize * gridsize < USHRT_MAX) {
1870                         *index_type = GL_UNSIGNED_SHORT;
1871                         FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1872                 }
1873                 else {
1874                         *index_type = GL_UNSIGNED_INT;
1875                         FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1876                 }
1877
1878                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1879         }
1880
1881         prev_gridsize = gridsize;
1882         prev_index_type = *index_type;
1883         prev_totquad = *totquad;
1884         return buffer;
1885 }
1886
/* Build PBVH draw buffers for a multires-grid node.
 *
 * If every quad in the node is visible, the shared per-gridsize index
 * buffer from gpu_get_grid_buffer() is reused; otherwise a private
 * index buffer containing only the visible quads is built.  A vertex
 * VBO name is generated whenever an index buffer exists; its contents
 * are filled later by GPU_update_grid_pbvh_buffers(). */
GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(int *grid_indices, int totgrid,
                                    BLI_bitmap **grid_hidden, int gridsize)
{
	GPU_PBVH_Buffers *buffers;
	int totquad;
	int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;

	buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
	buffers->grid_hidden = grid_hidden;
	buffers->totgrid = totgrid;

	buffers->show_diffuse_color = FALSE;

	/* Count the number of quads */
	totquad = gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);

	if (totquad == fully_visible_totquad) {
		/* Nothing hidden: share the cached buffer for this gridsize. */
		buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
		buffers->has_hidden = 0;
	}
	/* NOTE(review): this inlines what looks like the same condition as
	 * gpu_vbo_enabled() used elsewhere in this file — presumably
	 * equivalent; confirm against its definition. */
	else if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO)) {
		/* Build new VBO */
		glGenBuffersARB(1, &buffers->index_buf);
		if (buffers->index_buf) {
			buffers->tot_quad = totquad;

			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);

			/* 16-bit indices suffice if they can address every vertex
			 * of every grid in the node. */
			if (totgrid * gridsize * gridsize < USHRT_MAX) {
				buffers->index_type = GL_UNSIGNED_SHORT;
				FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
			}
			else {
				buffers->index_type = GL_UNSIGNED_INT;
				FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
			}

			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
		}

		buffers->has_hidden = 1;
	}

	/* Build coord/normal VBO */
	if (buffers->index_buf)
		glGenBuffersARB(1, &buffers->vert_buf);

	return buffers;
}
1936
1937 #undef FILL_QUAD_BUFFER
1938
1939 /* Output a BMVert into a VertexBufferFormat array
1940  *
1941  * The vertex is skipped if hidden, otherwise the output goes into
1942  * index '*v_index' in the 'vert_data' array and '*v_index' is
1943  * incremented.
1944  */
1945 static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
1946                                           VertexBufferFormat *vert_data,
1947                                           int *v_index,
1948                                           const float fno[3],
1949                                           const float *fmask,
1950                                           const int cd_vert_mask_offset)
1951 {
1952         if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
1953                 VertexBufferFormat *vd = &vert_data[*v_index];
1954
1955                 /* TODO: should use material color */
1956                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1957
1958                 /* Set coord, normal, and mask */
1959                 copy_v3_v3(vd->co, v->co);
1960                 normal_float_to_short_v3(vd->no, fno ? fno : v->no);
1961
1962                 gpu_color_from_mask_copy(
1963                         fmask ? *fmask :
1964                                 BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset),
1965                         diffuse_color,
1966                         vd->color);
1967                 
1968
1969                 /* Assign index for use in the triangle index buffer */
1970                 /* note: caller must set:  bm->elem_index_dirty |= BM_VERT; */
1971                 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
1972
1973                 (*v_index)++;
1974         }
1975 }
1976
1977 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
1978 static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts,
1979                                         GSet *bm_other_verts)
1980 {
1981         GSetIterator gs_iter;
1982         int totvert = 0;
1983
1984         GSET_ITER (gs_iter, bm_unique_verts) {
1985                 BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
1986                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1987                         totvert++;
1988         }
1989         GSET_ITER (gs_iter, bm_other_verts) {
1990                 BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
1991                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1992                         totvert++;
1993         }
1994
1995         return totvert;
1996 }
1997
1998 /* Return the total number of visible faces */
1999 static int gpu_bmesh_face_visible_count(GHash *bm_faces)
2000 {
2001         GHashIterator gh_iter;
2002         int totface = 0;
2003
2004         GHASH_ITER (gh_iter, bm_faces) {
2005                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2006
2007                 if (!paint_is_bmesh_face_hidden(f))
2008                         totface++;
2009         }
2010
2011         return totface;
2012 }
2013
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
 * shading, an element index buffer.
 *
 * Smooth path: every visible vertex of bm_unique_verts/bm_other_verts
 * is written once and given an element index, then a triangle index
 * buffer referencing those indices is filled.  Flat path: three
 * unshared vertices per visible triangle are written, each carrying
 * the face normal and the face-averaged mask, and no index buffer is
 * needed.  On glMapBufferARB failure the corresponding buffer is
 * deleted and zeroed so drawing can fall back. */
void GPU_update_bmesh_pbvh_buffers(GPU_PBVH_Buffers *buffers,
                              BMesh *bm,
                              GHash *bm_faces,
                              GSet *bm_unique_verts,
                              GSet *bm_other_verts)
{
	VertexBufferFormat *vert_data;
	void *tri_data;
	int tottri, totvert, maxvert = 0;

	/* TODO, make mask layer optional for bmesh buffer */
	const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

	/* Nothing to do without a vertex VBO (and, for smooth shading,
	 * an index buffer). */
	if (!buffers->vert_buf || (buffers->smooth && !buffers->index_buf))
		return;

	/* Count visible triangles */
	tottri = gpu_bmesh_face_visible_count(bm_faces);

	if (buffers->smooth) {
		/* Count visible vertices */
		totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
	}
	else
		totvert = tottri * 3;

	/* Initialize vertex buffer */
	glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
	glBufferDataARB(GL_ARRAY_BUFFER_ARB,
					sizeof(VertexBufferFormat) * totvert,
					NULL, GL_STATIC_DRAW_ARB);

	/* Fill vertex buffer */
	vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
	if (vert_data) {
		int v_index = 0;

		if (buffers->smooth) {
			GSetIterator gs_iter;

			/* Vertices get an index assigned for use in the triangle
			 * index buffer */
			bm->elem_index_dirty |= BM_VERT;

			GSET_ITER (gs_iter, bm_unique_verts) {
				gpu_bmesh_vert_to_buffer_copy(BLI_gsetIterator_getKey(&gs_iter),
				                              vert_data, &v_index, NULL, NULL,
				                              cd_vert_mask_offset);
			}

			GSET_ITER (gs_iter, bm_other_verts) {
				gpu_bmesh_vert_to_buffer_copy(BLI_gsetIterator_getKey(&gs_iter),
				                              vert_data, &v_index, NULL, NULL,
				                              cd_vert_mask_offset);
			}

			/* Highest element index written; decides 16- vs 32-bit
			 * triangle indices below. */
			maxvert = v_index;
		}
		else {
			GHashIterator gh_iter;

			GHASH_ITER (gh_iter, bm_faces) {
				BMFace *f = BLI_ghashIterator_getKey(&gh_iter);

				/* Dynamic-topology PBVH faces are triangles. */
				BLI_assert(f->len == 3);

				if (!paint_is_bmesh_face_hidden(f)) {
					BMVert *v[3];
					float fmask = 0;
					int i;

					// BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void**)v, 3);
					BM_face_as_array_vert_tri(f, v);

					/* Average mask value */
					for (i = 0; i < 3; i++) {
						fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
					}
					fmask /= 3.0f;
					
					/* Three unshared vertices, all with the face
					 * normal and averaged mask. */
					for (i = 0; i < 3; i++) {
						gpu_bmesh_vert_to_buffer_copy(v[i], vert_data,
						                              &v_index, f->no, &fmask,
						                              cd_vert_mask_offset);
					}
				}
			}

			buffers->tot_tri = tottri;
		}

		glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);

		/* gpu_bmesh_vert_to_buffer_copy sets dirty index values */
		bm->elem_index_dirty |= BM_VERT;
	}
	else {
		/* Memory map failed */
		glDeleteBuffersARB(1, &buffers->vert_buf);
		buffers->vert_buf = 0;
		return;
	}

	if (buffers->smooth) {
		/* 16-bit indices are enough if every element index fits. */
		const int use_short = (maxvert < USHRT_MAX);

		/* Initialize triangle index buffer */
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
						(use_short ?
						 sizeof(unsigned short) :
						 sizeof(unsigned int)) * 3 * tottri,
						NULL, GL_STATIC_DRAW_ARB);

		/* Fill triangle index buffer */
		tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if (tri_data) {
			GHashIterator gh_iter;

			GHASH_ITER (gh_iter, bm_faces) {
				BMFace *f = BLI_ghashIterator_getKey(&gh_iter);

				if (!paint_is_bmesh_face_hidden(f)) {
					BMLoop *l_iter;
					BMLoop *l_first;

					/* Emit the element index of each corner vertex,
					 * as assigned during the vertex fill above. */
					l_iter = l_first = BM_FACE_FIRST_LOOP(f);
					do {
						BMVert *v = l_iter->v;
						if (use_short) {
							unsigned short *elem = tri_data;
							(*elem) = BM_elem_index_get(v);
							elem++;
							tri_data = elem;
						}
						else {
							unsigned int *elem = tri_data;
							(*elem) = BM_elem_index_get(v);
							elem++;
							tri_data = elem;
						}
					} while ((l_iter = l_iter->next) != l_first);
				}
			}

			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);

			buffers->tot_tri = tottri;
			buffers->index_type = (use_short ?
								   GL_UNSIGNED_SHORT :
								   GL_UNSIGNED_INT);
		}
		else {
			/* Memory map failed */
			glDeleteBuffersARB(1, &buffers->index_buf);
			buffers->index_buf = 0;
		}
	}
}
2175
2176 GPU_PBVH_Buffers *GPU_build_bmesh_pbvh_buffers(int smooth_shading)
2177 {
2178         GPU_PBVH_Buffers *buffers;
2179
2180         buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
2181         if (smooth_shading)
2182                 glGenBuffersARB(1, &buffers->index_buf);
2183         glGenBuffersARB(1, &buffers->vert_buf);
2184         buffers->use_bmesh = TRUE;
2185         buffers->smooth = smooth_shading;
2186
2187         return buffers;
2188 }
2189
/* Immediate-mode fallback drawing for an MFace-based PBVH node, used when
 * VBOs are disabled or buffer allocation/mapping failed.
 *
 * Emits one glBegin/glEnd primitive per face; hidden faces are skipped.
 * When a sculpt mask layer is present, vertex colors encode the mask
 * blended with the (optional) material diffuse color. */
static void gpu_draw_buffers_legacy_mesh(GPU_PBVH_Buffers *buffers)
{
	const MVert *mvert = buffers->mvert;
	int i, j;
	const int has_mask = (buffers->vmask != NULL);
	/* material is taken from the node's first face only; faces are not
	 * sorted into per-material buckets here */
	const MFace *face = &buffers->mface[buffers->face_indices[0]];
	float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

	if (buffers->show_diffuse_color)
		GPU_material_diffuse_get(face->mat_nr + 1, diffuse_color);

	if (has_mask) {
		gpu_colors_enable(VBO_DISABLED);
	}

	for (i = 0; i < buffers->totface; ++i) {
		MFace *f = buffers->mface + buffers->face_indices[i];
		int S = f->v4 ? 4 : 3;      /* quad if v4 is set, else triangle */
		unsigned int *fv = &f->v1;  /* v1..v4 are contiguous; index them as an array */

		if (paint_is_face_hidden(f, buffers->mvert))
			continue;

		glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);

		if (buffers->smooth) {
			/* smooth shading: per-vertex normals (and per-vertex mask color) */
			for (j = 0; j < S; j++) {
				if (has_mask) {
					gpu_color_from_mask_set(buffers->vmask[fv[j]], diffuse_color);
				}
				glNormal3sv(mvert[fv[j]].no);
				glVertex3fv(mvert[fv[j]].co);
			}
		}
		else {
			/* flat shading: one computed normal (and one averaged mask
			 * color) for the whole face */
			float fno[3];

			/* calculate face normal */
			if (f->v4) {
				normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
				               mvert[fv[2]].co, mvert[fv[3]].co);
			}
			else
				normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
			glNormal3fv(fno);

			if (has_mask) {
				float fmask;

				/* calculate face mask color: average of the corner masks */
				fmask = (buffers->vmask[fv[0]] +
				         buffers->vmask[fv[1]] +
				         buffers->vmask[fv[2]]);
				if (f->v4)
					fmask = (fmask + buffers->vmask[fv[3]]) * 0.25f;
				else
					fmask /= 3.0f;
				gpu_color_from_mask_set(fmask, diffuse_color);
			}
			
			for (j = 0; j < S; j++)
				glVertex3fv(mvert[fv[j]].co);
		}
		
		glEnd();
	}

	if (has_mask) {
		gpu_colors_disable(VBO_DISABLED);
	}
}
2261
/* Immediate-mode fallback drawing for a multires/CCG grid PBVH node, used
 * when VBOs are disabled or buffer allocation/mapping failed.
 *
 * Three emission paths per grid:
 * - grid has hidden elements: individual GL_QUADS, skipping hidden faces;
 * - smooth shading: one GL_QUAD_STRIP per grid row with per-vertex normals;
 * - flat shading: one GL_QUAD_STRIP per grid row with per-face normals,
 *   computed one column behind the current vertex pair. */
static void gpu_draw_buffers_legacy_grids(GPU_PBVH_Buffers *buffers)
{
	const CCGKey *key = &buffers->gridkey;
	int i, j, x, y, gridsize = buffers->gridkey.grid_size;
	const int has_mask = key->has_mask;
	/* material comes from the node's first grid only */
	const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
	float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

	if (buffers->show_diffuse_color)
		GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);

	if (has_mask) {
		gpu_colors_enable(VBO_DISABLED);
	}

	for (i = 0; i < buffers->totgrid; ++i) {
		int g = buffers->grid_indices[i];
		CCGElem *grid = buffers->grids[g];
		BLI_bitmap *gh = buffers->grid_hidden[g];

		/* TODO: could use strips with hiding as well */

		if (gh) {
			/* hidden elements present: emit quads one at a time so
			 * individual faces can be skipped */
			glBegin(GL_QUADS);
			
			for (y = 0; y < gridsize - 1; y++) {
				for (x = 0; x < gridsize - 1; x++) {
					/* corners in draw order (winding matters) */
					CCGElem *e[4] = {
						CCG_grid_elem(key, grid, x + 1, y + 1),
						CCG_grid_elem(key, grid, x + 1, y),
						CCG_grid_elem(key, grid, x, y),
						CCG_grid_elem(key, grid, x, y + 1)
					};

					/* skip face if any of its corners are hidden */
					if (paint_is_grid_face_hidden(gh, gridsize, x, y))
						continue;

					if (buffers->smooth) {
						for (j = 0; j < 4; j++) {
							if (has_mask) {
								gpu_color_from_mask_set(*CCG_elem_mask(key, e[j]), diffuse_color);
							}
							glNormal3fv(CCG_elem_no(key, e[j]));
							glVertex3fv(CCG_elem_co(key, e[j]));
						}
					}
					else {
						/* flat: one normal/mask color per quad */
						float fno[3];
						normal_quad_v3(fno,
						               CCG_elem_co(key, e[0]),
						               CCG_elem_co(key, e[1]),
						               CCG_elem_co(key, e[2]),
						               CCG_elem_co(key, e[3]));
						glNormal3fv(fno);

						if (has_mask) {
							gpu_color_from_mask_quad_set(key, e[0], e[1], e[2], e[3], diffuse_color);
						}

						for (j = 0; j < 4; j++)
							glVertex3fv(CCG_elem_co(key, e[j]));
					}
				}
			}

			glEnd();
		}
		else if (buffers->smooth) {
			/* no hiding, smooth: one strip per row, two verts per column */
			for (y = 0; y < gridsize - 1; y++) {
				glBegin(GL_QUAD_STRIP);
				for (x = 0; x < gridsize; x++) {
					CCGElem *a = CCG_grid_elem(key, grid, x, y);
					CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);

					if (has_mask) {
						gpu_color_from_mask_set(*CCG_elem_mask(key, a), diffuse_color);
					}
					glNormal3fv(CCG_elem_no(key, a));
					glVertex3fv(CCG_elem_co(key, a));
					if (has_mask) {
						gpu_color_from_mask_set(*CCG_elem_mask(key, b), diffuse_color);
					}
					glNormal3fv(CCG_elem_no(key, b));
					glVertex3fv(CCG_elem_co(key, b));
				}
				glEnd();
			}
		}
		else {
			/* no hiding, flat: one strip per row; the face normal for the
			 * quad completed at column x uses the previous column (x-1) */
			for (y = 0; y < gridsize - 1; y++) {
				glBegin(GL_QUAD_STRIP);
				for (x = 0; x < gridsize; x++) {
					CCGElem *a = CCG_grid_elem(key, grid, x, y);
					CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);

					if (x > 0) {
						CCGElem *c = CCG_grid_elem(key, grid, x - 1, y);
						CCGElem *d = CCG_grid_elem(key, grid, x - 1, y + 1);

						float fno[3];
						normal_quad_v3(fno,
						               CCG_elem_co(key, d),
						               CCG_elem_co(key, b),
						               CCG_elem_co(key, a),
						               CCG_elem_co(key, c));
						glNormal3fv(fno);

						if (has_mask) {
							gpu_color_from_mask_quad_set(key, a, b, c, d, diffuse_color);
						}
					}

					glVertex3fv(CCG_elem_co(key, a));
					glVertex3fv(CCG_elem_co(key, b));
				}
				glEnd();
			}
		}
	}

	if (has_mask) {
		gpu_colors_disable(VBO_DISABLED);
	}
}
2387
/* Draw a PBVH node, preferring the VBO path and falling back to
 * immediate-mode drawing when no vertex buffer exists.
 *
 * \param setMaterial  optional callback to bind the node's material;
 *                     drawing is skipped if it returns false.
 * \param wireframe    when true, draws with GL_LINE polygon mode and
 *                     without normals/colors. */
void GPU_draw_pbvh_buffers(GPU_PBVH_Buffers *buffers, DMSetMaterial setMaterial,
                                          int wireframe)
{
	/* sets material from the first face, to solve properly face would need to
	 * be sorted in buckets by materials */
	if (setMaterial) {
		if (buffers->totface) {
			const MFace *f = &buffers->mface[buffers->face_indices[0]];
			if (!setMaterial(f->mat_nr + 1, NULL))
				return;
		}
		else if (buffers->totgrid) {
			const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
			if (!setMaterial(f->mat_nr + 1, NULL))
				return;
		}
		else {
			/* no faces or grids (e.g. bmesh node): default material */
			if (!setMaterial(1, NULL))
				return;
		}
	}

	glShadeModel((buffers->smooth || buffers->totface) ? GL_SMOOTH : GL_FLAT);

	if (buffers->vert_buf) {
		/* VBO path */
		glEnableClientState(GL_VERTEX_ARRAY);
		if (!wireframe) {
			glEnableClientState(GL_NORMAL_ARRAY);
			gpu_colors_enable(VBO_ENABLED);
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);

		if (buffers->index_buf)
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);

		if (wireframe)
			glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

		if (buffers->tot_quad) {
			/* grid node: the same quad index buffer is reused for each
			 * grid; only the vertex-attribute base offset advances.
			 * 'offset' is a byte offset into the bound VBO, expressed as
			 * a char pointer per the classic VBO idiom. */
			char *offset = 0;
			/* with hidden faces the index buffer covers all grids at
			 * once, so only a single draw call is issued */
			int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
			for (i = 0; i < last; i++) {
				glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
				                offset + offsetof(VertexBufferFormat, co));
				glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
				                offset + offsetof(VertexBufferFormat, no));
				glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
				               offset + offsetof(VertexBufferFormat, color));
				
				glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);

				offset += buffers->gridkey.grid_area * sizeof(VertexBufferFormat);
			}
		}
		else {
			/* mesh/bmesh node: triangles, indexed if an element buffer
			 * exists, otherwise as a flat vertex array */
			int totelem = buffers->tot_tri * 3;

			glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
			                (void *)offsetof(VertexBufferFormat, co));
			glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
			                (void *)offsetof(VertexBufferFormat, no));
			glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
			               (void *)offsetof(VertexBufferFormat, color));

			if (buffers->index_buf)
				glDrawElements(GL_TRIANGLES, totelem, buffers->index_type, 0);
			else
				glDrawArrays(GL_TRIANGLES, 0, totelem);
		}

		/* restore GL state */
		if (wireframe)
			glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		if (buffers->index_buf)
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);

		glDisableClientState(GL_VERTEX_ARRAY);
		if (!wireframe) {
			glDisableClientState(GL_NORMAL_ARRAY);
			gpu_colors_disable(VBO_ENABLED);
		}
	}
	/* fallbacks if we are out of memory or VBO is disabled */
	else if (buffers->totface) {
		gpu_draw_buffers_legacy_mesh(buffers);
	}
	else if (buffers->totgrid) {
		gpu_draw_buffers_legacy_grids(buffers);
	}
}
2480
2481 int GPU_pbvh_buffers_diffuse_changed(GPU_PBVH_Buffers *buffers, int show_diffuse_color)
2482 {
2483         float diffuse_color[4];
2484
2485         if (buffers->show_diffuse_color != show_diffuse_color)
2486                 return TRUE;
2487
2488         if (buffers->show_diffuse_color == FALSE)
2489                 return FALSE;
2490
2491         if (buffers->mface) {
2492                 MFace *f = buffers->mface + buffers->face_indices[0];
2493
2494                 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
2495         }
2496         else {
2497                 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2498
2499                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2500         }
2501
2502         return diffuse_color[0] != buffers->diffuse_color[0] ||
2503                diffuse_color[1] != buffers->diffuse_color[1] ||
2504                diffuse_color[2] != buffers->diffuse_color[2];
2505 }
2506
2507 /* release a GPU_PBVH_Buffers id;
2508  *
2509  * Thread-unsafe version for internal usage only.
2510  */
2511 static void gpu_pbvh_buffer_free_intern(GLuint id)
2512 {
2513         GPUBufferPool *pool;
2514
2515         /* zero id is vertex buffers off */
2516         if (!id)
2517                 return;
2518
2519         pool = gpu_get_global_buffer_pool();
2520
2521         /* free the buffers immediately if we are on main thread */
2522         if (BLI_thread_is_main()) {
2523                 glDeleteBuffersARB(1, &id);
2524
2525                 if (pool->totpbvhbufids > 0) {
2526                         glDeleteBuffersARB(pool->totpbvhbufids, pool->pbvhbufids);
2527                         pool->totpbvhbufids = 0;
2528                 }
2529                 return;
2530         }
2531         /* outside of main thread, can't safely delete the
2532          * buffer, so increase pool size */
2533         if (pool->maxpbvhsize == pool->totpbvhbufids) {
2534                 pool->maxpbvhsize += MAX_FREE_GPU_BUFF_IDS;
2535                 pool->pbvhbufids = MEM_reallocN(pool->pbvhbufids,
2536                                                                                 sizeof(*pool->pbvhbufids) * pool->maxpbvhsize);
2537         }
2538
2539         /* insert the buffer into the beginning of the pool */
2540         pool->pbvhbufids[pool->totpbvhbufids++] = id;
2541 }
2542
2543
2544 void GPU_free_pbvh_buffers(GPU_PBVH_Buffers *buffers)
2545 {
2546         if (buffers) {
2547                 if (buffers->vert_buf)
2548                         gpu_pbvh_buffer_free_intern(buffers->vert_buf);
2549                 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
2550                         gpu_pbvh_buffer_free_intern(buffers->index_buf);
2551
2552                 MEM_freeN(buffers);
2553         }
2554 }