1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2005 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): Brecht Van Lommel.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/gpu/intern/gpu_buffers.c
29  *  \ingroup gpu
30  */
31
32
33 #include <limits.h>
34 #include <stddef.h>
35 #include <string.h>
36
37 #include "GL/glew.h"
38
39 #include "MEM_guardedalloc.h"
40
41 #include "BLI_bitmap.h"
42 #include "BLI_math.h"
43 #include "BLI_utildefines.h"
44 #include "BLI_ghash.h"
45 #include "BLI_threads.h"
46
47 #include "DNA_meshdata_types.h"
48 #include "DNA_material_types.h"
49
50 #include "BKE_ccg.h"
51 #include "BKE_DerivedMesh.h"
52 #include "BKE_paint.h"
53 #include "BKE_subsurf.h"
54
55 #include "DNA_userdef_types.h"
56
57 #include "GPU_buffers.h"
58 #include "GPU_draw.h"
59
60 #include "bmesh.h"
61
62 typedef enum {
63         GPU_BUFFER_VERTEX_STATE = 1,
64         GPU_BUFFER_NORMAL_STATE = 2,
65         GPU_BUFFER_TEXCOORD_STATE = 4,
66         GPU_BUFFER_COLOR_STATE = 8,
67         GPU_BUFFER_ELEMENT_STATE = 16,
68 } GPUBufferState;
69
70 #define MAX_GPU_ATTRIB_DATA 32
71
72 /* the material number is a 16-bit signed short; assuming material numbers are non-negative, at most MAXMAT materials need to be handled */
73 #define MAX_MATERIALS MAXMAT
74
75 /* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
76 static int useVBOs = -1;
77 static GPUBufferState GLStates = 0;
78 static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
79
80 static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
81
82 /* stores recently-deleted buffers so that new buffers won't have to
83  * be recreated as often
84  *
85  * only one instance of this pool is created, stored in
86  * gpu_buffer_pool
87  *
88  * note that the number of buffers in the pool is usually limited to
89  * MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
90  * when a GPUBuffer is released outside the main thread; due to OpenGL
91  * restrictions it cannot be immediately released
92  */
93 typedef struct GPUBufferPool {
94         /* number of allocated buffers stored */
95         int totbuf;
96         /* actual allocated length of the array */
97         int maxsize;
98         GPUBuffer **buffers;
99 } GPUBufferPool;
100 #define MAX_FREE_GPU_BUFFERS 8
101
102 /* create a new GPUBufferPool */
103 static GPUBufferPool *gpu_buffer_pool_new(void)
104 {
105         GPUBufferPool *pool;
106
107         /* enable VBOs if supported */
108         if (useVBOs == -1)
109                 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
110
111         pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer_Pool");
112
113         pool->maxsize = MAX_FREE_GPU_BUFFERS;
114         pool->buffers = MEM_callocN(sizeof(GPUBuffer *) * pool->maxsize,
115                                     "GPUBuffer.buffers");
116
117         return pool;
118 }
119
120 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
121 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
122 {
123         int i;
124
125         if (!pool || index < 0 || index >= pool->totbuf)
126                 return;
127
128         /* shift entries down, overwriting the buffer at `index' */
129         for (i = index; i < pool->totbuf - 1; i++)
130                 pool->buffers[i] = pool->buffers[i + 1];
131
132         /* clear the last entry */
133         if (pool->totbuf > 0)
134                 pool->buffers[pool->totbuf - 1] = NULL;
135
136         pool->totbuf--;
137 }
138
139 /* delete the last entry in the pool */
140 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
141 {
142         GPUBuffer *last;
143
144         if (pool->totbuf <= 0)
145                 return;
146
147         /* get the last entry */
148         if (!(last = pool->buffers[pool->totbuf - 1]))
149                 return;
150
151         /* delete the buffer's data */
152         if (useVBOs)
153                 glDeleteBuffersARB(1, &last->id);
154         else
155                 MEM_freeN(last->pointer);
156
157         /* delete the buffer and remove from pool */
158         MEM_freeN(last);
159         pool->totbuf--;
160         pool->buffers[pool->totbuf] = NULL;
161 }
162
163 /* free a GPUBufferPool; also frees the data in the pool's
164  * GPUBuffers */
165 static void gpu_buffer_pool_free(GPUBufferPool *pool)
166 {
167         if (!pool)
168                 return;
169         
170         while (pool->totbuf)
171                 gpu_buffer_pool_delete_last(pool);
172
173         MEM_freeN(pool->buffers);
174         MEM_freeN(pool);
175 }
176
177 static void gpu_buffer_pool_free_unused(GPUBufferPool *pool)
178 {
179         if (!pool)
180                 return;
181         
182         while (pool->totbuf)
183                 gpu_buffer_pool_delete_last(pool);
184 }
185
186 static GPUBufferPool *gpu_buffer_pool = NULL;
187 static GPUBufferPool *gpu_get_global_buffer_pool(void)
188 {
189         /* initialize the pool */
190         if (!gpu_buffer_pool)
191                 gpu_buffer_pool = gpu_buffer_pool_new();
192
193         return gpu_buffer_pool;
194 }
195
196 void GPU_global_buffer_pool_free(void)
197 {
198         gpu_buffer_pool_free(gpu_buffer_pool);
199         gpu_buffer_pool = NULL;
200 }
201
202 void GPU_global_buffer_pool_free_unused(void)
203 {
204         gpu_buffer_pool_free_unused(gpu_buffer_pool);
205 }
206
207 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
208  * pool if possible, otherwise creates a new one
209  *
210  * Thread-unsafe version for internal usage only.
211  */
212 static GPUBuffer *gpu_buffer_alloc_intern(int size)
213 {
214         GPUBufferPool *pool;
215         GPUBuffer *buf;
216         int i, bufsize, bestfit = -1;
217
218         /* bad case: a zero-size request would leak buf, since buf->pointer would
219          * be allocated as NULL and we would return without cleanup. In any case it
220          * is better to detect this early - psy-fi */
221         if (size == 0)
222                 return NULL;
223
224         pool = gpu_get_global_buffer_pool();
225
226         /* not sure if this buffer pool code has been profiled much,
227          * seems to me that the graphics driver and system memory
228          * management might do this stuff anyway. --nicholas
229          */
230
231         /* check the global buffer pool for a recently-deleted buffer
232          * that is at least as big as the request, but not more than
233          * twice as big */
234         for (i = 0; i < pool->totbuf; i++) {
235                 bufsize = pool->buffers[i]->size;
236
237                 /* check for an exact size match */
238                 if (bufsize == size) {
239                         bestfit = i;
240                         break;
241                 }
242                 /* smaller buffers won't fit data and buffers at least
243                  * twice as big are a waste of memory */
244                 else if (bufsize > size && size > (bufsize / 2)) {
245                         /* if it is closer to the required size than the
246                          * last acceptable buffer found, prefer it to save
247                          * memory */
248                         if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
249                                 bestfit = i;
250                         }
251                 }
252         }
253
254         /* if an acceptable buffer was found in the pool, remove it
255          * from the pool and return it */
256         if (bestfit != -1) {
257                 buf = pool->buffers[bestfit];
258                 gpu_buffer_pool_remove_index(pool, bestfit);
259                 return buf;
260         }
261
262         /* no acceptable buffer found in the pool, create a new one */
263         buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
264         buf->size = size;
265
266         if (useVBOs == 1) {
267                 /* create a new VBO and initialize it to the requested
268                  * size */
269                 glGenBuffersARB(1, &buf->id);
270                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
271                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
272                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
273         }
274         else {
275                 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
276                 
277                 /* purpose of this seems to be dealing with
278                  * out-of-memory errors? looks a bit iffy to me
279                  * though, at least on Linux I expect malloc() would
280                  * just overcommit. --nicholas */
281                 while (!buf->pointer && pool->totbuf > 0) {
282                         gpu_buffer_pool_delete_last(pool);
283                         buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
284                 }
285                 if (!buf->pointer)
286                         return NULL;
287         }
288
289         return buf;
290 }
291
292 /* Same as above, but safe for threading. */
293 GPUBuffer *GPU_buffer_alloc(int size)
294 {
295         GPUBuffer *buffer;
296
297         if (size == 0) {
298                 /* Early out, no lock needed in this case. */
299                 return NULL;
300         }
301
302         BLI_mutex_lock(&buffer_mutex);
303         buffer = gpu_buffer_alloc_intern(size);
304         BLI_mutex_unlock(&buffer_mutex);
305
306         return buffer;
307 }
308
309 /* release a GPUBuffer; does not free the actual buffer or its data,
310  * but rather moves it to the pool of recently-freed buffers for
311  * possible re-use
312  *
313  * Thread-unsafe version for internal usage only.
314  */
315 static void gpu_buffer_free_intern(GPUBuffer *buffer)
316 {
317         GPUBufferPool *pool;
318         int i;
319
320         if (!buffer)
321                 return;
322
323         pool = gpu_get_global_buffer_pool();
324
325         /* delete the oldest buffers in the queue if there is no more space, but
326          * only if we are in the main thread. during e.g. rendering or baking we
327          * can be in another thread and can't call OpenGL; in that case cleanup
328          * is done later by GPU_global_buffer_pool_free_unused() */
329         if (BLI_thread_is_main()) {
330                 /* in main thread, safe to decrease size of pool back
331                  * down to MAX_FREE_GPU_BUFFERS */
332                 while (pool->totbuf >= MAX_FREE_GPU_BUFFERS)
333                         gpu_buffer_pool_delete_last(pool);
334         }
335         else {
336                 /* outside of main thread, can't safely delete the
337                  * buffer, so increase pool size */
338                 if (pool->maxsize == pool->totbuf) {
339                         pool->maxsize += MAX_FREE_GPU_BUFFERS;
340                         pool->buffers = MEM_reallocN(pool->buffers,
341                                                      sizeof(GPUBuffer *) * pool->maxsize);
342                 }
343         }
344
345         /* shift pool entries up by one */
346         for (i = pool->totbuf; i > 0; i--)
347                 pool->buffers[i] = pool->buffers[i - 1];
348
349         /* insert the buffer into the beginning of the pool */
350         pool->buffers[0] = buffer;
351         pool->totbuf++;
352 }
353
354 /* Same as above, but safe for threading. */
355 void GPU_buffer_free(GPUBuffer *buffer)
356 {
357         if (!buffer) {
358                 /* Early out, no need to lock in this case. */
359                 return;
360         }
361
362         BLI_mutex_lock(&buffer_mutex);
363         gpu_buffer_free_intern(buffer);
364         BLI_mutex_unlock(&buffer_mutex);
365 }
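
/* Illustrative sketch (not used by this file, assumes a caller-provided
 * `totvert' count): a typical caller pairs the thread-safe alloc/free above
 * with the lock/unlock helpers defined further below to fill a buffer:
 *
 *     GPUBuffer *buf = GPU_buffer_alloc(sizeof(float) * 3 * totvert);
 *     if (buf) {
 *         float *data = GPU_buffer_lock(buf);
 *         if (data) {
 *             ... write totvert * 3 floats into data ...
 *             GPU_buffer_unlock(buf);
 *         }
 *         GPU_buffer_free(buf);  (returned to the pool, not deleted)
 *     }
 */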
366
367 typedef struct GPUVertPointLink {
368         struct GPUVertPointLink *next;
369         /* -1 means uninitialized */
370         int point_index;
371 } GPUVertPointLink;
372
373 /* add a new point to the list of points related to a particular
374  * vertex */
375 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
376 {
377         GPUVertPointLink *lnk;
378
379         lnk = &gdo->vert_points[vert_index];
380
381         /* if first link is in use, add a new link at the end */
382         if (lnk->point_index != -1) {
383                 /* get last link */
384                 for (; lnk->next; lnk = lnk->next) ;
385
386                 /* add a new link from the pool */
387                 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
388                 gdo->vert_points_usage++;
389         }
390
391         lnk->point_index = point_index;
392 }
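
/* Illustrative sketch (assumes an initialized GPUDrawObject `gdo' and a
 * vertex index `v'): every buffer point created for a vertex can be visited
 * by walking its link list, e.g. when updating that vertex in all places it
 * appears in the vertex buffer:
 *
 *     GPUVertPointLink *lnk;
 *     for (lnk = &gdo->vert_points[v]; lnk; lnk = lnk->next) {
 *         int point = lnk->point_index;
 *         ... point indexes a corner in the triangle point arrays ...
 *     }
 */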
393
394 /* update the vert_points and triangle_to_mface fields with a new
395  * triangle */
396 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
397                                         int base_point_index,
398                                         int face_index,
399                                         int v1, int v2, int v3)
400 {
401         int i, v[3] = {v1, v2, v3};
402         for (i = 0; i < 3; i++)
403                 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
404         gdo->triangle_to_mface[base_point_index / 3] = face_index;
405 }
406
407 /* for each vertex, build a list of points related to it; these lists
408  * are stored in an array sized to the number of vertices */
409 static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
410 {
411         GPUBufferMaterial *mat;
412         int i, mat_orig_to_new[MAX_MATERIALS];
413
414         /* allocate the array and space for links */
415         gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
416                                        "GPUDrawObject.vert_points");
417         gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
418                                            "GPUDrawObject.vert_points_mem");
419         gdo->vert_points_usage = 0;
420
421         /* build a map from the original material indices to the new
422          * GPUBufferMaterial indices */
423         for (i = 0; i < gdo->totmaterial; i++)
424                 mat_orig_to_new[gdo->materials[i].mat_nr] = i;
425
426         /* -1 indicates the link is not yet used */
427         for (i = 0; i < gdo->totvert; i++)
428                 gdo->vert_points[i].point_index = -1;
429
430         for (i = 0; i < totface; i++, f++) {
431                 mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];
432
433                 /* add triangle */
434                 gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
435                                             i, f->v1, f->v2, f->v3);
436                 mat->totpoint += 3;
437
438                 /* add second triangle for quads */
439                 if (f->v4) {
440                         gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
441                                                     i, f->v3, f->v4, f->v1);
442                         mat->totpoint += 3;
443                 }
444         }
445
446         /* map any unused vertices to loose points */
447         for (i = 0; i < gdo->totvert; i++) {
448                 if (gdo->vert_points[i].point_index == -1) {
449                         gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
450                         gdo->tot_loose_point++;
451                 }
452         }
453 }
454
455 /* see GPUDrawObject's structure definition for a description of the
456  * data being initialized here */
457 GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
458 {
459         GPUDrawObject *gdo;
460         MFace *mface;
461         int points_per_mat[MAX_MATERIALS];
462         int i, curmat, curpoint, totface;
463
464         mface = dm->getTessFaceArray(dm);
465         totface = dm->getNumTessFaces(dm);
466
467         /* get the number of points used by each material, treating
468          * each quad as two triangles */
469         memset(points_per_mat, 0, sizeof(int) * MAX_MATERIALS);
470         for (i = 0; i < totface; i++)
471                 points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;
472
473         /* create the GPUDrawObject */
474         gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
475         gdo->totvert = dm->getNumVerts(dm);
476         gdo->totedge = dm->getNumEdges(dm);
477
478         /* count the number of materials used by this DerivedMesh */
479         for (i = 0; i < MAX_MATERIALS; i++) {
480                 if (points_per_mat[i] > 0)
481                         gdo->totmaterial++;
482         }
483
484         /* allocate an array of materials used by this DerivedMesh */
485         gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
486                                      "GPUDrawObject.materials");
487
488         /* initialize the materials array */
489         for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
490                 if (points_per_mat[i] > 0) {
491                         gdo->materials[curmat].start = curpoint;
492                         gdo->materials[curmat].totpoint = 0;
493                         gdo->materials[curmat].mat_nr = i;
494
495                         curpoint += points_per_mat[i];
496                         curmat++;
497                 }
498         }
499
500         /* store total number of points used for triangles */
501         gdo->tot_triangle_point = curpoint;
502
503         gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
504                                              "GPUDrawObject.triangle_to_mface");
505
506         gpu_drawobject_init_vert_points(gdo, mface, totface);
507
508         return gdo;
509 }
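
/* Worked example of the resulting layout (illustrative numbers): a
 * DerivedMesh with one triangle and one quad using the same material gives
 * points_per_mat[mat] = 3 + 6 = 9, so tot_triangle_point = 9, the material's
 * range is start = 0 / totpoint = 9, and triangle_to_mface has 9 / 3 = 3
 * entries. Any vertex not referenced by a tessface is appended after
 * tot_triangle_point as a loose point and counted in tot_loose_point. */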
510
511 void GPU_drawobject_free(DerivedMesh *dm)
512 {
513         GPUDrawObject *gdo;
514
515         if (!dm || !(gdo = dm->drawObject))
516                 return;
517
518         MEM_freeN(gdo->materials);
519         MEM_freeN(gdo->triangle_to_mface);
520         MEM_freeN(gdo->vert_points);
521         MEM_freeN(gdo->vert_points_mem);
522         GPU_buffer_free(gdo->points);
523         GPU_buffer_free(gdo->normals);
524         GPU_buffer_free(gdo->uv);
525         GPU_buffer_free(gdo->colors);
526         GPU_buffer_free(gdo->edges);
527         GPU_buffer_free(gdo->uvedges);
528
529         MEM_freeN(gdo);
530         dm->drawObject = NULL;
531 }
532
533 typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
534                                   int *mat_orig_to_new, void *user_data);
535
536 static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
537                                    int vector_size, int size, GLenum target,
538                                    void *user, GPUBufferCopyFunc copy_f)
539 {
540         GPUBufferPool *pool;
541         GPUBuffer *buffer;
542         float *varray;
543         int mat_orig_to_new[MAX_MATERIALS];
544         int *cur_index_per_mat;
545         int i;
546         int success;
547         GLboolean uploaded;
548
549         pool = gpu_get_global_buffer_pool();
550
551         BLI_mutex_lock(&buffer_mutex);
552
553         /* alloc a GPUBuffer; fall back to legacy mode on failure */
554         if (!(buffer = gpu_buffer_alloc_intern(size)))
555                 dm->drawObject->legacy = 1;
556
557         /* nothing to do for legacy mode */
558         if (dm->drawObject->legacy) {
559                 BLI_mutex_unlock(&buffer_mutex);
560                 return NULL;
561         }
562
563         cur_index_per_mat = MEM_mallocN(sizeof(int) * object->totmaterial,
564                                         "GPU_buffer_setup.cur_index_per_mat");
565         for (i = 0; i < object->totmaterial; i++) {
566                 /* for each material, the current index to copy data to */
567                 cur_index_per_mat[i] = object->materials[i].start * vector_size;
568
569                 /* map from original material index to new
570                  * GPUBufferMaterial index */
571                 mat_orig_to_new[object->materials[i].mat_nr] = i;
572         }
573
574         if (useVBOs) {
575                 success = 0;
576
577                 while (!success) {
578                         /* bind the buffer and discard previous data,
579                          * avoids stalling gpu */
580                         glBindBufferARB(target, buffer->id);
581                         glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);
582
583                         /* attempt to map the buffer */
584                         if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
585                                 /* failed to map the buffer; delete it */
586                                 gpu_buffer_free_intern(buffer);
587                                 gpu_buffer_pool_delete_last(pool);
588                                 buffer = NULL;
589
590                                 /* try freeing an entry from the pool
591                                  * and reallocating the buffer */
592                                 if (pool->totbuf > 0) {
593                                         gpu_buffer_pool_delete_last(pool);
594                                         buffer = gpu_buffer_alloc_intern(size);
595                                 }
596
597                                 /* allocation still failed; fall back
598                                  * to legacy mode */
599                                 if (!buffer) {
600                                         dm->drawObject->legacy = 1;
601                                         success = 1;
602                                 }
603                         }
604                         else {
605                                 success = 1;
606                         }
607                 }
608
609                 /* check legacy fallback didn't happen */
610                 if (dm->drawObject->legacy == 0) {
611                         uploaded = GL_FALSE;
612                         /* attempt to upload the data to the VBO */
613                         while (uploaded == GL_FALSE) {
614                                 (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
615                                 /* glUnmapBuffer returns GL_FALSE if
616                                  * the data store is corrupted; retry
617                                  * in that case */
618                                 uploaded = glUnmapBufferARB(target);
619                         }
620                 }
621                 glBindBufferARB(target, 0);
622         }
623         else {
624                 /* VBO not supported, use vertex array fallback */
625                 if (buffer->pointer) {
626                         varray = buffer->pointer;
627                         (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
628                 }
629                 else {
630                         dm->drawObject->legacy = 1;
631                 }
632         }
633
634         MEM_freeN(cur_index_per_mat);
635
636         BLI_mutex_unlock(&buffer_mutex);
637
638         return buffer;
639 }
640
641 static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
642 {
643         MVert *mvert;
644         MFace *f;
645         int i, j, start, totface;
646
647         mvert = dm->getVertArray(dm);
648         f = dm->getTessFaceArray(dm);
649
650         totface = dm->getNumTessFaces(dm);
651         for (i = 0; i < totface; i++, f++) {
652                 start = index[mat_orig_to_new[f->mat_nr]];
653
654                 /* v1 v2 v3 */
655                 copy_v3_v3(&varray[start], mvert[f->v1].co);
656                 copy_v3_v3(&varray[start + 3], mvert[f->v2].co);
657                 copy_v3_v3(&varray[start + 6], mvert[f->v3].co);
658                 index[mat_orig_to_new[f->mat_nr]] += 9;
659
660                 if (f->v4) {
661                         /* v3 v4 v1 */
662                         copy_v3_v3(&varray[start + 9], mvert[f->v3].co);
663                         copy_v3_v3(&varray[start + 12], mvert[f->v4].co);
664                         copy_v3_v3(&varray[start + 15], mvert[f->v1].co);
665                         index[mat_orig_to_new[f->mat_nr]] += 9;
666                 }
667         }
668
669         /* copy loose points */
670         j = dm->drawObject->tot_triangle_point * 3;
671         for (i = 0; i < dm->drawObject->totvert; i++) {
672                 if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
673                         copy_v3_v3(&varray[j], mvert[i].co);
674                         j += 3;
675                 }
676         }
677 }
678
679 static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
680 {
681         int i, totface;
682         int start;
683         float f_no[3];
684
685         float *nors = dm->getTessFaceDataArray(dm, CD_NORMAL);
686         MVert *mvert = dm->getVertArray(dm);
687         MFace *f = dm->getTessFaceArray(dm);
688
689         totface = dm->getNumTessFaces(dm);
690         for (i = 0; i < totface; i++, f++) {
691                 const int smoothnormal = (f->flag & ME_SMOOTH);
692
693                 start = index[mat_orig_to_new[f->mat_nr]];
694                 index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;
695
696                 if (smoothnormal) {
697                         /* copy vertex normal */
698                         normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
699                         normal_short_to_float_v3(&varray[start + 3], mvert[f->v2].no);
700                         normal_short_to_float_v3(&varray[start + 6], mvert[f->v3].no);
701
702                         if (f->v4) {
703                                 normal_short_to_float_v3(&varray[start + 9], mvert[f->v3].no);
704                                 normal_short_to_float_v3(&varray[start + 12], mvert[f->v4].no);
705                                 normal_short_to_float_v3(&varray[start + 15], mvert[f->v1].no);
706                         }
707                 }
708                 else if (nors) {
709                         /* copy cached face normal */
710                         copy_v3_v3(&varray[start], &nors[i * 3]);
711                         copy_v3_v3(&varray[start + 3], &nors[i * 3]);
712                         copy_v3_v3(&varray[start + 6], &nors[i * 3]);
713
714                         if (f->v4) {
715                                 copy_v3_v3(&varray[start + 9], &nors[i * 3]);
716                                 copy_v3_v3(&varray[start + 12], &nors[i * 3]);
717                                 copy_v3_v3(&varray[start + 15], &nors[i * 3]);
718                         }
719                 }
720                 else {
721                         /* calculate face normal */
722                         if (f->v4)
723                                 normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
724                         else
725                                 normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);
726
727                         copy_v3_v3(&varray[start], f_no);
728                         copy_v3_v3(&varray[start + 3], f_no);
729                         copy_v3_v3(&varray[start + 6], f_no);
730
731                         if (f->v4) {
732                                 copy_v3_v3(&varray[start + 9], f_no);
733                                 copy_v3_v3(&varray[start + 12], f_no);
734                                 copy_v3_v3(&varray[start + 15], f_no);
735                         }
736                 }
737         }
738 }
739
740 static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
741 {
742         int start;
743         int i, totface;
744
745         MTFace *mtface;
746         MFace *f;
747
748         if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
749                 return;
750         f = dm->getTessFaceArray(dm);
751                 
752         totface = dm->getNumTessFaces(dm);
753         for (i = 0; i < totface; i++, f++) {
754                 start = index[mat_orig_to_new[f->mat_nr]];
755
756                 /* v1 v2 v3 */
757                 copy_v2_v2(&varray[start], mtface[i].uv[0]);
758                 copy_v2_v2(&varray[start + 2], mtface[i].uv[1]);
759                 copy_v2_v2(&varray[start + 4], mtface[i].uv[2]);
760                 index[mat_orig_to_new[f->mat_nr]] += 6;
761
762                 if (f->v4) {
763                         /* v3 v4 v1 */
764                         copy_v2_v2(&varray[start + 6], mtface[i].uv[2]);
765                         copy_v2_v2(&varray[start + 8], mtface[i].uv[3]);
766                         copy_v2_v2(&varray[start + 10], mtface[i].uv[0]);
767                         index[mat_orig_to_new[f->mat_nr]] += 6;
768                 }
769         }
770 }
771
772 static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
773 {
774         v[0] = col[3];
775         v[1] = col[2];
776         v[2] = col[1];
777 }
778
779 /* treat varray_ as an array of MCol, four MCol's per face */
780 static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
781 {
782         int i, totface;
783         unsigned char *varray = (unsigned char *)varray_;
784         unsigned char *mcol = (unsigned char *)user;
785         MFace *f = dm->getTessFaceArray(dm);
786
787         totface = dm->getNumTessFaces(dm);
788         for (i = 0; i < totface; i++, f++) {
789                 int start = index[mat_orig_to_new[f->mat_nr]];
790
791                 /* v1 v2 v3 */
792                 copy_mcol_uc3(&varray[start], &mcol[i * 16]);
793                 copy_mcol_uc3(&varray[start + 3], &mcol[i * 16 + 4]);
794                 copy_mcol_uc3(&varray[start + 6], &mcol[i * 16 + 8]);
795                 index[mat_orig_to_new[f->mat_nr]] += 9;
796
797                 if (f->v4) {
798                         /* v3 v4 v1 */
799                         copy_mcol_uc3(&varray[start + 9], &mcol[i * 16 + 8]);
800                         copy_mcol_uc3(&varray[start + 12], &mcol[i * 16 + 12]);
801                         copy_mcol_uc3(&varray[start + 15], &mcol[i * 16]);
802                         index[mat_orig_to_new[f->mat_nr]] += 9;
803                 }
804         }
805 }
806
807 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
808 {
809         MEdge *medge;
810         unsigned int *varray = (unsigned int *)varray_;
811         int i, totedge;
812  
813         medge = dm->getEdgeArray(dm);
814         totedge = dm->getNumEdges(dm);
815
816         for (i = 0; i < totedge; i++, medge++) {
817                 varray[i * 2] = dm->drawObject->vert_points[medge->v1].point_index;
818                 varray[i * 2 + 1] = dm->drawObject->vert_points[medge->v2].point_index;
819         }
820 }
821
822 static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
823 {
824         MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
825         int i, j = 0;
826
827         if (!tf)
828                 return;
829
830         for (i = 0; i < dm->numTessFaceData; i++, tf++) {
831                 MFace mf;
832                 dm->getTessFace(dm, i, &mf);
833
834                 copy_v2_v2(&varray[j], tf->uv[0]);
835                 copy_v2_v2(&varray[j + 2], tf->uv[1]);
836
837                 copy_v2_v2(&varray[j + 4], tf->uv[1]);
838                 copy_v2_v2(&varray[j + 6], tf->uv[2]);
839
840                 if (!mf.v4) {
841                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
842                         copy_v2_v2(&varray[j + 10], tf->uv[0]);
843                         j += 12;
844                 }
845                 else {
846                         copy_v2_v2(&varray[j + 8], tf->uv[2]);
847                         copy_v2_v2(&varray[j + 10], tf->uv[3]);
848
849                         copy_v2_v2(&varray[j + 12], tf->uv[3]);
850                         copy_v2_v2(&varray[j + 14], tf->uv[0]);
851                         j += 16;
852                 }
853         }
854 }
855
856 typedef enum {
857         GPU_BUFFER_VERTEX = 0,
858         GPU_BUFFER_NORMAL,
859         GPU_BUFFER_COLOR,
860         GPU_BUFFER_UV,
861         GPU_BUFFER_EDGE,
862         GPU_BUFFER_UVEDGE,
863 } GPUBufferType;
864
865 typedef struct {
866         GPUBufferCopyFunc copy;
867         GLenum gl_buffer_type;
868         int vector_size;
869 } GPUBufferTypeSettings;
870
871 const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
872         {GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
873         {GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
874         {GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
875         {GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
876         {GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
877         {GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
878 };
879
880 /* get the GPUDrawObject buffer associated with a type */
881 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
882 {
883         switch (type) {
884                 case GPU_BUFFER_VERTEX:
885                         return &gdo->points;
886                 case GPU_BUFFER_NORMAL:
887                         return &gdo->normals;
888                 case GPU_BUFFER_COLOR:
889                         return &gdo->colors;
890                 case GPU_BUFFER_UV:
891                         return &gdo->uv;
892                 case GPU_BUFFER_EDGE:
893                         return &gdo->edges;
894                 case GPU_BUFFER_UVEDGE:
895                         return &gdo->uvedges;
896                 default:
897                         return NULL;
898         }
899 }
900
901 /* get the amount of space to allocate for a buffer of a particular type */
902 static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
903 {
904         switch (type) {
905                 case GPU_BUFFER_VERTEX:
906                         return sizeof(float) * 3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
907                 case GPU_BUFFER_NORMAL:
908                         return sizeof(float) * 3 * dm->drawObject->tot_triangle_point;
909                 case GPU_BUFFER_COLOR:
910                         return sizeof(char) * 3 * dm->drawObject->tot_triangle_point;
911                 case GPU_BUFFER_UV:
912                         return sizeof(float) * 2 * dm->drawObject->tot_triangle_point;
913                 case GPU_BUFFER_EDGE:
914                         return sizeof(int) * 2 * dm->drawObject->totedge;
915                 case GPU_BUFFER_UVEDGE:
916                         /* each triangle has 3 corners and 3 edges, and each
917                          * edge stores its own non-shared UV coords (4 floats
918                          * per edge), so each tri corner needs a minimum of 4
919                          * floats; quads use less, so we can over-allocate
920                          * here and assume all tris. */
921                         return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
922                 default:
923                         return -1;
924         }
925 }
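
/* Worked example (illustrative): for a mesh tessellated into 2 triangles and
 * 1 quad, tot_triangle_point = 2 * 3 + 6 = 12, so the vertex buffer needs
 * sizeof(float) * 3 * (12 + tot_loose_point) bytes and the color buffer
 * sizeof(char) * 3 * 12 = 36 bytes. */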
926
927 /* call gpu_buffer_setup with settings for a particular type of buffer */
928 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
929 {
930         const GPUBufferTypeSettings *ts;
931         void *user_data = NULL;
932         GPUBuffer *buf;
933
934         ts = &gpu_buffer_type_settings[type];
935
936         /* special handling for MCol and UV buffers */
937         if (type == GPU_BUFFER_COLOR) {
938                 if (!(user_data = DM_get_tessface_data_layer(dm, dm->drawObject->colType)))
939                         return NULL;
940         }
941         else if (type == GPU_BUFFER_UV) {
942                 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
943                         return NULL;
944         }
945
946         buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
947                                gpu_buffer_size_from_type(dm, type),
948                                ts->gl_buffer_type, user_data, ts->copy);
949
950         return buf;
951 }
952
953 /* get the buffer of `type', initializing the GPUDrawObject and
954  * buffer if needed */
955 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
956 {
957         GPUBuffer **buf;
958
959         if (!dm->drawObject)
960                 dm->drawObject = GPU_drawobject_new(dm);
961
962         buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
963         if (!(*buf))
964                 *buf = gpu_buffer_setup_type(dm, type);
965
966         return *buf;
967 }
968
969 void GPU_vertex_setup(DerivedMesh *dm)
970 {
971         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
972                 return;
973
974         glEnableClientState(GL_VERTEX_ARRAY);
975         if (useVBOs) {
976                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
977                 glVertexPointer(3, GL_FLOAT, 0, 0);
978         }
979         else {
980                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
981         }
982         
983         GLStates |= GPU_BUFFER_VERTEX_STATE;
984 }
985
986 void GPU_normal_setup(DerivedMesh *dm)
987 {
988         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
989                 return;
990
991         glEnableClientState(GL_NORMAL_ARRAY);
992         if (useVBOs) {
993                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
994                 glNormalPointer(GL_FLOAT, 0, 0);
995         }
996         else {
997                 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
998         }
999
1000         GLStates |= GPU_BUFFER_NORMAL_STATE;
1001 }
1002
1003 void GPU_uv_setup(DerivedMesh *dm)
1004 {
1005         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
1006                 return;
1007
1008         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
1009         if (useVBOs) {
1010                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
1011                 glTexCoordPointer(2, GL_FLOAT, 0, 0);
1012         }
1013         else {
1014                 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
1015         }
1016
1017         GLStates |= GPU_BUFFER_TEXCOORD_STATE;
1018 }
1019
1020 void GPU_color_setup(DerivedMesh *dm, int colType)
1021 {
1022         if (!dm->drawObject) {
1023                 /* XXX Not really nice, but we need a valid gpu draw object to set the colType...
1024                  *     Else we would have to add a new param to gpu_buffer_setup_common. */
1025                 dm->drawObject = GPU_drawobject_new(dm);
1026                 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
1027                 dm->drawObject->colType = colType;
1028         }
1029         /* In paint mode, dm may stay the same during stroke, however we still want to update colors!
1030          * Also check in case we changed color type (i.e. which MCol cdlayer we use). */
1031         else if ((dm->dirty & DM_DIRTY_MCOL_UPDATE_DRAW) || (colType != dm->drawObject->colType)) {
1032                 GPUBuffer **buf = gpu_drawobject_buffer_from_type(dm->drawObject, GPU_BUFFER_COLOR);
1033                 /* XXX Freeing this buffer is a bit stupid, as the geometry has not changed and the size should remain the same.
1034                  *     Not sure though whether it would be worth defining a sort of gpu_buffer_update func - nor whether
1035                  *     it is even possible! */
1036                 GPU_buffer_free(*buf);
1037                 *buf = NULL;
1038                 dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
1039                 dm->drawObject->colType = colType;
1040         }
1041
1042         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
1043                 return;
1044
1045         glEnableClientState(GL_COLOR_ARRAY);
1046         if (useVBOs) {
1047                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
1048                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
1049         }
1050         else {
1051                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
1052         }
1053
1054         GLStates |= GPU_BUFFER_COLOR_STATE;
1055 }
1056
1057 void GPU_edge_setup(DerivedMesh *dm)
1058 {
1059         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1060                 return;
1061
1062         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1063                 return;
1064
1065         glEnableClientState(GL_VERTEX_ARRAY);
1066         if (useVBOs) {
1067                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1068                 glVertexPointer(3, GL_FLOAT, 0, 0);
1069         }
1070         else {
1071                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1072         }
1073         
1074         GLStates |= GPU_BUFFER_VERTEX_STATE;
1075
1076         if (useVBOs)
1077                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1078
1079         GLStates |= GPU_BUFFER_ELEMENT_STATE;
1080 }
1081
1082 void GPU_uvedge_setup(DerivedMesh *dm)
1083 {
1084         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1085                 return;
1086
1087         glEnableClientState(GL_VERTEX_ARRAY);
1088         if (useVBOs) {
1089                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1090                 glVertexPointer(2, GL_FLOAT, 0, 0);
1091         }
1092         else {
1093                 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1094         }
1095         
1096         GLStates |= GPU_BUFFER_VERTEX_STATE;
1097 }
1098
1099 static int GPU_typesize(int type)
1100 {
1101         switch (type) {
1102                 case GL_FLOAT:
1103                         return sizeof(float);
1104                 case GL_INT:
1105                         return sizeof(int);
1106                 case GL_UNSIGNED_INT:
1107                         return sizeof(unsigned int);
1108                 case GL_BYTE:
1109                         return sizeof(char);
1110                 case GL_UNSIGNED_BYTE:
1111                         return sizeof(unsigned char);
1112                 default:
1113                         return 0;
1114         }
1115 }
1116
1117 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1118 {
1119         int i, elementsize = 0;
1120
1121         for (i = 0; i < numdata; i++) {
1122                 int typesize = GPU_typesize(data[i].type);
1123                 if (typesize != 0)
1124                         elementsize += typesize * data[i].size;
1125         }
1126         return elementsize;
1127 }
1128
1129 void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
1130 {
1131         int i;
1132         int elementsize;
1133         intptr_t offset = 0;
1134
1135         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1136                 if (attribData[i].index != -1) {
1137                         glDisableVertexAttribArrayARB(attribData[i].index);
1138                 }
1139                 else
1140                         break;
1141         }
1142         elementsize = GPU_attrib_element_size(data, numdata);
1143
1144         if (useVBOs) {
1145                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1146                 for (i = 0; i < numdata; i++) {
1147                         glEnableVertexAttribArrayARB(data[i].index);
1148                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1149                                                  GL_FALSE, elementsize, (void *)offset);
1150                         offset += data[i].size * GPU_typesize(data[i].type);
1151
1152                         attribData[i].index = data[i].index;
1153                         attribData[i].size = data[i].size;
1154                         attribData[i].type = data[i].type;
1155                 }
1156                 attribData[numdata].index = -1;
1157         }
1158         else {
1159                 for (i = 0; i < numdata; i++) {
1160                         glEnableVertexAttribArrayARB(data[i].index);
1161                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1162                                                  GL_FALSE, elementsize, (char *)buffer->pointer + offset);
1163                         offset += data[i].size * GPU_typesize(data[i].type);
1164                 }
1165         }
1166 }
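
/* Illustrative sketch (the attribute locations `loc_orco' and `loc_uv' are
 * hypothetical, obtained from the shader elsewhere): interleaved attributes
 * are described with a GPUAttrib array and bound in one call:
 *
 *     GPUAttrib attribs[2];
 *     attribs[0].index = loc_orco; attribs[0].size = 3; attribs[0].type = GL_FLOAT;
 *     attribs[1].index = loc_uv;   attribs[1].size = 2; attribs[1].type = GL_FLOAT;
 *     GPU_interleaved_attrib_setup(buffer, attribs, 2);
 *
 * here GPU_attrib_element_size(attribs, 2) gives a stride of 3 * 4 + 2 * 4 =
 * 20 bytes per element. */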
1167
1168
1169 void GPU_buffer_unbind(void)
1170 {
1171         int i;
1172
1173         if (GLStates & GPU_BUFFER_VERTEX_STATE)
1174                 glDisableClientState(GL_VERTEX_ARRAY);
1175         if (GLStates & GPU_BUFFER_NORMAL_STATE)
1176                 glDisableClientState(GL_NORMAL_ARRAY);
1177         if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1178                 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1179         if (GLStates & GPU_BUFFER_COLOR_STATE)
1180                 glDisableClientState(GL_COLOR_ARRAY);
1181         if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1182                 if (useVBOs) {
1183                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1184                 }
1185         }
1186         GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1187                       GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1188                       GPU_BUFFER_ELEMENT_STATE);
1189
1190         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1191                 if (attribData[i].index != -1) {
1192                         glDisableVertexAttribArrayARB(attribData[i].index);
1193                 }
1194                 else
1195                         break;
1196         }
1197
1198         if (useVBOs)
1199                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1200 }
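
/* Illustrative sketch of a typical draw cycle built on the setup/unbind
 * functions above (the actual glDrawArrays call is issued by the mesh
 * drawing code, not by this file):
 *
 *     GPU_vertex_setup(dm);
 *     GPU_normal_setup(dm);
 *     if (!GPU_buffer_legacy(dm))
 *         glDrawArrays(GL_TRIANGLES, 0, dm->drawObject->tot_triangle_point);
 *     GPU_buffer_unbind();
 */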
1201
1202 void GPU_color_switch(int mode)
1203 {
1204         if (mode) {
1205                 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1206                         glEnableClientState(GL_COLOR_ARRAY);
1207                 GLStates |= GPU_BUFFER_COLOR_STATE;
1208         }
1209         else {
1210                 if (GLStates & GPU_BUFFER_COLOR_STATE)
1211                         glDisableClientState(GL_COLOR_ARRAY);
1212                 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1213         }
1214 }
1215
1216 /* return 1 if drawing should be done using old immediate-mode
1217  * code, 0 otherwise */
1218 int GPU_buffer_legacy(DerivedMesh *dm)
1219 {
1220         int test = (U.gameflags & USER_DISABLE_VBO);
1221         if (test)
1222                 return 1;
1223
1224         if (dm->drawObject == 0)
1225                 dm->drawObject = GPU_drawobject_new(dm);
1226         return dm->drawObject->legacy;
1227 }
1228
1229 void *GPU_buffer_lock(GPUBuffer *buffer)
1230 {
1231         float *varray;
1232
1233         if (!buffer)
1234                 return 0;
1235
1236         if (useVBOs) {
1237                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1238                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1239                 return varray;
1240         }
1241         else {
1242                 return buffer->pointer;
1243         }
1244 }
1245
1246 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1247 {
1248         float *varray;
1249
1250         if (!buffer)
1251                 return 0;
1252
1253         if (useVBOs) {
1254                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1255                 /* discard previous data, avoid stalling gpu */
1256                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1257                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1258                 return varray;
1259         }
1260         else {
1261                 return buffer->pointer;
1262         }
1263 }
1264
1265 void GPU_buffer_unlock(GPUBuffer *buffer)
1266 {
1267         if (useVBOs) {
1268                 if (buffer) {
1269                         /* note: this operation can fail, could return
1270                          * an error code from this function? */
1271                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1272                 }
1273                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1274         }
1275 }
1276
1277 /* used for drawing edges */
1278 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1279 {
1280         glDrawElements(mode, count, GL_UNSIGNED_INT,
1281                        (useVBOs ?
1282                         (void *)(start * sizeof(unsigned int)) :
1283                         ((int *)elements->pointer) + start));
1284 }
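
/* Illustrative sketch: drawing all edges with the element buffer bound by
 * GPU_edge_setup() (GPU_buffer_copy_edge stores two point indices per edge):
 *
 *     GPU_edge_setup(dm);
 *     if (!GPU_buffer_legacy(dm))
 *         GPU_buffer_draw_elements(dm->drawObject->edges, GL_LINES, 0,
 *                                  dm->drawObject->totedge * 2);
 *     GPU_buffer_unbind();
 */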
1285
1286
1287 /* XXX: the rest of the code in this file is used for optimized PBVH
1288  * drawing and doesn't interact at all with the buffer code above */
1289
1290 /* Return false if VBO is either unavailable or disabled by the user,
1291  * true otherwise */
1292 static int gpu_vbo_enabled(void)
1293 {
1294         return (GLEW_ARB_vertex_buffer_object &&
1295                 !(U.gameflags & USER_DISABLE_VBO));
1296 }
1297
1298 /* Convenience struct for building the VBO. */
1299 typedef struct {
1300         float co[3];
1301         short no[3];
1302
1303         /* inserting this to align the 'color' field to a four-byte
1304          * boundary; drastically increases viewport performance on my
1305          * drivers (Gallium/Radeon) --nicholasbishop */
1306         char pad[2];
1307         
1308         unsigned char color[3];
1309 } VertexBufferFormat;
1310
1311 struct GPU_Buffers {
1312         /* opengl buffer handles */
1313         GLuint vert_buf, index_buf;
1314         GLenum index_type;
1315
1316         /* mesh pointers in case buffer allocation fails */
1317         MFace *mface;
1318         MVert *mvert;
1319         int *face_indices;
1320         int totface;
1321         const float *vmask;
1322
1323         /* grid pointers */
1324         CCGKey gridkey;
1325         CCGElem **grids;
1326         const DMFlagMat *grid_flag_mats;
1327         BLI_bitmap * const *grid_hidden;
1328         int *grid_indices;
1329         int totgrid;
1330         int has_hidden;
1331
1332         int use_bmesh;
1333
1334         unsigned int tot_tri, tot_quad;
1335
1336         /* The PBVH ensures that either all faces in the node are
1337          * smooth-shaded or all faces are flat-shaded */
1338         int smooth;
1339
1340         int show_diffuse_color;
1341         float diffuse_color[4];
1342 };
1343 typedef enum {
1344         VBO_ENABLED,
1345         VBO_DISABLED
1346 } VBO_State;
1347
1348 static void gpu_colors_enable(VBO_State vbo_state)
1349 {
1350         glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
1351         glEnable(GL_COLOR_MATERIAL);
1352         if (vbo_state == VBO_ENABLED)
1353                 glEnableClientState(GL_COLOR_ARRAY);
1354 }
1355
1356 static void gpu_colors_disable(VBO_State vbo_state)
1357 {
1358         glDisable(GL_COLOR_MATERIAL);
1359         if (vbo_state == VBO_ENABLED)
1360                 glDisableClientState(GL_COLOR_ARRAY);
1361 }
1362
1363 static float gpu_color_from_mask(float mask)
1364 {
1365         return 1.0f - mask * 0.75f;
1366 }
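
/* Worked example: the mask is mapped linearly to a brightness factor, so an
 * unmasked vertex (mask 0.0) keeps its full diffuse color (factor 1.0) while
 * a fully masked vertex (mask 1.0) is darkened to a factor of 0.25. */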
1367
1368 static void gpu_color_from_mask_copy(float mask, const float diffuse_color[4], unsigned char out[3])
1369 {
1370         float mask_color;
1371
1372         mask_color = gpu_color_from_mask(mask) * 255.0f;
1373
1374         out[0] = diffuse_color[0] * mask_color;
1375         out[1] = diffuse_color[1] * mask_color;
1376         out[2] = diffuse_color[2] * mask_color;
1377 }
1378
1379 static void gpu_color_from_mask_set(float mask, float diffuse_color[4])
1380 {
1381         float color = gpu_color_from_mask(mask);
1382         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1383 }
1384
1385 static float gpu_color_from_mask_quad(const CCGKey *key,
1386                                       CCGElem *a, CCGElem *b,
1387                                       CCGElem *c, CCGElem *d)
1388 {
1389         return gpu_color_from_mask((*CCG_elem_mask(key, a) +
1390                                     *CCG_elem_mask(key, b) +
1391                                     *CCG_elem_mask(key, c) +
1392                                     *CCG_elem_mask(key, d)) * 0.25f);
1393 }
1394
1395 static void gpu_color_from_mask_quad_copy(const CCGKey *key,
1396                                           CCGElem *a, CCGElem *b,
1397                                           CCGElem *c, CCGElem *d,
1398                                           const float *diffuse_color,
1399                                           unsigned char out[3])
1400 {
1401         float mask_color =
1402             gpu_color_from_mask((*CCG_elem_mask(key, a) +
1403                                  *CCG_elem_mask(key, b) +
1404                                  *CCG_elem_mask(key, c) +
1405                                  *CCG_elem_mask(key, d)) * 0.25f) * 255.0f;
1406
1407         out[0] = diffuse_color[0] * mask_color;
1408         out[1] = diffuse_color[1] * mask_color;
1409         out[2] = diffuse_color[2] * mask_color;
1410 }
1411
1412 static void gpu_color_from_mask_quad_set(const CCGKey *key,
1413                                          CCGElem *a, CCGElem *b,
1414                                          CCGElem *c, CCGElem *d,
1415                                          float diffuse_color[4])
1416 {
1417         float color = gpu_color_from_mask_quad(key, a, b, c, d);
1418         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1419 }
1420
1421 void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
1422                              int *vert_indices, int totvert, const float *vmask,
1423                              int (*face_vert_indices)[4], int show_diffuse_color)
1424 {
1425         VertexBufferFormat *vert_data;
1426         int i, j, k;
1427
1428         buffers->vmask = vmask;
1429         buffers->show_diffuse_color = show_diffuse_color;
1430
1431         if (buffers->vert_buf) {
1432                 int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));
1433                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1434
1435                 if (buffers->show_diffuse_color) {
1436                         MFace *f = buffers->mface + buffers->face_indices[0];
1437
1438                         GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
1439                 }
1440
1441                 copy_v4_v4(buffers->diffuse_color, diffuse_color);
1442
1443                 /* Build VBO */
1444                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1445                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1446                                                 sizeof(VertexBufferFormat) * totelem,
1447                                                 NULL, GL_STATIC_DRAW_ARB);
1448
1449                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1450
1451                 if (vert_data) {
1452                         /* Vertex data is shared if smooth-shaded, but separate
1453                          * copies are made for flat shading because normals
1454                          * shouldn't be shared. */
1455                         if (buffers->smooth) {
1456                                 for (i = 0; i < totvert; ++i) {
1457                                         MVert *v = mvert + vert_indices[i];
1458                                         VertexBufferFormat *out = vert_data + i;
1459
1460                                         copy_v3_v3(out->co, v->co);
1461                                         memcpy(out->no, v->no, sizeof(short) * 3);
1462                                 }
1463
1464 #define UPDATE_VERTEX(face, vertex, index, diffuse_color) \
1465                                 { \
1466                                         VertexBufferFormat *out = vert_data + face_vert_indices[face][index]; \
1467                                         if (vmask) \
1468                                                 gpu_color_from_mask_copy(vmask[vertex], diffuse_color, out->color); \
1469                                         else \
1470                                                 rgb_float_to_uchar(out->color, diffuse_color); \
1471                                 } (void)0
1472
1473                                 for (i = 0; i < buffers->totface; i++) {
1474                                         MFace *f = buffers->mface + buffers->face_indices[i];
1475
1476                                         UPDATE_VERTEX(i, f->v1, 0, diffuse_color);
1477                                         UPDATE_VERTEX(i, f->v2, 1, diffuse_color);
1478                                         UPDATE_VERTEX(i, f->v3, 2, diffuse_color);
1479                                         if (f->v4)
1480                                                 UPDATE_VERTEX(i, f->v4, 3, diffuse_color);
1481                                 }
1482 #undef UPDATE_VERTEX
1483                         }
1484                         else {
1485                                 for (i = 0; i < buffers->totface; ++i) {
1486                                         const MFace *f = &buffers->mface[buffers->face_indices[i]];
1487                                         const unsigned int *fv = &f->v1;
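                                        /* each face is emitted as triangles: (v1, v2, v3) and, for quads, (v4, v1, v3) */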
1488                                         const int vi[2][3] = {{0, 1, 2}, {3, 0, 2}};
1489                                         float fno[3];
1490                                         short no[3];
1491
1492                                         float fmask;
1493
1494                                         if (paint_is_face_hidden(f, mvert))
1495                                                 continue;
1496
1497                                         /* Face normal and mask */
1498                                         if (f->v4) {
1499                                                 normal_quad_v3(fno,
1500                                                                            mvert[fv[0]].co,
1501                                                                            mvert[fv[1]].co,
1502                                                                            mvert[fv[2]].co,
1503                                                                            mvert[fv[3]].co);
1504                                                 if (vmask) {
1505                                                         fmask = (vmask[fv[0]] +
1506                                                                          vmask[fv[1]] +
1507                                                                          vmask[fv[2]] +
1508                                                                          vmask[fv[3]]) * 0.25f;
1509                                                 }
1510                                         }
1511                                         else {
1512                                                 normal_tri_v3(fno,
1513                                                                           mvert[fv[0]].co,
1514                                                                           mvert[fv[1]].co,
1515                                                                           mvert[fv[2]].co);
1516                                                 if (vmask) {
1517                                                         fmask = (vmask[fv[0]] +
1518                                                                          vmask[fv[1]] +
1519                                                                          vmask[fv[2]]) / 3.0f;
1520                                                 }
1521                                         }
1522                                         normal_float_to_short_v3(no, fno);
1523
1524                                         for (j = 0; j < (f->v4 ? 2 : 1); j++) {
1525                                                 for (k = 0; k < 3; k++) {
1526                                                         const MVert *v = &mvert[fv[vi[j][k]]];
1527                                                         VertexBufferFormat *out = vert_data;
1528
1529                                                         copy_v3_v3(out->co, v->co);
1530                                                         memcpy(out->no, no, sizeof(short) * 3);
1531
1532                                                         if (vmask)
1533                                                                 gpu_color_from_mask_copy(fmask, diffuse_color, out->color);
1534                                                         else
1535                                                                 rgb_float_to_uchar(out->color, diffuse_color);
1536
1537                                                         vert_data++;
1538                                                 }
1539                                         }
1540                                 }
1541                         }
1542
1543                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1544                 }
1545                 else {
1546                         glDeleteBuffersARB(1, &buffers->vert_buf);
1547                         buffers->vert_buf = 0;
1548                 }
1549
1550                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1551         }
1552
1553         buffers->mvert = mvert;
1554 }
1555
1556 GPU_Buffers *GPU_build_mesh_buffers(int (*face_vert_indices)[4],
1557                                     MFace *mface, MVert *mvert,
1558                                     int *face_indices,
1559                                     int totface)
1560 {
1561         GPU_Buffers *buffers;
1562         unsigned short *tri_data;
1563         int i, j, k, tottri;
1564
1565         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1566         buffers->index_type = GL_UNSIGNED_SHORT;
1567         buffers->smooth = mface[face_indices[0]].flag & ME_SMOOTH;
1568
1569         buffers->show_diffuse_color = FALSE;
1570
1571         /* Count the number of visible triangles */
1572         for (i = 0, tottri = 0; i < totface; ++i) {
1573                 const MFace *f = &mface[face_indices[i]];
1574                 if (!paint_is_face_hidden(f, mvert))
1575                         tottri += f->v4 ? 2 : 1;
1576         }
1577
1578         /* An element index buffer is used for smooth shading, but flat
1579          * shading requires separate vertex normals, so an index buffer
1580          * can't be used there. */
1581         if (gpu_vbo_enabled() && buffers->smooth)
1582                 glGenBuffersARB(1, &buffers->index_buf);
1583
1584         if (buffers->index_buf) {
1585                 /* Generate index buffer object */
1586                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1587                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
1588                                 sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);
1589
1590                 /* Fill the triangle buffer */
1591                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1592                 if (tri_data) {
1593                         for (i = 0; i < totface; ++i) {
1594                                 const MFace *f = mface + face_indices[i];
1595                                 int v[3];
1596
1597                                 /* Skip hidden faces */
1598                                 if (paint_is_face_hidden(f, mvert))
1599                                         continue;
1600
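                                /* the first triangle uses corners 0, 1, 2; for quads a second triangle uses corners 3, 0, 2 */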
1601                                 v[0] = 0;
1602                                 v[1] = 1;
1603                                 v[2] = 2;
1604
1605                                 for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
1606                                         for (k = 0; k < 3; ++k) {
1607                                                 *tri_data = face_vert_indices[i][v[k]];
1608                                                 tri_data++;
1609                                         }
1610                                         v[0] = 3;
1611                                         v[1] = 0;
1612                                         v[2] = 2;
1613                                 }
1614                         }
1615                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
1616                 }
1617                 else {
1618                         glDeleteBuffersARB(1, &buffers->index_buf);
1619                         buffers->index_buf = 0;
1620                 }
1621
1622                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1623         }
1624
1625         if (gpu_vbo_enabled() && (buffers->index_buf || !buffers->smooth))
1626                 glGenBuffersARB(1, &buffers->vert_buf);
1627
1628         buffers->tot_tri = tottri;
1629
1630         buffers->mface = mface;
1631         buffers->face_indices = face_indices;
1632         buffers->totface = totface;
1633
1634         return buffers;
1635 }
1636
1637 void GPU_update_grid_buffers(GPU_Buffers *buffers, CCGElem **grids,
1638                              const DMFlagMat *grid_flag_mats, int *grid_indices,
1639                              int totgrid, const CCGKey *key, int show_diffuse_color)
1640 {
1641         VertexBufferFormat *vert_data;
1642         int i, j, k, x, y;
1643
1644         buffers->show_diffuse_color = show_diffuse_color;
1645
1646         /* Build VBO */
1647         if (buffers->vert_buf) {
1648                 int totvert = key->grid_area * totgrid;
1649                 int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1650                 const int has_mask = key->has_mask;
1651                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1652
1653                 if (buffers->show_diffuse_color) {
1654                         const DMFlagMat *flags = &grid_flag_mats[grid_indices[0]];
1655
1656                         GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
1657                 }
1658
1659                 copy_v4_v4(buffers->diffuse_color, diffuse_color);
1660
1661                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1662                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1663                                 sizeof(VertexBufferFormat) * totvert,
1664                                 NULL, GL_STATIC_DRAW_ARB);
1665                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1666                 if (vert_data) {
1667                         for (i = 0; i < totgrid; ++i) {
1668                                 VertexBufferFormat *vd = vert_data;
1669                                 CCGElem *grid = grids[grid_indices[i]];
1670
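                                /* write the coordinates of every grid vertex; for smooth shading the
                                 * normal and mask color are written here as well */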
1671                                 for (y = 0; y < key->grid_size; y++) {
1672                                         for (x = 0; x < key->grid_size; x++) {
1673                                                 CCGElem *elem = CCG_grid_elem(key, grid, x, y);
1674                                                 
1675                                                 copy_v3_v3(vd->co, CCG_elem_co(key, elem));
1676                                                 if (smooth) {
1677                                                         normal_float_to_short_v3(vd->no, CCG_elem_no(key, elem));
1678
1679                                                         if (has_mask) {
1680                                                                 gpu_color_from_mask_copy(*CCG_elem_mask(key, elem),
1681                                                                                          diffuse_color, vd->color);
1682                                                         }
1683                                                 }
1684                                                 vd++;
1685                                         }
1686                                 }
1687                                 
1688                                 if (!smooth) {
1689                                         /* for flat shading, recalculate the normals and store the flat
1690                                          * normal on the last vertex of each quad in the index buffer,
1691                                          * since that is the vertex OpenGL uses for flat shading */
1692                                         for (j = 0; j < key->grid_size - 1; j++) {
1693                                                 for (k = 0; k < key->grid_size - 1; k++) {
1694                                                         CCGElem *elems[4] = {
1695                                                                 CCG_grid_elem(key, grid, k, j + 1),
1696                                                                 CCG_grid_elem(key, grid, k + 1, j + 1),
1697                                                                 CCG_grid_elem(key, grid, k + 1, j),
1698                                                                 CCG_grid_elem(key, grid, k, j)
1699                                                         };
1700                                                         float fno[3];
1701
1702                                                         normal_quad_v3(fno,
1703                                                                        CCG_elem_co(key, elems[0]),
1704                                                                        CCG_elem_co(key, elems[1]),
1705                                                                        CCG_elem_co(key, elems[2]),
1706                                                                        CCG_elem_co(key, elems[3]));
1707
1708                                                         vd = vert_data + (j + 1) * key->grid_size + (k + 1);
1709                                                         normal_float_to_short_v3(vd->no, fno);
1710
1711                                                         if (has_mask) {
1712                                                                 gpu_color_from_mask_quad_copy(key,
1713                                                                                               elems[0],
1714                                                                                               elems[1],
1715                                                                                               elems[2],
1716                                                                                               elems[3],
1717                                                                                               diffuse_color,
1718                                                                                               vd->color);
1719                                                         }
1720                                                 }
1721                                         }
1722                                 }
1723
1724                                 vert_data += key->grid_area;
1725                         }
1726                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1727                 }
1728                 else {
1729                         glDeleteBuffersARB(1, &buffers->vert_buf);
1730                         buffers->vert_buf = 0;
1731                 }
1732                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1733         }
1734
1735         buffers->grids = grids;
1736         buffers->grid_indices = grid_indices;
1737         buffers->totgrid = totgrid;
1738         buffers->grid_flag_mats = grid_flag_mats;
1739         buffers->gridkey = *key;
1740
1741         buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1742
1743         //printf("node updated %p\n", buffers);
1744 }
1745
1746 /* Returns the number of visible quads in the node's grids. */
1747 static int gpu_count_grid_quads(BLI_bitmap **grid_hidden,
1748                                 int *grid_indices, int totgrid,
1749                                 int gridsize)
1750 {
1751         int gridarea = (gridsize - 1) * (gridsize - 1);
1752         int i, x, y, totquad;
1753
1754         /* a grid-hidden layer may be present, so each grid has to be
1755          * checked for visibility */
1756
1757         for (i = 0, totquad = 0; i < totgrid; i++) {
1758                 const BLI_bitmap *gh = grid_hidden[grid_indices[i]];
1759
1760                 if (gh) {
1761                         /* a grid-hidden bitmap is present, so each face has to be checked */
1762                         for (y = 0; y < gridsize - 1; y++) {
1763                                 for (x = 0; x < gridsize - 1; x++) {
1764                                         if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
1765                                                 totquad++;
1766                                 }
1767                         }
1768                 }
1769                 else
1770                         totquad += gridarea;
1771         }
1772
1773         return totquad;
1774 }
1775
1776 /* Build the element array buffer of grid indices using either
1777  * unsigned shorts or unsigned ints. */
1778 #define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_)                     \
1779         {                                                                   \
1780                 type_ *quad_data;                                               \
1781                 int offset = 0;                                                 \
1782                 int i, j, k;                                                    \
1783                                                                                 \
1784                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,                    \
1785                                 sizeof(type_) * (tot_quad_) * 4, NULL,          \
1786                                 GL_STATIC_DRAW_ARB);                            \
1787                                                                                 \
1788                 /* Fill the quad buffer */                                      \
1789                 quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB,         \
1790                                            GL_WRITE_ONLY_ARB);                  \
1791                 if (quad_data) {                                                \
1792                         for (i = 0; i < totgrid; ++i) {                             \
1793                                 BLI_bitmap *gh = NULL;                                  \
1794                                 if (grid_hidden)                                        \
1795                                         gh = grid_hidden[(grid_indices)[i]];                \
1796                                                                                                                                                 \
1797                                 for (j = 0; j < gridsize - 1; ++j) {                    \
1798                                         for (k = 0; k < gridsize - 1; ++k) {                \
1799                                                 /* Skip hidden grid face */                     \
1800                                                 if (gh &&                                       \
1801                                                     paint_is_grid_face_hidden(gh,               \
1802                                                                               gridsize, k, j))  \
1803                                                         continue;                                   \
1804                                                                                                                                                 \
1805                                                 *(quad_data++) = offset + j * gridsize + k + 1; \
1806                                                 *(quad_data++) = offset + j * gridsize + k;     \
1807                                                 *(quad_data++) = offset + (j + 1) * gridsize + k; \
1808                                                 *(quad_data++) = offset + (j + 1) * gridsize + k + 1; \
1809                                         }                                                   \
1810                                 }                                                       \
1811                                                                                                                                                 \
1812                                 offset += gridsize * gridsize;                          \
1813                         }                                                           \
1814                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);              \
1815                 }                                                               \
1816                 else {                                                          \
1817                         glDeleteBuffersARB(1, &(buffer_));                          \
1818                         (buffer_) = 0;                                              \
1819                 }                                                               \
1820         } (void)0
1821 /* end FILL_QUAD_BUFFER */
1822
1823 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1824 {
1825         static int prev_gridsize = -1;
1826         static GLenum prev_index_type = 0;
1827         static GLuint buffer = 0;
1828         static unsigned prev_totquad;
1829
1830         /* used in the FILL_QUAD_BUFFER macro */
1831         BLI_bitmap * const *grid_hidden = NULL;
1832         int *grid_indices = NULL;
1833         int totgrid = 1;
1834
1835         /* VBO is disabled; delete the previous buffer (if it exists) and
1836          * return an invalid handle */
1837         if (!gpu_vbo_enabled()) {
1838                 if (buffer) {
1839                         glDeleteBuffersARB(1, &buffer);
                        buffer = 0;  /* avoid returning a stale handle if VBOs are re-enabled later */
                }
1840                 return 0;
1841         }
1842
1843         /* VBO is already built */
1844         if (buffer && prev_gridsize == gridsize) {
1845                 *index_type = prev_index_type;
1846                 *totquad = prev_totquad;
1847                 return buffer;
1848         }
1849
1850         /* Build new VBO */
1851         glGenBuffersARB(1, &buffer);
1852         if (buffer) {
1853                 *totquad = (gridsize - 1) * (gridsize - 1);
1854
1855                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1856
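                /* only a single grid is indexed here, so use 16-bit indices whenever they can address all of its vertices */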
1857                 if (gridsize * gridsize < USHRT_MAX) {
1858                         *index_type = GL_UNSIGNED_SHORT;
1859                         FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1860                 }
1861                 else {
1862                         *index_type = GL_UNSIGNED_INT;
1863                         FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1864                 }
1865
1866                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1867         }
1868
1869         prev_gridsize = gridsize;
1870         prev_index_type = *index_type;
1871         prev_totquad = *totquad;
1872         return buffer;
1873 }
1874
1875 GPU_Buffers *GPU_build_grid_buffers(int *grid_indices, int totgrid,
1876                                     BLI_bitmap **grid_hidden, int gridsize)
1877 {
1878         GPU_Buffers *buffers;
1879         int totquad;
1880         int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;
1881
1882         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1883         buffers->grid_hidden = grid_hidden;
1884         buffers->totgrid = totgrid;
1885
1886         buffers->show_diffuse_color = FALSE;
1887
1888         /* Count the number of quads */
1889         totquad = gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);
1890
1891         if (totquad == fully_visible_totquad) {
1892                 buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
1893                 buffers->has_hidden = 0;
1894         }
1895         else if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO)) {
1896                 /* Build new VBO */
1897                 glGenBuffersARB(1, &buffers->index_buf);
1898                 if (buffers->index_buf) {
1899                         buffers->tot_quad = totquad;
1900
1901                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1902
1903                         if (totgrid * gridsize * gridsize < USHRT_MAX) {
1904                                 buffers->index_type = GL_UNSIGNED_SHORT;
1905                                 FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
1906                         }
1907                         else {
1908                                 buffers->index_type = GL_UNSIGNED_INT;
1909                                 FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
1910                         }
1911
1912                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1913                 }
1914
1915                 buffers->has_hidden = 1;
1916         }
1917
1918         /* Build coord/normal VBO */
1919         if (buffers->index_buf)
1920                 glGenBuffersARB(1, &buffers->vert_buf);
1921
1922         return buffers;
1923 }
1924
1925 #undef FILL_QUAD_BUFFER
1926
1927 /* Output a BMVert into a VertexBufferFormat array
1928  *
1929  * The vertex is skipped if hidden, otherwise the output goes into
1930  * index '*v_index' in the 'vert_data' array and '*v_index' is
1931  * incremented.
1932  */
1933 static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
1934                                           VertexBufferFormat *vert_data,
1935                                           int *v_index,
1936                                           const float fno[3],
1937                                           const float *fmask,
1938                                           const int cd_vert_mask_offset)
1939 {
1940         if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
1941                 VertexBufferFormat *vd = &vert_data[*v_index];
1942
1943                 /* TODO: should use material color */
1944                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1945
1946                 /* Set coord, normal, and mask */
1947                 copy_v3_v3(vd->co, v->co);
1948                 normal_float_to_short_v3(vd->no, fno ? fno : v->no);
1949
1950                 gpu_color_from_mask_copy(
1951                         fmask ? *fmask :
1952                                 BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset),
1953                         diffuse_color,
1954                         vd->color);
1955                 
1956
1957                 /* Assign index for use in the triangle index buffer */
1958                 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
1959
1960                 (*v_index)++;
1961         }
1962 }
1963
1964 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
1965 static int gpu_bmesh_vert_visible_count(GHash *bm_unique_verts,
1966                                                                                 GHash *bm_other_verts)
1967 {
1968         GHashIterator gh_iter;
1969         int totvert = 0;
1970
1971         GHASH_ITER (gh_iter, bm_unique_verts) {
1972                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1973                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1974                         totvert++;
1975         }
1976         GHASH_ITER (gh_iter, bm_other_verts) {
1977                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1978                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1979                         totvert++;
1980         }
1981
1982         return totvert;
1983 }
1984
1985 /* Return the total number of visible faces */
1986 static int gpu_bmesh_face_visible_count(GHash *bm_faces)
1987 {
1988         GHashIterator gh_iter;
1989         int totface = 0;
1990
1991         GHASH_ITER (gh_iter, bm_faces) {
1992                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
1993
1994                 if (!paint_is_bmesh_face_hidden(f))
1995                         totface++;
1996         }
1997
1998         return totface;
1999 }
2000
2001 /* Creates a vertex buffer (coordinate, normal, color) and, if smooth
2002  * shading, an element index buffer. */
2003 void GPU_update_bmesh_buffers(GPU_Buffers *buffers,
2004                                                           BMesh *bm,
2005                                                           GHash *bm_faces,
2006                                                           GHash *bm_unique_verts,
2007                                                           GHash *bm_other_verts)
2008 {
2009         VertexBufferFormat *vert_data;
2010         void *tri_data;
2011         int tottri, totvert, maxvert = 0;
2012
2013         /* TODO, make mask layer optional for bmesh buffer */
2014         const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
2015
2016         if (!buffers->vert_buf || (buffers->smooth && !buffers->index_buf))
2017                 return;
2018
2019         /* Count visible triangles */
2020         tottri = gpu_bmesh_face_visible_count(bm_faces);
2021
2022         if (buffers->smooth) {
2023                 /* Count visible vertices */
2024                 totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
2025         }
2026         else
2027                 totvert = tottri * 3;
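                /* (flat shading stores a per-face normal, so every triangle gets its own three vertices) */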
2028
2029         /* Initialize vertex buffer */
2030         glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
2031         glBufferDataARB(GL_ARRAY_BUFFER_ARB,
2032                                         sizeof(VertexBufferFormat) * totvert,
2033                                         NULL, GL_STATIC_DRAW_ARB);
2034
2035         /* Fill vertex buffer */
2036         vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
2037         if (vert_data) {
2038                 GHashIterator gh_iter;
2039                 int v_index = 0;
2040
2041                 if (buffers->smooth) {
2042                         /* Vertices get an index assigned for use in the triangle
2043                          * index buffer */
2044                         bm->elem_index_dirty |= BM_VERT;
2045
2046                         GHASH_ITER (gh_iter, bm_unique_verts) {
2047                                 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2048                                                               vert_data, &v_index, NULL, NULL,
2049                                                               cd_vert_mask_offset);
2050                         }
2051
2052                         GHASH_ITER (gh_iter, bm_other_verts) {
2053                                 gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
2054                                                               vert_data, &v_index, NULL, NULL,
2055                                                               cd_vert_mask_offset);
2056                         }
2057
2058                         maxvert = v_index;
2059                 }
2060                 else {
2061                         GHASH_ITER (gh_iter, bm_faces) {
2062                                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2063
2064                                 BLI_assert(f->len == 3);
2065
2066                                 if (!paint_is_bmesh_face_hidden(f)) {
2067                                         BMVert *v[3];
2068                                         float fmask = 0;
2069                                         int i;
2070
2071                                         // BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void**)v, 3);
2072                                         BM_face_as_array_vert_tri(f, v);
2073
2074                                         /* Average mask value */
2075                                         for (i = 0; i < 3; i++) {
2076                                                 fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
2077                                         }
2078                                         fmask /= 3.0f;
2079                                         
2080                                         for (i = 0; i < 3; i++) {
2081                                                 gpu_bmesh_vert_to_buffer_copy(v[i], vert_data,
2082                                                                               &v_index, f->no, &fmask,
2083                                                                               cd_vert_mask_offset);
2084                                         }
2085                                 }
2086                         }
2087
2088                         buffers->tot_tri = tottri;
2089                 }
2090
2091                 glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
2092         }
2093         else {
2094                 /* Memory map failed */
2095                 glDeleteBuffersARB(1, &buffers->vert_buf);
2096                 buffers->vert_buf = 0;
2097                 return;
2098         }
2099
2100         if (buffers->smooth) {
2101                 const int use_short = (maxvert < USHRT_MAX);
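                /* 16-bit indices suffice while every visible vertex index stays below USHRT_MAX */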
2102
2103                 /* Initialize triangle index buffer */
2104                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2105                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
2106                                                 (use_short ?
2107                                                  sizeof(unsigned short) :
2108                                                  sizeof(unsigned int)) * 3 * tottri,
2109                                                 NULL, GL_STATIC_DRAW_ARB);
2110
2111                 /* Fill triangle index buffer */
2112                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
2113                 if (tri_data) {
2114                         GHashIterator gh_iter;
2115
2116                         GHASH_ITER (gh_iter, bm_faces) {
2117                                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
2118
2119                                 if (!paint_is_bmesh_face_hidden(f)) {
2120                                         BMLoop *l_iter;
2121                                         BMLoop *l_first;
2122
2123                                         l_iter = l_first = BM_FACE_FIRST_LOOP(f);
2124                                         do {
2125                                                 BMVert *v = l_iter->v;
2126                                                 if (use_short) {
2127                                                         unsigned short *elem = tri_data;
2128                                                         (*elem) = BM_elem_index_get(v);
2129                                                         elem++;
2130                                                         tri_data = elem;
2131                                                 }
2132                                                 else {
2133                                                         unsigned int *elem = tri_data;
2134                                                         (*elem) = BM_elem_index_get(v);
2135                                                         elem++;
2136                                                         tri_data = elem;
2137                                                 }
2138                                         } while ((l_iter = l_iter->next) != l_first);
2139                                 }
2140                         }
2141
2142                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
2143
2144                         buffers->tot_tri = tottri;
2145                         buffers->index_type = (use_short ?
2146                                                                    GL_UNSIGNED_SHORT :
2147                                                                    GL_UNSIGNED_INT);
2148                 }
2149                 else {
2150                         /* Memory map failed */
2151                         glDeleteBuffersARB(1, &buffers->index_buf);
2152                         buffers->index_buf = 0;
2153                 }
2154         }
2155 }
2156
2157 GPU_Buffers *GPU_build_bmesh_buffers(int smooth_shading)
2158 {
2159         GPU_Buffers *buffers;
2160
2161         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
2162         if (smooth_shading)
2163                 glGenBuffersARB(1, &buffers->index_buf);
2164         glGenBuffersARB(1, &buffers->vert_buf);
2165         buffers->use_bmesh = TRUE;
2166         buffers->smooth = smooth_shading;
2167
2168         return buffers;
2169 }
2170
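/* Immediate-mode fallback, used when VBOs are disabled or the vertex buffer could not be created */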
2171 static void gpu_draw_buffers_legacy_mesh(GPU_Buffers *buffers)
2172 {
2173         const MVert *mvert = buffers->mvert;
2174         int i, j;
2175         const int has_mask = (buffers->vmask != NULL);
2176         const MFace *face = &buffers->mface[buffers->face_indices[0]];
2177         float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2178
2179         if (buffers->show_diffuse_color)
2180                 GPU_material_diffuse_get(face->mat_nr + 1, diffuse_color);
2181
2182         if (has_mask) {
2183                 gpu_colors_enable(VBO_DISABLED);
2184         }
2185
2186         for (i = 0; i < buffers->totface; ++i) {
2187                 MFace *f = buffers->mface + buffers->face_indices[i];
2188                 int S = f->v4 ? 4 : 3;
2189                 unsigned int *fv = &f->v1;
2190
2191                 if (paint_is_face_hidden(f, buffers->mvert))
2192                         continue;
2193
2194                 glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);
2195
2196                 if (buffers->smooth) {
2197                         for (j = 0; j < S; j++) {
2198                                 if (has_mask) {
2199                                         gpu_color_from_mask_set(buffers->vmask[fv[j]], diffuse_color);
2200                                 }
2201                                 glNormal3sv(mvert[fv[j]].no);
2202                                 glVertex3fv(mvert[fv[j]].co);
2203                         }
2204                 }
2205                 else {
2206                         float fno[3];
2207
2208                         /* calculate face normal */
2209                         if (f->v4) {
2210                                 normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
2211                                                mvert[fv[2]].co, mvert[fv[3]].co);
2212                         }
2213                         else
2214                                 normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
2215                         glNormal3fv(fno);
2216
2217                         if (has_mask) {
2218                                 float fmask;
2219
2220                                 /* calculate face mask color */
2221                                 fmask = (buffers->vmask[fv[0]] +
2222                                          buffers->vmask[fv[1]] +
2223                                          buffers->vmask[fv[2]]);
2224                                 if (f->v4)
2225                                         fmask = (fmask + buffers->vmask[fv[3]]) * 0.25f;
2226                                 else
2227                                         fmask /= 3.0f;
2228                                 gpu_color_from_mask_set(fmask, diffuse_color);
2229                         }
2230                         
2231                         for (j = 0; j < S; j++)
2232                                 glVertex3fv(mvert[fv[j]].co);
2233                 }
2234                 
2235                 glEnd();
2236         }
2237
2238         if (has_mask) {
2239                 gpu_colors_disable(VBO_DISABLED);
2240         }
2241 }
2242
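/* Immediate-mode fallback for drawing multires grids */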
2243 static void gpu_draw_buffers_legacy_grids(GPU_Buffers *buffers)
2244 {
2245         const CCGKey *key = &buffers->gridkey;
2246         int i, j, x, y, gridsize = buffers->gridkey.grid_size;
2247         const int has_mask = key->has_mask;
2248         const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2249         float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
2250
2251         if (buffers->show_diffuse_color)
2252                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2253
2254         if (has_mask) {
2255                 gpu_colors_enable(VBO_DISABLED);
2256         }
2257
2258         for (i = 0; i < buffers->totgrid; ++i) {
2259                 int g = buffers->grid_indices[i];
2260                 CCGElem *grid = buffers->grids[g];
2261                 BLI_bitmap *gh = buffers->grid_hidden[g];
2262
2263                 /* TODO: could use strips with hiding as well */
2264
2265                 if (gh) {
2266                         glBegin(GL_QUADS);
2267                         
2268                         for (y = 0; y < gridsize - 1; y++) {
2269                                 for (x = 0; x < gridsize - 1; x++) {
2270                                         CCGElem *e[4] = {
2271                                                 CCG_grid_elem(key, grid, x + 1, y + 1),
2272                                                 CCG_grid_elem(key, grid, x + 1, y),
2273                                                 CCG_grid_elem(key, grid, x, y),
2274                                                 CCG_grid_elem(key, grid, x, y + 1)
2275                                         };
2276
2277                                         /* skip face if any of its corners are hidden */
2278                                         if (paint_is_grid_face_hidden(gh, gridsize, x, y))
2279                                                 continue;
2280
2281                                         if (buffers->smooth) {
2282                                                 for (j = 0; j < 4; j++) {
2283                                                         if (has_mask) {
2284                                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, e[j]), diffuse_color);
2285                                                         }
2286                                                         glNormal3fv(CCG_elem_no(key, e[j]));
2287                                                         glVertex3fv(CCG_elem_co(key, e[j]));
2288                                                 }
2289                                         }
2290                                         else {
2291                                                 float fno[3];
2292                                                 normal_quad_v3(fno,
2293                                                                CCG_elem_co(key, e[0]),
2294                                                                CCG_elem_co(key, e[1]),
2295                                                                CCG_elem_co(key, e[2]),
2296                                                                CCG_elem_co(key, e[3]));
2297                                                 glNormal3fv(fno);
2298
2299                                                 if (has_mask) {
2300                                                         gpu_color_from_mask_quad_set(key, e[0], e[1], e[2], e[3], diffuse_color);
2301                                                 }
2302
2303                                                 for (j = 0; j < 4; j++)
2304                                                         glVertex3fv(CCG_elem_co(key, e[j]));
2305                                         }
2306                                 }
2307                         }
2308
2309                         glEnd();
2310                 }
2311                 else if (buffers->smooth) {
2312                         for (y = 0; y < gridsize - 1; y++) {
2313                                 glBegin(GL_QUAD_STRIP);
2314                                 for (x = 0; x < gridsize; x++) {
2315                                         CCGElem *a = CCG_grid_elem(key, grid, x, y);
2316                                         CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2317
2318                                         if (has_mask) {
2319                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, a), diffuse_color);
2320                                         }
2321                                         glNormal3fv(CCG_elem_no(key, a));
2322                                         glVertex3fv(CCG_elem_co(key, a));
2323                                         if (has_mask) {
2324                                                 gpu_color_from_mask_set(*CCG_elem_mask(key, b), diffuse_color);
2325                                         }
2326                                         glNormal3fv(CCG_elem_no(key, b));
2327                                         glVertex3fv(CCG_elem_co(key, b));
2328                                 }
2329                                 glEnd();
2330                         }
2331                 }
2332                 else {
2333                         for (y = 0; y < gridsize - 1; y++) {
2334                                 glBegin(GL_QUAD_STRIP);
2335                                 for (x = 0; x < gridsize; x++) {
2336                                         CCGElem *a = CCG_grid_elem(key, grid, x, y);
2337                                         CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);
2338
2339                                         if (x > 0) {
2340                                                 CCGElem *c = CCG_grid_elem(key, grid, x - 1, y);
2341                                                 CCGElem *d = CCG_grid_elem(key, grid, x - 1, y + 1);
2342
2343                                                 float fno[3];
2344                                                 normal_quad_v3(fno,
2345                                                                CCG_elem_co(key, d),
2346                                                                CCG_elem_co(key, b),
2347                                                                CCG_elem_co(key, a),
2348                                                                CCG_elem_co(key, c));
2349                                                 glNormal3fv(fno);
2350
2351                                                 if (has_mask) {
2352                                                         gpu_color_from_mask_quad_set(key, a, b, c, d, diffuse_color);
2353                                                 }
2354                                         }
2355
2356                                         glVertex3fv(CCG_elem_co(key, a));
2357                                         glVertex3fv(CCG_elem_co(key, b));
2358                                 }
2359                                 glEnd();
2360                         }
2361                 }
2362         }
2363
2364         if (has_mask) {
2365                 gpu_colors_disable(VBO_DISABLED);
2366         }
2367 }
2368
2369 void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial,
2370                                           int wireframe)
2371 {
2372         /* sets the material from the first face; to handle this properly, faces
2373          * would need to be sorted into buckets by material */
2374         if (setMaterial) {
2375                 if (buffers->totface) {
2376                         const MFace *f = &buffers->mface[buffers->face_indices[0]];
2377                         if (!setMaterial(f->mat_nr + 1, NULL))
2378                                 return;
2379                 }
2380                 else if (buffers->totgrid) {
2381                         const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2382                         if (!setMaterial(f->mat_nr + 1, NULL))
2383                                 return;
2384                 }
2385                 else {
2386                         if (!setMaterial(1, NULL))
2387                                 return;
2388                 }
2389         }
2390
2391         glShadeModel((buffers->smooth || buffers->totface) ? GL_SMOOTH : GL_FLAT);
2392
2393         if (buffers->vert_buf) {
2394                 glEnableClientState(GL_VERTEX_ARRAY);
2395                 if (!wireframe) {
2396                         glEnableClientState(GL_NORMAL_ARRAY);
2397                         gpu_colors_enable(VBO_ENABLED);
2398                 }
2399
2400                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
2401
2402                 if (buffers->index_buf)
2403                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
2404
2405                 if (wireframe)
2406                         glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
2407
2408                 if (buffers->tot_quad) {
2409                         char *offset = 0;
2410                         int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
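                        /* with hidden faces the node's own index buffer already spans all grids, so one
                         * pass is enough; otherwise the shared per-gridsize index buffer is reused for
                         * each grid by advancing the vertex pointers one grid_area at a time */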
2411                         for (i = 0; i < last; i++) {
2412                                 glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2413                                                 offset + offsetof(VertexBufferFormat, co));
2414                                 glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2415                                                 offset + offsetof(VertexBufferFormat, no));
2416                                 glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2417                                                offset + offsetof(VertexBufferFormat, color));
2418                                 
2419                                 glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
2420
2421                                 offset += buffers->gridkey.grid_area * sizeof(VertexBufferFormat);
2422                         }
2423                 }
2424                 else {
2425                         int totelem = buffers->tot_tri * 3;
2426
2427                         glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
2428                                         (void *)offsetof(VertexBufferFormat, co));
2429                         glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
2430                                         (void *)offsetof(VertexBufferFormat, no));
2431                         glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
2432                                        (void *)offsetof(VertexBufferFormat, color));
2433
2434                         if (buffers->index_buf)
2435                                 glDrawElements(GL_TRIANGLES, totelem, buffers->index_type, 0);
2436                         else
2437                                 glDrawArrays(GL_TRIANGLES, 0, totelem);
2438                 }
2439
2440                 if (wireframe)
2441                         glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
2442
2443                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
2444                 if (buffers->index_buf)
2445                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
2446
2447                 glDisableClientState(GL_VERTEX_ARRAY);
2448                 if (!wireframe) {
2449                         glDisableClientState(GL_NORMAL_ARRAY);
2450                         gpu_colors_disable(VBO_ENABLED);
2451                 }
2452         }
2453         /* fall back to immediate mode if we ran out of memory or VBOs are disabled */
2454         else if (buffers->totface) {
2455                 gpu_draw_buffers_legacy_mesh(buffers);
2456         }
2457         else if (buffers->totgrid) {
2458                 gpu_draw_buffers_legacy_grids(buffers);
2459         }
2460 }
2461
2462 int GPU_buffers_diffuse_changed(GPU_Buffers *buffers, int show_diffuse_color)
2463 {
2464         float diffuse_color[4];
2465
2466         if (buffers->show_diffuse_color != show_diffuse_color)
2467                 return TRUE;
2468
2469         if (buffers->show_diffuse_color == FALSE)
2470                 return FALSE;
2471
2472         if (buffers->mface) {
2473                 MFace *f = buffers->mface + buffers->face_indices[0];
2474
2475                 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
2476         }
2477         else {
2478                 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2479
2480                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2481         }
2482
2483         return diffuse_color[0] != buffers->diffuse_color[0] ||
2484                diffuse_color[1] != buffers->diffuse_color[1] ||
2485                diffuse_color[2] != buffers->diffuse_color[2];
2486 }
2487
2488 void GPU_free_buffers(GPU_Buffers *buffers)
2489 {
2490         if (buffers) {
2491                 if (buffers->vert_buf)
2492                         glDeleteBuffersARB(1, &buffers->vert_buf);
2493                 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
2494                         glDeleteBuffersARB(1, &buffers->index_buf);
2495
2496                 MEM_freeN(buffers);
2497         }
2498 }