Merging r58475 through r58700 from trunk into soc-2013-depsgraph_mt
[blender.git] / source / blender / gpu / intern / gpu_buffers.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2005 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): Brecht Van Lommel.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/gpu/intern/gpu_buffers.c
29  *  \ingroup gpu
30  */
31
32
33 #include <limits.h>
34 #include <stddef.h>
35 #include <string.h>
36
37 #include "GL/glew.h"
38
39 #include "MEM_guardedalloc.h"
40
41 #include "BLI_bitmap.h"
42 #include "BLI_math.h"
43 #include "BLI_utildefines.h"
44 #include "BLI_ghash.h"
45 #include "BLI_threads.h"
46
47 #include "DNA_meshdata_types.h"
48 #include "DNA_material_types.h"
49
50 #include "BKE_ccg.h"
51 #include "BKE_DerivedMesh.h"
52 #include "BKE_paint.h"
53 #include "BKE_subsurf.h"
54
55 #include "DNA_userdef_types.h"
56
57 #include "GPU_buffers.h"
58 #include "GPU_draw.h"
59
60 #include "bmesh.h"
61
/* bitflag values stored in the file-level GLStates variable; each flag
 * records that the corresponding vertex-array/buffer state is
 * currently enabled */
typedef enum {
	GPU_BUFFER_VERTEX_STATE = 1,
	GPU_BUFFER_NORMAL_STATE = 2,
	GPU_BUFFER_TEXCOORD_STATE = 4,
	GPU_BUFFER_COLOR_STATE = 8,
	GPU_BUFFER_ELEMENT_STATE = 16,
} GPUBufferState;
69
70 #define MAX_GPU_ATTRIB_DATA 32
71
72 /* material number is an 16-bit signed short and the range (assume material number is non-negative) */
73 #define MAX_MATERIALS MAXMAT
74
75 /* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
76 static int useVBOs = -1;
77 static GPUBufferState GLStates = 0;
78 static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
79
80 static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
81
82 /* stores recently-deleted buffers so that new buffers won't have to
83  * be recreated as often
84  *
85  * only one instance of this pool is created, stored in
86  * gpu_buffer_pool
87  *
88  * note that the number of buffers in the pool is usually limited to
89  * MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
90  * when a GPUBuffer is released outside the main thread; due to OpenGL
91  * restrictions it cannot be immediately released
92  */
typedef struct GPUBufferPool {
	/* number of allocated buffers stored */
	int totbuf;
	/* actual allocated length of the array */
	int maxsize;
	/* cached buffers, most recently freed first; entries
	 * [0, totbuf) are valid */
	GPUBuffer **buffers;
} GPUBufferPool;
/* soft limit on the number of cached buffers; may be exceeded
 * temporarily when freeing from a non-main thread (see
 * gpu_buffer_free_intern) */
#define MAX_FREE_GPU_BUFFERS 8
101
102 /* create a new GPUBufferPool */
103 static GPUBufferPool *gpu_buffer_pool_new(void)
104 {
105         GPUBufferPool *pool;
106
107         /* enable VBOs if supported */
108         if (useVBOs == -1)
109                 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
110
111         pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer_Pool");
112
113         pool->maxsize = MAX_FREE_GPU_BUFFERS;
114         pool->buffers = MEM_callocN(sizeof(GPUBuffer *) * pool->maxsize,
115                                     "GPUBuffer.buffers");
116
117         return pool;
118 }
119
120 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
121 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
122 {
123         int i;
124
125         if (!pool || index < 0 || index >= pool->totbuf)
126                 return;
127
128         /* shift entries down, overwriting the buffer at `index' */
129         for (i = index; i < pool->totbuf - 1; i++)
130                 pool->buffers[i] = pool->buffers[i + 1];
131
132         /* clear the last entry */
133         if (pool->totbuf > 0)
134                 pool->buffers[pool->totbuf - 1] = NULL;
135
136         pool->totbuf--;
137 }
138
139 /* delete the last entry in the pool */
140 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
141 {
142         GPUBuffer *last;
143
144         if (pool->totbuf <= 0)
145                 return;
146
147         /* get the last entry */
148         if (!(last = pool->buffers[pool->totbuf - 1]))
149                 return;
150
151         /* delete the buffer's data */
152         if (useVBOs)
153                 glDeleteBuffersARB(1, &last->id);
154         else
155                 MEM_freeN(last->pointer);
156
157         /* delete the buffer and remove from pool */
158         MEM_freeN(last);
159         pool->totbuf--;
160         pool->buffers[pool->totbuf] = NULL;
161 }
162
163 /* free a GPUBufferPool; also frees the data in the pool's
164  * GPUBuffers */
165 static void gpu_buffer_pool_free(GPUBufferPool *pool)
166 {
167         if (!pool)
168                 return;
169         
170         while (pool->totbuf)
171                 gpu_buffer_pool_delete_last(pool);
172
173         MEM_freeN(pool->buffers);
174         MEM_freeN(pool);
175 }
176
177 static GPUBufferPool *gpu_buffer_pool = NULL;
178 static GPUBufferPool *gpu_get_global_buffer_pool(void)
179 {
180         /* initialize the pool */
181         if (!gpu_buffer_pool)
182                 gpu_buffer_pool = gpu_buffer_pool_new();
183
184         return gpu_buffer_pool;
185 }
186
/* free the global buffer pool and reset it so a later allocation can
 * recreate it */
void GPU_global_buffer_pool_free(void)
{
	gpu_buffer_pool_free(gpu_buffer_pool);
	gpu_buffer_pool = NULL;
}
192
193 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
194  * pool if possible, otherwise creates a new one
195  *
196  * Thread-unsafe version for internal usage only.
197  */
198 static GPUBuffer *gpu_buffer_alloc_intern(int size)
199 {
200         GPUBufferPool *pool;
201         GPUBuffer *buf;
202         int i, bufsize, bestfit = -1;
203
204         /* bad case, leads to leak of buf since buf->pointer will allocate
205          * NULL, leading to return without cleanup. In any case better detect early
206          * psy-fi */
207         if (size == 0)
208                 return NULL;
209
210         pool = gpu_get_global_buffer_pool();
211
212         /* not sure if this buffer pool code has been profiled much,
213          * seems to me that the graphics driver and system memory
214          * management might do this stuff anyway. --nicholas
215          */
216
217         /* check the global buffer pool for a recently-deleted buffer
218          * that is at least as big as the request, but not more than
219          * twice as big */
220         for (i = 0; i < pool->totbuf; i++) {
221                 bufsize = pool->buffers[i]->size;
222
223                 /* check for an exact size match */
224                 if (bufsize == size) {
225                         bestfit = i;
226                         break;
227                 }
228                 /* smaller buffers won't fit data and buffers at least
229                  * twice as big are a waste of memory */
230                 else if (bufsize > size && size > (bufsize / 2)) {
231                         /* is it closer to the required size than the
232                          * last appropriate buffer found. try to save
233                          * memory */
234                         if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
235                                 bestfit = i;
236                         }
237                 }
238         }
239
240         /* if an acceptable buffer was found in the pool, remove it
241          * from the pool and return it */
242         if (bestfit != -1) {
243                 buf = pool->buffers[bestfit];
244                 gpu_buffer_pool_remove_index(pool, bestfit);
245                 return buf;
246         }
247
248         /* no acceptable buffer found in the pool, create a new one */
249         buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
250         buf->size = size;
251
252         if (useVBOs == 1) {
253                 /* create a new VBO and initialize it to the requested
254                  * size */
255                 glGenBuffersARB(1, &buf->id);
256                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
257                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
258                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
259         }
260         else {
261                 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
262                 
263                 /* purpose of this seems to be dealing with
264                  * out-of-memory errors? looks a bit iffy to me
265                  * though, at least on Linux I expect malloc() would
266                  * just overcommit. --nicholas */
267                 while (!buf->pointer && pool->totbuf > 0) {
268                         gpu_buffer_pool_delete_last(pool);
269                         buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
270                 }
271                 if (!buf->pointer)
272                         return NULL;
273         }
274
275         return buf;
276 }
277
278 /* Same as above, but safe for threading. */
279 GPUBuffer *GPU_buffer_alloc(int size)
280 {
281         GPUBuffer *buffer;
282
283         if (size == 0) {
284                 /* Early out, no lock needed in this case. */
285                 return NULL;
286         }
287
288         BLI_mutex_lock(&buffer_mutex);
289         buffer = gpu_buffer_alloc_intern(size);
290         BLI_mutex_unlock(&buffer_mutex);
291
292         return buffer;
293 }
294
295 /* release a GPUBuffer; does not free the actual buffer or its data,
296  * but rather moves it to the pool of recently-freed buffers for
297  * possible re-use
298  *
299  * Thread-unsafe version for internal usage only.
300  */
301 static void gpu_buffer_free_intern(GPUBuffer *buffer)
302 {
303         GPUBufferPool *pool;
304         int i;
305
306         if (!buffer)
307                 return;
308
309         pool = gpu_get_global_buffer_pool();
310
311         /* free the last used buffer in the queue if no more space, but only
312          * if we are in the main thread. for e.g. rendering or baking it can
313          * happen that we are in other thread and can't call OpenGL, in that
314          * case cleanup will be done GPU_buffer_pool_free_unused */
315         if (BLI_thread_is_main()) {
316                 /* in main thread, safe to decrease size of pool back
317                  * down to MAX_FREE_GPU_BUFFERS */
318                 while (pool->totbuf >= MAX_FREE_GPU_BUFFERS)
319                         gpu_buffer_pool_delete_last(pool);
320         }
321         else {
322                 /* outside of main thread, can't safely delete the
323                  * buffer, so increase pool size */
324                 if (pool->maxsize == pool->totbuf) {
325                         pool->maxsize += MAX_FREE_GPU_BUFFERS;
326                         pool->buffers = MEM_reallocN(pool->buffers,
327                                                      sizeof(GPUBuffer *) * pool->maxsize);
328                 }
329         }
330
331         /* shift pool entries up by one */
332         for (i = pool->totbuf; i > 0; i--)
333                 pool->buffers[i] = pool->buffers[i - 1];
334
335         /* insert the buffer into the beginning of the pool */
336         pool->buffers[0] = buffer;
337         pool->totbuf++;
338 }
339
340 /* Same as above, but safe for threading. */
341 void GPU_buffer_free(GPUBuffer *buffer)
342 {
343         if (!buffer) {
344                 /* Early output, no need to lock in this case, */
345                 return;
346         }
347
348         BLI_mutex_lock(&buffer_mutex);
349         gpu_buffer_free_intern(buffer);
350         BLI_mutex_unlock(&buffer_mutex);
351 }
352
/* linked-list node mapping a mesh vertex to one of the points built
 * from it; a vertex shared by several triangle corners gets a chain
 * of these (see gpu_drawobject_add_vert_point) */
typedef struct GPUVertPointLink {
	struct GPUVertPointLink *next;
	/* index of the point in the draw object; -1 means uninitialized */
	int point_index;
} GPUVertPointLink;
358
359 /* add a new point to the list of points related to a particular
360  * vertex */
361 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
362 {
363         GPUVertPointLink *lnk;
364
365         lnk = &gdo->vert_points[vert_index];
366
367         /* if first link is in use, add a new link at the end */
368         if (lnk->point_index != -1) {
369                 /* get last link */
370                 for (; lnk->next; lnk = lnk->next) ;
371
372                 /* add a new link from the pool */
373                 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
374                 gdo->vert_points_usage++;
375         }
376
377         lnk->point_index = point_index;
378 }
379
380 /* update the vert_points and triangle_to_mface fields with a new
381  * triangle */
382 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
383                                         int base_point_index,
384                                         int face_index,
385                                         int v1, int v2, int v3)
386 {
387         int i, v[3] = {v1, v2, v3};
388         for (i = 0; i < 3; i++)
389                 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
390         gdo->triangle_to_mface[base_point_index / 3] = face_index;
391 }
392
/* for each vertex, build a list of points related to it; these lists
 * are stored in an array sized to the number of vertices
 *
 * Also fills in each material's totpoint and assigns loose-point
 * indices (after tot_triangle_point) to vertices used by no face. */
static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
{
	GPUBufferMaterial *mat;
	int i, mat_orig_to_new[MAX_MATERIALS];

	/* allocate the array and space for links; vert_points_mem has one
	 * slot per triangle point, which bounds the number of extra links
	 * ever needed */
	gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
	                               "GPUDrawObject.vert_points");
	gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
	                                   "GPUDrawObject.vert_points_mem");
	gdo->vert_points_usage = 0;

	/* build a map from the original material indices to the new
	 * GPUBufferMaterial indices; entries for materials with no faces
	 * stay uninitialized, but only mat_nr values that occur on faces
	 * are ever read below */
	for (i = 0; i < gdo->totmaterial; i++)
		mat_orig_to_new[gdo->materials[i].mat_nr] = i;

	/* -1 indicates the link is not yet used */
	for (i = 0; i < gdo->totvert; i++)
		gdo->vert_points[i].point_index = -1;

	for (i = 0; i < totface; i++, f++) {
		mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];

		/* add triangle */
		gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
		                            i, f->v1, f->v2, f->v3);
		mat->totpoint += 3;

		/* add second triangle for quads */
		if (f->v4) {
			gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
			                            i, f->v3, f->v4, f->v1);
			mat->totpoint += 3;
		}
	}

	/* map any unused vertices to loose points, numbered after the
	 * triangle points */
	for (i = 0; i < gdo->totvert; i++) {
		if (gdo->vert_points[i].point_index == -1) {
			gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
			gdo->tot_loose_point++;
		}
	}
}
440
/* see GPUDrawObject's structure definition for a description of the
 * data being initialized here
 *
 * Builds the per-material point layout for the tessellated faces of
 * `dm' (each quad counted as two triangles) and the vertex-to-point
 * mapping. Caller owns the returned object (freed by
 * GPU_drawobject_free). */
GPUDrawObject *GPU_drawobject_new(DerivedMesh *dm)
{
	GPUDrawObject *gdo;
	MFace *mface;
	int points_per_mat[MAX_MATERIALS];
	int i, curmat, curpoint, totface;

	mface = dm->getTessFaceArray(dm);
	totface = dm->getNumTessFaces(dm);

	/* get the number of points used by each material, treating
	 * each quad as two triangles; assumes mat_nr < MAX_MATERIALS */
	memset(points_per_mat, 0, sizeof(int) * MAX_MATERIALS);
	for (i = 0; i < totface; i++)
		points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;

	/* create the GPUDrawObject */
	gdo = MEM_callocN(sizeof(GPUDrawObject), "GPUDrawObject");
	gdo->totvert = dm->getNumVerts(dm);
	gdo->totedge = dm->getNumEdges(dm);

	/* count the number of materials used by this DerivedMesh */
	for (i = 0; i < MAX_MATERIALS; i++) {
		if (points_per_mat[i] > 0)
			gdo->totmaterial++;
	}

	/* allocate an array of materials used by this DerivedMesh */
	gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
	                             "GPUDrawObject.materials");

	/* initialize the materials array: each material gets a contiguous
	 * range of points starting at `start'; totpoint is filled in later
	 * by gpu_drawobject_init_vert_points */
	for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
		if (points_per_mat[i] > 0) {
			gdo->materials[curmat].start = curpoint;
			gdo->materials[curmat].totpoint = 0;
			gdo->materials[curmat].mat_nr = i;

			curpoint += points_per_mat[i];
			curmat++;
		}
	}

	/* store total number of points used for triangles */
	gdo->tot_triangle_point = curpoint;

	/* one mface index per triangle (3 points each) */
	gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
	                                     "GPUDrawObject.triangle_to_mface");

	gpu_drawobject_init_vert_points(gdo, mface, totface);

	return gdo;
}
496
497 void GPU_drawobject_free(DerivedMesh *dm)
498 {
499         GPUDrawObject *gdo;
500
501         if (!dm || !(gdo = dm->drawObject))
502                 return;
503
504         MEM_freeN(gdo->materials);
505         MEM_freeN(gdo->triangle_to_mface);
506         MEM_freeN(gdo->vert_points);
507         MEM_freeN(gdo->vert_points_mem);
508         GPU_buffer_free(gdo->points);
509         GPU_buffer_free(gdo->normals);
510         GPU_buffer_free(gdo->uv);
511         GPU_buffer_free(gdo->colors);
512         GPU_buffer_free(gdo->edges);
513         GPU_buffer_free(gdo->uvedges);
514
515         MEM_freeN(gdo);
516         dm->drawObject = NULL;
517 }
518
/* callback that fills a mapped or malloc'ed vertex array; `index'
 * holds the current per-material write offsets, `user_data' is
 * copy-function-specific (e.g. an MCol array for colors) */
typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
                                  int *mat_orig_to_new, void *user_data);
521
/* allocate a GPUBuffer of `size' bytes and fill it via `copy_f';
 * `vector_size' is the number of array elements per point, `target'
 * the GL binding target (array or element-array buffer)
 *
 * Returns NULL and sets dm->drawObject->legacy on any failure, in
 * which case drawing falls back to immediate mode. */
static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
                                   int vector_size, int size, GLenum target,
                                   void *user, GPUBufferCopyFunc copy_f)
{
	GPUBufferPool *pool;
	GPUBuffer *buffer;
	float *varray;
	int mat_orig_to_new[MAX_MATERIALS];
	int *cur_index_per_mat;
	int i;
	int success;
	GLboolean uploaded;

	pool = gpu_get_global_buffer_pool();

	BLI_mutex_lock(&buffer_mutex);

	/* alloc a GPUBuffer; fall back to legacy mode on failure */
	if (!(buffer = gpu_buffer_alloc_intern(size)))
		dm->drawObject->legacy = 1;

	/* nothing to do for legacy mode
	 * NOTE(review): if legacy was already set before this call, a
	 * successfully allocated `buffer' is returned NULL here without
	 * being released -- confirm callers never re-enter once legacy
	 * is set */
	if (dm->drawObject->legacy) {
		BLI_mutex_unlock(&buffer_mutex);
		return NULL;
	}

	cur_index_per_mat = MEM_mallocN(sizeof(int) * object->totmaterial,
	                                "GPU_buffer_setup.cur_index_per_mat");
	for (i = 0; i < object->totmaterial; i++) {
		/* for each material, the current index to copy data to */
		cur_index_per_mat[i] = object->materials[i].start * vector_size;

		/* map from original material index to new
		 * GPUBufferMaterial index */
		mat_orig_to_new[object->materials[i].mat_nr] = i;
	}

	if (useVBOs) {
		success = 0;

		while (!success) {
			/* bind the buffer and discard previous data,
			 * avoids stalling gpu */
			glBindBufferARB(target, buffer->id);
			glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);

			/* attempt to map the buffer */
			if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
				/* failed to map the buffer; delete it
				 * NOTE(review): free_intern inserts `buffer' at
				 * the head of the pool while delete_last removes
				 * the tail, so with a non-empty pool this deletes
				 * an older buffer rather than `buffer' itself --
				 * presumably intentional recycling; verify */
				gpu_buffer_free_intern(buffer);
				gpu_buffer_pool_delete_last(pool);
				buffer = NULL;

				/* try freeing an entry from the pool
				 * and reallocating the buffer */
				if (pool->totbuf > 0) {
					gpu_buffer_pool_delete_last(pool);
					buffer = gpu_buffer_alloc_intern(size);
				}

				/* allocation still failed; fall back
				 * to legacy mode */
				if (!buffer) {
					dm->drawObject->legacy = 1;
					success = 1;
				}
			}
			else {
				success = 1;
			}
		}

		/* check legacy fallback didn't happen */
		if (dm->drawObject->legacy == 0) {
			uploaded = GL_FALSE;
			/* attempt to upload the data to the VBO */
			while (uploaded == GL_FALSE) {
				(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
				/* glUnmapBuffer returns GL_FALSE if
				 * the data store is corrupted; retry
				 * in that case */
				uploaded = glUnmapBufferARB(target);
			}
		}
		glBindBufferARB(target, 0);
	}
	else {
		/* VBO not supported, use vertex array fallback */
		if (buffer->pointer) {
			varray = buffer->pointer;
			(*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
		}
		else {
			dm->drawObject->legacy = 1;
		}
	}

	MEM_freeN(cur_index_per_mat);

	BLI_mutex_unlock(&buffer_mutex);

	return buffer;
}
626
627 static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
628 {
629         MVert *mvert;
630         MFace *f;
631         int i, j, start, totface;
632
633         mvert = dm->getVertArray(dm);
634         f = dm->getTessFaceArray(dm);
635
636         totface = dm->getNumTessFaces(dm);
637         for (i = 0; i < totface; i++, f++) {
638                 start = index[mat_orig_to_new[f->mat_nr]];
639
640                 /* v1 v2 v3 */
641                 copy_v3_v3(&varray[start], mvert[f->v1].co);
642                 copy_v3_v3(&varray[start + 3], mvert[f->v2].co);
643                 copy_v3_v3(&varray[start + 6], mvert[f->v3].co);
644                 index[mat_orig_to_new[f->mat_nr]] += 9;
645
646                 if (f->v4) {
647                         /* v3 v4 v1 */
648                         copy_v3_v3(&varray[start + 9], mvert[f->v3].co);
649                         copy_v3_v3(&varray[start + 12], mvert[f->v4].co);
650                         copy_v3_v3(&varray[start + 15], mvert[f->v1].co);
651                         index[mat_orig_to_new[f->mat_nr]] += 9;
652                 }
653         }
654
655         /* copy loose points */
656         j = dm->drawObject->tot_triangle_point * 3;
657         for (i = 0; i < dm->drawObject->totvert; i++) {
658                 if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
659                         copy_v3_v3(&varray[j], mvert[i].co);
660                         j += 3;
661                 }
662         }
663 }
664
/* fill the normal buffer: per-vertex normals for smooth faces,
 * otherwise the face normal repeated for every corner (taken from the
 * cached CD_NORMAL layer when present, recomputed when not) */
static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
{
	int i, totface;
	int start;
	float f_no[3];

	/* cached face normals; may be NULL */
	float *nors = dm->getTessFaceDataArray(dm, CD_NORMAL);
	MVert *mvert = dm->getVertArray(dm);
	MFace *f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		const int smoothnormal = (f->flag & ME_SMOOTH);

		start = index[mat_orig_to_new[f->mat_nr]];
		/* 9 floats per triangle, 18 for a quad (two triangles) */
		index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;

		if (smoothnormal) {
			/* copy vertex normal */
			normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
			normal_short_to_float_v3(&varray[start + 3], mvert[f->v2].no);
			normal_short_to_float_v3(&varray[start + 6], mvert[f->v3].no);

			/* second triangle for quads: v3 v4 v1 */
			if (f->v4) {
				normal_short_to_float_v3(&varray[start + 9], mvert[f->v3].no);
				normal_short_to_float_v3(&varray[start + 12], mvert[f->v4].no);
				normal_short_to_float_v3(&varray[start + 15], mvert[f->v1].no);
			}
		}
		else if (nors) {
			/* copy cached face normal to every corner */
			copy_v3_v3(&varray[start], &nors[i * 3]);
			copy_v3_v3(&varray[start + 3], &nors[i * 3]);
			copy_v3_v3(&varray[start + 6], &nors[i * 3]);

			if (f->v4) {
				copy_v3_v3(&varray[start + 9], &nors[i * 3]);
				copy_v3_v3(&varray[start + 12], &nors[i * 3]);
				copy_v3_v3(&varray[start + 15], &nors[i * 3]);
			}
		}
		else {
			/* calculate face normal */
			if (f->v4)
				normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
			else
				normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);

			copy_v3_v3(&varray[start], f_no);
			copy_v3_v3(&varray[start + 3], f_no);
			copy_v3_v3(&varray[start + 6], f_no);

			if (f->v4) {
				copy_v3_v3(&varray[start + 9], f_no);
				copy_v3_v3(&varray[start + 12], f_no);
				copy_v3_v3(&varray[start + 15], f_no);
			}
		}
	}
}
725
726 static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
727 {
728         int start;
729         int i, totface;
730
731         MTFace *mtface;
732         MFace *f;
733
734         if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
735                 return;
736         f = dm->getTessFaceArray(dm);
737                 
738         totface = dm->getNumTessFaces(dm);
739         for (i = 0; i < totface; i++, f++) {
740                 start = index[mat_orig_to_new[f->mat_nr]];
741
742                 /* v1 v2 v3 */
743                 copy_v2_v2(&varray[start], mtface[i].uv[0]);
744                 copy_v2_v2(&varray[start + 2], mtface[i].uv[1]);
745                 copy_v2_v2(&varray[start + 4], mtface[i].uv[2]);
746                 index[mat_orig_to_new[f->mat_nr]] += 6;
747
748                 if (f->v4) {
749                         /* v3 v4 v1 */
750                         copy_v2_v2(&varray[start + 6], mtface[i].uv[2]);
751                         copy_v2_v2(&varray[start + 8], mtface[i].uv[3]);
752                         copy_v2_v2(&varray[start + 10], mtface[i].uv[0]);
753                         index[mat_orig_to_new[f->mat_nr]] += 6;
754                 }
755         }
756 }
757
/* copy three color components out of one 4-byte MCol, reversing their
 * order: v gets col[3], col[2], col[1] (col[0] is skipped) */
static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
{
	v[2] = col[1];
	v[1] = col[2];
	v[0] = col[3];
}
764
/* treat varray_ as an array of bytes; `user' points to an array of
 * MCol, four MCol's (16 bytes) per face, one per corner; each output
 * point takes 3 bytes */
static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
{
	int i, totface;
	unsigned char *varray = (unsigned char *)varray_;
	unsigned char *mcol = (unsigned char *)user;
	MFace *f = dm->getTessFaceArray(dm);

	totface = dm->getNumTessFaces(dm);
	for (i = 0; i < totface; i++, f++) {
		int start = index[mat_orig_to_new[f->mat_nr]];

		/* v1 v2 v3 */
		copy_mcol_uc3(&varray[start], &mcol[i * 16]);
		copy_mcol_uc3(&varray[start + 3], &mcol[i * 16 + 4]);
		copy_mcol_uc3(&varray[start + 6], &mcol[i * 16 + 8]);
		index[mat_orig_to_new[f->mat_nr]] += 9;

		/* second triangle for quads: v3 v4 v1 */
		if (f->v4) {
			copy_mcol_uc3(&varray[start + 9], &mcol[i * 16 + 8]);
			copy_mcol_uc3(&varray[start + 12], &mcol[i * 16 + 12]);
			copy_mcol_uc3(&varray[start + 15], &mcol[i * 16]);
			index[mat_orig_to_new[f->mat_nr]] += 9;
		}
	}
}
792
793 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
794 {
795         MEdge *medge;
796         unsigned int *varray = (unsigned int *)varray_;
797         int i, totedge;
798  
799         medge = dm->getEdgeArray(dm);
800         totedge = dm->getNumEdges(dm);
801
802         for (i = 0; i < totedge; i++, medge++) {
803                 varray[i * 2] = dm->drawObject->vert_points[medge->v1].point_index;
804                 varray[i * 2 + 1] = dm->drawObject->vert_points[medge->v2].point_index;
805         }
806 }
807
/* fill the UV-edge buffer: pairs of 2D UV coordinates forming the
 * outline of each tessellated face (3 edges / 12 floats for a
 * triangle, 4 edges / 16 floats for a quad) */
static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
{
	MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
	int i, j = 0;

	/* no texface layer, nothing to copy */
	if (!tf)
		return;

	for (i = 0; i < dm->numTessFaceData; i++, tf++) {
		MFace mf;
		dm->getTessFace(dm, i, &mf);

		/* edge v1-v2 */
		copy_v2_v2(&varray[j], tf->uv[0]);
		copy_v2_v2(&varray[j + 2], tf->uv[1]);

		/* edge v2-v3 */
		copy_v2_v2(&varray[j + 4], tf->uv[1]);
		copy_v2_v2(&varray[j + 6], tf->uv[2]);

		if (!mf.v4) {
			/* closing edge v3-v1 */
			copy_v2_v2(&varray[j + 8], tf->uv[2]);
			copy_v2_v2(&varray[j + 10], tf->uv[0]);
			j += 12;
		}
		else {
			/* edges v3-v4 and v4-v1 */
			copy_v2_v2(&varray[j + 8], tf->uv[2]);
			copy_v2_v2(&varray[j + 10], tf->uv[3]);

			copy_v2_v2(&varray[j + 12], tf->uv[3]);
			copy_v2_v2(&varray[j + 14], tf->uv[0]);
			j += 16;
		}
	}
}
841
/* Kinds of per-DerivedMesh GPU buffers. Values index into
 * gpu_buffer_type_settings[], so the order here must match that table. */
typedef enum {
	GPU_BUFFER_VERTEX = 0,
	GPU_BUFFER_NORMAL,
	GPU_BUFFER_COLOR,
	GPU_BUFFER_UV,
	GPU_BUFFER_EDGE,
	GPU_BUFFER_UVEDGE,
} GPUBufferType;
850
/* Per-buffer-type configuration: how to fill it, which GL target it is
 * created on, and how many components each element has. */
typedef struct {
	GPUBufferCopyFunc copy;   /* callback that fills the mapped buffer */
	GLenum gl_buffer_type;    /* GL target used when creating/filling */
	int vector_size;          /* components per element (e.g. 3 for xyz) */
} GPUBufferTypeSettings;
856
857 const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
858         {GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
859         {GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
860         {GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
861         {GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
862         {GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
863         {GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
864 };
865
866 /* get the GPUDrawObject buffer associated with a type */
867 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
868 {
869         switch (type) {
870                 case GPU_BUFFER_VERTEX:
871                         return &gdo->points;
872                 case GPU_BUFFER_NORMAL:
873                         return &gdo->normals;
874                 case GPU_BUFFER_COLOR:
875                         return &gdo->colors;
876                 case GPU_BUFFER_UV:
877                         return &gdo->uv;
878                 case GPU_BUFFER_EDGE:
879                         return &gdo->edges;
880                 case GPU_BUFFER_UVEDGE:
881                         return &gdo->uvedges;
882                 default:
883                         return NULL;
884         }
885 }
886
/* get the amount of space to allocate for a buffer of a particular type */
static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
{
	switch (type) {
		case GPU_BUFFER_VERTEX:
			/* loose points are stored after the triangle points */
			return sizeof(float) * 3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
		case GPU_BUFFER_NORMAL:
			return sizeof(float) * 3 * dm->drawObject->tot_triangle_point;
		case GPU_BUFFER_COLOR:
			/* 3 unsigned bytes (RGB) per triangle point */
			return sizeof(char) * 3 * dm->drawObject->tot_triangle_point;
		case GPU_BUFFER_UV:
			return sizeof(float) * 2 * dm->drawObject->tot_triangle_point;
		case GPU_BUFFER_EDGE:
			/* two point indices per edge */
			return sizeof(int) * 2 * dm->drawObject->totedge;
		case GPU_BUFFER_UVEDGE:
			/* each face gets 3 points, 3 edges per triangle, and
			 * each edge has its own, non-shared coords, so each
			 * tri corner needs minimum of 4 floats, quads used
			 * less so here we can over allocate and assume all
			 * tris. */
			return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
		default:
			return -1;
	}
}
912
913 /* call gpu_buffer_setup with settings for a particular type of buffer */
914 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
915 {
916         const GPUBufferTypeSettings *ts;
917         void *user_data = NULL;
918         GPUBuffer *buf;
919
920         ts = &gpu_buffer_type_settings[type];
921
922         /* special handling for MCol and UV buffers */
923         if (type == GPU_BUFFER_COLOR) {
924                 if (!(user_data = DM_get_tessface_data_layer(dm, dm->drawObject->colType)))
925                         return NULL;
926         }
927         else if (type == GPU_BUFFER_UV) {
928                 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
929                         return NULL;
930         }
931
932         buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
933                                gpu_buffer_size_from_type(dm, type),
934                                ts->gl_buffer_type, user_data, ts->copy);
935
936         return buf;
937 }
938
939 /* get the buffer of `type', initializing the GPUDrawObject and
940  * buffer if needed */
941 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
942 {
943         GPUBuffer **buf;
944
945         if (!dm->drawObject)
946                 dm->drawObject = GPU_drawobject_new(dm);
947
948         buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
949         if (!(*buf))
950                 *buf = gpu_buffer_setup_type(dm, type);
951
952         return *buf;
953 }
954
955 void GPU_vertex_setup(DerivedMesh *dm)
956 {
957         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
958                 return;
959
960         glEnableClientState(GL_VERTEX_ARRAY);
961         if (useVBOs) {
962                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
963                 glVertexPointer(3, GL_FLOAT, 0, 0);
964         }
965         else {
966                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
967         }
968         
969         GLStates |= GPU_BUFFER_VERTEX_STATE;
970 }
971
972 void GPU_normal_setup(DerivedMesh *dm)
973 {
974         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
975                 return;
976
977         glEnableClientState(GL_NORMAL_ARRAY);
978         if (useVBOs) {
979                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
980                 glNormalPointer(GL_FLOAT, 0, 0);
981         }
982         else {
983                 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
984         }
985
986         GLStates |= GPU_BUFFER_NORMAL_STATE;
987 }
988
989 void GPU_uv_setup(DerivedMesh *dm)
990 {
991         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
992                 return;
993
994         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
995         if (useVBOs) {
996                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
997                 glTexCoordPointer(2, GL_FLOAT, 0, 0);
998         }
999         else {
1000                 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
1001         }
1002
1003         GLStates |= GPU_BUFFER_TEXCOORD_STATE;
1004 }
1005
/* Enable and point the GL color array at the vertex-color buffer,
 * (re)building the buffer when colors are dirty or the requested color
 * layer type changed since the last call. */
void GPU_color_setup(DerivedMesh *dm, int colType)
{
	if (!dm->drawObject) {
		/* XXX Not really nice, but we need a valid gpu draw object to set the colType...
		 *     Else we would have to add a new param to gpu_buffer_setup_common. */
		dm->drawObject = GPU_drawobject_new(dm);
		dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
		dm->drawObject->colType = colType;
	}
	/* In paint mode, dm may stay the same during stroke, however we still want to update colors!
	 * Also check in case we changed color type (i.e. which MCol cdlayer we use). */
	else if ((dm->dirty & DM_DIRTY_MCOL_UPDATE_DRAW) || (colType != dm->drawObject->colType)) {
		GPUBuffer **buf = gpu_drawobject_buffer_from_type(dm->drawObject, GPU_BUFFER_COLOR);
		/* XXX Freeing this buffer is a bit stupid, as geometry has not changed, size should remain the same.
		 *     Not sure though it would be worth defining a sort of gpu_buffer_update func - nor whether
		 *     it is even possible ! */
		GPU_buffer_free(*buf);
		*buf = NULL;
		dm->dirty &= ~DM_DIRTY_MCOL_UPDATE_DRAW;
		dm->drawObject->colType = colType;
	}

	/* recreates the buffer if it was freed above */
	if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
		return;

	glEnableClientState(GL_COLOR_ARRAY);
	if (useVBOs) {
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
	}
	else {
		glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
	}

	GLStates |= GPU_BUFFER_COLOR_STATE;
}
1042
1043 void GPU_edge_setup(DerivedMesh *dm)
1044 {
1045         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1046                 return;
1047
1048         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1049                 return;
1050
1051         glEnableClientState(GL_VERTEX_ARRAY);
1052         if (useVBOs) {
1053                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1054                 glVertexPointer(3, GL_FLOAT, 0, 0);
1055         }
1056         else {
1057                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1058         }
1059         
1060         GLStates |= GPU_BUFFER_VERTEX_STATE;
1061
1062         if (useVBOs)
1063                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1064
1065         GLStates |= GPU_BUFFER_ELEMENT_STATE;
1066 }
1067
1068 void GPU_uvedge_setup(DerivedMesh *dm)
1069 {
1070         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1071                 return;
1072
1073         glEnableClientState(GL_VERTEX_ARRAY);
1074         if (useVBOs) {
1075                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1076                 glVertexPointer(2, GL_FLOAT, 0, 0);
1077         }
1078         else {
1079                 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1080         }
1081         
1082         GLStates |= GPU_BUFFER_VERTEX_STATE;
1083 }
1084
1085 static int GPU_typesize(int type)
1086 {
1087         switch (type) {
1088                 case GL_FLOAT:
1089                         return sizeof(float);
1090                 case GL_INT:
1091                         return sizeof(int);
1092                 case GL_UNSIGNED_INT:
1093                         return sizeof(unsigned int);
1094                 case GL_BYTE:
1095                         return sizeof(char);
1096                 case GL_UNSIGNED_BYTE:
1097                         return sizeof(unsigned char);
1098                 default:
1099                         return 0;
1100         }
1101 }
1102
1103 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1104 {
1105         int i, elementsize = 0;
1106
1107         for (i = 0; i < numdata; i++) {
1108                 int typesize = GPU_typesize(data[i].type);
1109                 if (typesize != 0)
1110                         elementsize += typesize * data[i].size;
1111         }
1112         return elementsize;
1113 }
1114
1115 void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
1116 {
1117         int i;
1118         int elementsize;
1119         intptr_t offset = 0;
1120
1121         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1122                 if (attribData[i].index != -1) {
1123                         glDisableVertexAttribArrayARB(attribData[i].index);
1124                 }
1125                 else
1126                         break;
1127         }
1128         elementsize = GPU_attrib_element_size(data, numdata);
1129
1130         if (useVBOs) {
1131                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1132                 for (i = 0; i < numdata; i++) {
1133                         glEnableVertexAttribArrayARB(data[i].index);
1134                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1135                                                  GL_FALSE, elementsize, (void *)offset);
1136                         offset += data[i].size * GPU_typesize(data[i].type);
1137
1138                         attribData[i].index = data[i].index;
1139                         attribData[i].size = data[i].size;
1140                         attribData[i].type = data[i].type;
1141                 }
1142                 attribData[numdata].index = -1;
1143         }
1144         else {
1145                 for (i = 0; i < numdata; i++) {
1146                         glEnableVertexAttribArrayARB(data[i].index);
1147                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1148                                                  GL_FALSE, elementsize, (char *)buffer->pointer + offset);
1149                         offset += data[i].size * GPU_typesize(data[i].type);
1150                 }
1151         }
1152 }
1153
1154
1155 void GPU_buffer_unbind(void)
1156 {
1157         int i;
1158
1159         if (GLStates & GPU_BUFFER_VERTEX_STATE)
1160                 glDisableClientState(GL_VERTEX_ARRAY);
1161         if (GLStates & GPU_BUFFER_NORMAL_STATE)
1162                 glDisableClientState(GL_NORMAL_ARRAY);
1163         if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1164                 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1165         if (GLStates & GPU_BUFFER_COLOR_STATE)
1166                 glDisableClientState(GL_COLOR_ARRAY);
1167         if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1168                 if (useVBOs) {
1169                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1170                 }
1171         }
1172         GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1173                       GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1174                       GPU_BUFFER_ELEMENT_STATE);
1175
1176         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1177                 if (attribData[i].index != -1) {
1178                         glDisableVertexAttribArrayARB(attribData[i].index);
1179                 }
1180                 else
1181                         break;
1182         }
1183
1184         if (useVBOs)
1185                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1186 }
1187
1188 void GPU_color_switch(int mode)
1189 {
1190         if (mode) {
1191                 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1192                         glEnableClientState(GL_COLOR_ARRAY);
1193                 GLStates |= GPU_BUFFER_COLOR_STATE;
1194         }
1195         else {
1196                 if (GLStates & GPU_BUFFER_COLOR_STATE)
1197                         glDisableClientState(GL_COLOR_ARRAY);
1198                 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1199         }
1200 }
1201
1202 /* return 1 if drawing should be done using old immediate-mode
1203  * code, 0 otherwise */
1204 int GPU_buffer_legacy(DerivedMesh *dm)
1205 {
1206         int test = (U.gameflags & USER_DISABLE_VBO);
1207         if (test)
1208                 return 1;
1209
1210         if (dm->drawObject == 0)
1211                 dm->drawObject = GPU_drawobject_new(dm);
1212         return dm->drawObject->legacy;
1213 }
1214
1215 void *GPU_buffer_lock(GPUBuffer *buffer)
1216 {
1217         float *varray;
1218
1219         if (!buffer)
1220                 return 0;
1221
1222         if (useVBOs) {
1223                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1224                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1225                 return varray;
1226         }
1227         else {
1228                 return buffer->pointer;
1229         }
1230 }
1231
1232 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1233 {
1234         float *varray;
1235
1236         if (!buffer)
1237                 return 0;
1238
1239         if (useVBOs) {
1240                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1241                 /* discard previous data, avoid stalling gpu */
1242                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1243                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1244                 return varray;
1245         }
1246         else {
1247                 return buffer->pointer;
1248         }
1249 }
1250
1251 void GPU_buffer_unlock(GPUBuffer *buffer)
1252 {
1253         if (useVBOs) {
1254                 if (buffer) {
1255                         /* note: this operation can fail, could return
1256                          * an error code from this function? */
1257                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1258                 }
1259                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1260         }
1261 }
1262
1263 /* used for drawing edges */
1264 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1265 {
1266         glDrawElements(mode, count, GL_UNSIGNED_INT,
1267                        (useVBOs ?
1268                         (void *)(start * sizeof(unsigned int)) :
1269                         ((int *)elements->pointer) + start));
1270 }
1271
1272
1273 /* XXX: the rest of the code in this file is used for optimized PBVH
1274  * drawing and doesn't interact at all with the buffer code above */
1275
1276 /* Return false if VBO is either unavailable or disabled by the user,
1277  * true otherwise */
1278 static int gpu_vbo_enabled(void)
1279 {
1280         return (GLEW_ARB_vertex_buffer_object &&
1281                 !(U.gameflags & USER_DISABLE_VBO));
1282 }
1283
/* Convenience struct for building the VBO: one interleaved element per
 * PBVH vertex (position, short normal, padded RGB color). */
typedef struct {
	float co[3];   /* vertex coordinate */
	short no[3];   /* vertex normal (short-encoded) */

	/* inserting this to align the 'color' field to a four-byte
	 * boundary; drastically increases viewport performance on my
	 * drivers (Gallium/Radeon) --nicholasbishop */
	char pad[2];
	
	unsigned char color[3];  /* diffuse color, darkened by the paint mask */
} VertexBufferFormat;
1296
/* Per-PBVH-node drawing data: GL buffer handles plus the mesh/grid/bmesh
 * pointers needed to rebuild or to fall back to immediate-mode drawing. */
struct GPU_Buffers {
	/* opengl buffer handles; zero when allocation failed or VBOs are off */
	GLuint vert_buf, index_buf;
	GLenum index_type;  /* GL type of index_buf entries (e.g. GL_UNSIGNED_SHORT) */

	/* mesh pointers in case buffer allocation fails */
	MFace *mface;
	MVert *mvert;
	int *face_indices;   /* indices of this node's faces into mface */
	int totface;
	const float *vmask;  /* optional per-vertex paint mask */

	/* grid pointers (multires/subsurf drawing) */
	CCGKey gridkey;
	CCGElem **grids;
	const DMFlagMat *grid_flag_mats;
	BLI_bitmap * const *grid_hidden;
	int *grid_indices;
	int totgrid;
	int has_hidden;

	int use_bmesh;  /* true when drawing dynamic-topology (bmesh) data */

	unsigned int tot_tri, tot_quad;

	/* The PBVH ensures that either all faces in the node are
	 * smooth-shaded or all faces are flat-shaded */
	int smooth;

	int show_diffuse_color;   /* modulate colors by the material diffuse */
	float diffuse_color[4];   /* cached material diffuse color */
};
/* whether the color helpers below should also toggle the GL color array */
typedef enum {
	VBO_ENABLED,
	VBO_DISABLED
} VBO_State;
1333
/* Let vertex colors drive the diffuse material component; also enables
 * the color array when drawing from a VBO. Pair with gpu_colors_disable. */
static void gpu_colors_enable(VBO_State vbo_state)
{
	glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
	glEnable(GL_COLOR_MATERIAL);
	if (vbo_state == VBO_ENABLED)
		glEnableClientState(GL_COLOR_ARRAY);
}
1341
/* Undo gpu_colors_enable: stop color-material tracking and, for the VBO
 * path, disable the color array again. */
static void gpu_colors_disable(VBO_State vbo_state)
{
	glDisable(GL_COLOR_MATERIAL);
	if (vbo_state == VBO_ENABLED)
		glDisableClientState(GL_COLOR_ARRAY);
}
1348
/* Map a paint-mask value in 0..1 to a brightness factor in 1.0..0.25
 * (fully masked vertices are drawn at a quarter brightness). */
static float gpu_color_from_mask(float mask)
{
	return 1.0f - (0.75f * mask);
}
1353
/* Write the mask-darkened diffuse color into 'out' as three unsigned
 * bytes; the brightness factor is 1 - 0.75 * mask (gpu_color_from_mask,
 * inlined here), scaled to byte range. */
static void gpu_color_from_mask_copy(float mask, const float diffuse_color[4], unsigned char out[3])
{
	const float mask_color = (1.0f - mask * 0.75f) * 255.0f;

	out[0] = diffuse_color[0] * mask_color;
	out[1] = diffuse_color[1] * mask_color;
	out[2] = diffuse_color[2] * mask_color;
}
1364
/* Set the current GL color to the diffuse color darkened by the mask. */
static void gpu_color_from_mask_set(float mask, float diffuse_color[4])
{
	const float factor = gpu_color_from_mask(mask);

	glColor3f(diffuse_color[0] * factor,
	          diffuse_color[1] * factor,
	          diffuse_color[2] * factor);
}
1370
1371 static float gpu_color_from_mask_quad(const CCGKey *key,
1372                                       CCGElem *a, CCGElem *b,
1373                                       CCGElem *c, CCGElem *d)
1374 {
1375         return gpu_color_from_mask((*CCG_elem_mask(key, a) +
1376                                     *CCG_elem_mask(key, b) +
1377                                     *CCG_elem_mask(key, c) +
1378                                     *CCG_elem_mask(key, d)) * 0.25f);
1379 }
1380
1381 static void gpu_color_from_mask_quad_copy(const CCGKey *key,
1382                                           CCGElem *a, CCGElem *b,
1383                                           CCGElem *c, CCGElem *d,
1384                                           const float *diffuse_color,
1385                                           unsigned char out[3])
1386 {
1387         float mask_color =
1388             gpu_color_from_mask((*CCG_elem_mask(key, a) +
1389                                  *CCG_elem_mask(key, b) +
1390                                  *CCG_elem_mask(key, c) +
1391                                  *CCG_elem_mask(key, d)) * 0.25f) * 255.0f;
1392
1393         out[0] = diffuse_color[0] * mask_color;
1394         out[1] = diffuse_color[1] * mask_color;
1395         out[2] = diffuse_color[2] * mask_color;
1396 }
1397
1398 static void gpu_color_from_mask_quad_set(const CCGKey *key,
1399                                          CCGElem *a, CCGElem *b,
1400                                          CCGElem *c, CCGElem *d,
1401                                          float diffuse_color[4])
1402 {
1403         float color = gpu_color_from_mask_quad(key, a, b, c, d);
1404         glColor3f(diffuse_color[0] * color, diffuse_color[1] * color, diffuse_color[2] * color);
1405 }
1406
/* Rebuild the PBVH-node vertex VBO from mesh data. Smooth shading shares
 * one element per vertex (an index buffer does the triangulation); flat
 * shading emits three unshared elements per triangle so each carries the
 * face normal. Colors come from the material diffuse, darkened by the
 * optional per-vertex mask. On map failure the VBO is deleted so drawing
 * falls back to the stored mesh pointers. */
void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
                             int *vert_indices, int totvert, const float *vmask,
                             int (*face_vert_indices)[4], int show_diffuse_color)
{
	VertexBufferFormat *vert_data;
	int i, j, k;

	buffers->vmask = vmask;
	buffers->show_diffuse_color = show_diffuse_color;

	if (buffers->vert_buf) {
		/* smooth: one element per vertex; flat: three per triangle */
		int totelem = (buffers->smooth ? totvert : (buffers->tot_tri * 3));
		float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

		if (buffers->show_diffuse_color) {
			/* take the material of the node's first face for the whole node */
			MFace *f = buffers->mface + buffers->face_indices[0];

			GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
		}

		copy_v4_v4(buffers->diffuse_color, diffuse_color);

		/* Build VBO */
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
						sizeof(VertexBufferFormat) * totelem,
						NULL, GL_STATIC_DRAW_ARB);

		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);

		if (vert_data) {
			/* Vertex data is shared if smooth-shaded, but separate
			 * copies are made for flat shading because normals
			 * shouldn't be shared. */
			if (buffers->smooth) {
				/* positions and normals straight from the shared vertices */
				for (i = 0; i < totvert; ++i) {
					MVert *v = mvert + vert_indices[i];
					VertexBufferFormat *out = vert_data + i;

					copy_v3_v3(out->co, v->co);
					memcpy(out->no, v->no, sizeof(short) * 3);
				}

/* write the color of one face corner into its shared vertex element */
#define UPDATE_VERTEX(face, vertex, index, diffuse_color) \
				{ \
					VertexBufferFormat *out = vert_data + face_vert_indices[face][index]; \
					if (vmask) \
						gpu_color_from_mask_copy(vmask[vertex], diffuse_color, out->color); \
					else \
						rgb_float_to_uchar(out->color, diffuse_color); \
				} (void)0

				for (i = 0; i < buffers->totface; i++) {
					MFace *f = buffers->mface + buffers->face_indices[i];

					UPDATE_VERTEX(i, f->v1, 0, diffuse_color);
					UPDATE_VERTEX(i, f->v2, 1, diffuse_color);
					UPDATE_VERTEX(i, f->v3, 2, diffuse_color);
					if (f->v4)
						UPDATE_VERTEX(i, f->v4, 3, diffuse_color);
				}
#undef UPDATE_VERTEX
			}
			else {
				for (i = 0; i < buffers->totface; ++i) {
					const MFace *f = &buffers->mface[buffers->face_indices[i]];
					/* fv aliases f->v1..v4 as an array; relies on MFace
					 * laying the four vertex indices out contiguously */
					const unsigned int *fv = &f->v1;
					/* corner order for the one or two emitted triangles */
					const int vi[2][3] = {{0, 1, 2}, {3, 0, 2}};
					float fno[3];
					short no[3];

					float fmask;

					if (paint_is_face_hidden(f, mvert))
						continue;

					/* Face normal and mask */
					if (f->v4) {
						normal_quad_v3(fno,
									   mvert[fv[0]].co,
									   mvert[fv[1]].co,
									   mvert[fv[2]].co,
									   mvert[fv[3]].co);
						if (vmask) {
							fmask = (vmask[fv[0]] +
									 vmask[fv[1]] +
									 vmask[fv[2]] +
									 vmask[fv[3]]) * 0.25f;
						}
					}
					else {
						normal_tri_v3(fno,
									  mvert[fv[0]].co,
									  mvert[fv[1]].co,
									  mvert[fv[2]].co);
						if (vmask) {
							fmask = (vmask[fv[0]] +
									 vmask[fv[1]] +
									 vmask[fv[2]]) / 3.0f;
						}
					}
					normal_float_to_short_v3(no, fno);

					/* emit 3 elements per triangle, 6 per quad, all with
					 * the same flat face normal and averaged face mask */
					for (j = 0; j < (f->v4 ? 2 : 1); j++) {
						for (k = 0; k < 3; k++) {
							const MVert *v = &mvert[fv[vi[j][k]]];
							VertexBufferFormat *out = vert_data;

							copy_v3_v3(out->co, v->co);
							memcpy(out->no, no, sizeof(short) * 3);

							if (vmask)
								gpu_color_from_mask_copy(fmask, diffuse_color, out->color);
							else
								rgb_float_to_uchar(out->color, diffuse_color);

							vert_data++;
						}
					}
				}
			}

			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			/* mapping failed: drop the VBO so drawing uses the
			 * immediate-mode fallback via the stored mesh pointers */
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	buffers->mvert = mvert;
}
1541
/* Create GPU buffer state for drawing a regular-mesh PBVH node.
 *
 * face_vert_indices maps each node face to up to four node-local vertex
 * indices; mface/mvert are the mesh face/vertex arrays, while
 * face_indices/totface select this node's faces.
 *
 * Counts the node's visible triangles, builds (for smooth shading only)
 * an unsigned-short element index buffer, and generates the vertex VBO
 * that a later update call will fill.  Returns a heap-allocated
 * GPU_Buffers owned by the caller. */
GPU_Buffers *GPU_build_mesh_buffers(int (*face_vert_indices)[4],
                                    MFace *mface, MVert *mvert,
                                    int *face_indices,
                                    int totface)
{
	GPU_Buffers *buffers;
	unsigned short *tri_data;
	int i, j, k, tottri;

	buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
	buffers->index_type = GL_UNSIGNED_SHORT;
	/* the first face's smooth flag decides shading for the whole node */
	buffers->smooth = mface[face_indices[0]].flag & ME_SMOOTH;

	buffers->show_diffuse_color = FALSE;

	/* Count the number of visible triangles */
	for (i = 0, tottri = 0; i < totface; ++i) {
		const MFace *f = &mface[face_indices[i]];
		if (!paint_is_face_hidden(f, mvert))
			tottri += f->v4 ? 2 : 1;
	}

	/* An element index buffer is used for smooth shading, but flat
	 * shading requires separate vertex normals so an index buffer
	 * can't be used there. */
	if (gpu_vbo_enabled() && buffers->smooth)
		glGenBuffersARB(1, &buffers->index_buf);

	if (buffers->index_buf) {
		/* Generate index buffer object */
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
		                sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);

		/* Fill the triangle buffer */
		tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if (tri_data) {
			for (i = 0; i < totface; ++i) {
				const MFace *f = mface + face_indices[i];
				int v[3];

				/* Skip hidden faces */
				if (paint_is_face_hidden(f, mvert))
					continue;

				/* first triangle uses corners 0,1,2; for a quad a second
				 * triangle uses corners 3,0,2 (v[] is updated below) */
				v[0] = 0;
				v[1] = 1;
				v[2] = 2;

				for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
					for (k = 0; k < 3; ++k) {
						*tri_data = face_vert_indices[i][v[k]];
						tri_data++;
					}
					v[0] = 3;
					v[1] = 0;
					v[2] = 2;
				}
			}
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
		}
		else {
			/* mapping failed: discard the index buffer */
			glDeleteBuffersARB(1, &buffers->index_buf);
			buffers->index_buf = 0;
		}

		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
	}

	/* Vertex VBO: for flat shading it is used even without an index buffer */
	if (gpu_vbo_enabled() && (buffers->index_buf || !buffers->smooth))
		glGenBuffersARB(1, &buffers->vert_buf);

	buffers->tot_tri = tottri;

	buffers->mface = mface;
	buffers->face_indices = face_indices;
	buffers->totface = totface;

	return buffers;
}
1622
/* Fill (or refill) the vertex VBO of a multires-grids node with
 * coordinates, normals and mask colors read from the CCG grids.
 *
 * grids/grid_indices/totgrid select the node's grids, grid_flag_mats
 * holds per-grid shading/material flags, and key describes the CCG
 * element layout.  When show_diffuse_color is set, the material's
 * diffuse color is baked into the per-vertex mask colors.  If mapping
 * the buffer fails, the VBO is deleted so drawing falls back to the
 * legacy (non-VBO) path. */
void GPU_update_grid_buffers(GPU_Buffers *buffers, CCGElem **grids,
                             const DMFlagMat *grid_flag_mats, int *grid_indices,
                             int totgrid, const CCGKey *key, int show_diffuse_color)
{
	VertexBufferFormat *vert_data;
	int i, j, k, x, y;

	buffers->show_diffuse_color = show_diffuse_color;

	/* Build VBO */
	if (buffers->vert_buf) {
		int totvert = key->grid_area * totgrid;
		/* the first grid's smooth flag decides shading for the whole node */
		int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
		const int has_mask = key->has_mask;
		float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

		if (buffers->show_diffuse_color) {
			const DMFlagMat *flags = &grid_flag_mats[grid_indices[0]];

			GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
		}

		copy_v4_v4(buffers->diffuse_color, diffuse_color);

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
		/* orphan the old storage, then map for writing */
		glBufferDataARB(GL_ARRAY_BUFFER_ARB,
		                sizeof(VertexBufferFormat) * totvert,
		                NULL, GL_STATIC_DRAW_ARB);
		vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if (vert_data) {
			for (i = 0; i < totgrid; ++i) {
				VertexBufferFormat *vd = vert_data;
				CCGElem *grid = grids[grid_indices[i]];

				/* first pass: coords for all vertices, normals/mask colors
				 * only when smooth-shaded */
				for (y = 0; y < key->grid_size; y++) {
					for (x = 0; x < key->grid_size; x++) {
						CCGElem *elem = CCG_grid_elem(key, grid, x, y);

						copy_v3_v3(vd->co, CCG_elem_co(key, elem));
						if (smooth) {
							normal_float_to_short_v3(vd->no, CCG_elem_no(key, elem));

							if (has_mask) {
								gpu_color_from_mask_copy(*CCG_elem_mask(key, elem),
								                         diffuse_color, vd->color);
							}
						}
						vd++;
					}
				}

				if (!smooth) {
					/* for flat shading, recalc normals and set the last vertex of
					 * each quad in the index buffer to have the flat normal as
					 * that is what opengl will use */
					for (j = 0; j < key->grid_size - 1; j++) {
						for (k = 0; k < key->grid_size - 1; k++) {
							CCGElem *elems[4] = {
								CCG_grid_elem(key, grid, k, j + 1),
								CCG_grid_elem(key, grid, k + 1, j + 1),
								CCG_grid_elem(key, grid, k + 1, j),
								CCG_grid_elem(key, grid, k, j)
							};
							float fno[3];

							normal_quad_v3(fno,
							               CCG_elem_co(key, elems[0]),
							               CCG_elem_co(key, elems[1]),
							               CCG_elem_co(key, elems[2]),
							               CCG_elem_co(key, elems[3]));

							/* last vertex of the quad carries the face normal
							 * and the quad-averaged mask color */
							vd = vert_data + (j + 1) * key->grid_size + (k + 1);
							normal_float_to_short_v3(vd->no, fno);

							if (has_mask) {
								gpu_color_from_mask_quad_copy(key,
								                              elems[0],
								                              elems[1],
								                              elems[2],
								                              elems[3],
								                              diffuse_color,
								                              vd->color);
							}
						}
					}
				}

				vert_data += key->grid_area;
			}
			glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
		}
		else {
			/* mapping failed: drop the VBO, legacy drawing will be used */
			glDeleteBuffersARB(1, &buffers->vert_buf);
			buffers->vert_buf = 0;
		}
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
	}

	/* remember the source data for the legacy (non-VBO) draw path */
	buffers->grids = grids;
	buffers->grid_indices = grid_indices;
	buffers->totgrid = totgrid;
	buffers->grid_flag_mats = grid_flag_mats;
	buffers->gridkey = *key;

	buffers->smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;

	//printf("node updated %p\n", buffers);
}
1731
1732 /* Returns the number of visible quads in the nodes' grids. */
1733 static int gpu_count_grid_quads(BLI_bitmap **grid_hidden,
1734                                 int *grid_indices, int totgrid,
1735                                 int gridsize)
1736 {
1737         int gridarea = (gridsize - 1) * (gridsize - 1);
1738         int i, x, y, totquad;
1739
1740         /* grid hidden layer is present, so have to check each grid for
1741          * visibility */
1742
1743         for (i = 0, totquad = 0; i < totgrid; i++) {
1744                 const BLI_bitmap *gh = grid_hidden[grid_indices[i]];
1745
1746                 if (gh) {
1747                         /* grid hidden are present, have to check each element */
1748                         for (y = 0; y < gridsize - 1; y++) {
1749                                 for (x = 0; x < gridsize - 1; x++) {
1750                                         if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
1751                                                 totquad++;
1752                                 }
1753                         }
1754                 }
1755                 else
1756                         totquad += gridarea;
1757         }
1758
1759         return totquad;
1760 }
1761
/* Build the element array buffer of grid indices using either
 * unsigned shorts or unsigned ints (type_), emitting four indices per
 * visible quad.
 *
 * Expects GL_ELEMENT_ARRAY_BUFFER_ARB to already be bound to buffer_,
 * and the variables 'totgrid', 'gridsize', 'grid_hidden' and
 * 'grid_indices' to be in scope at the expansion site (grid_hidden may
 * be NULL, meaning nothing is hidden).  Quads flagged hidden in a
 * grid's bitmap are skipped.  If glMapBufferARB fails, the buffer is
 * deleted and buffer_ is set to 0. */
#define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_)                     \
	{                                                                   \
		type_ *quad_data;                                               \
		int offset = 0;                                                 \
		int i, j, k;                                                    \
		                                                                \
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,                    \
		                sizeof(type_) * (tot_quad_) * 4, NULL,          \
		                GL_STATIC_DRAW_ARB);                            \
		                                                                \
		/* Fill the quad buffer */                                      \
		quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB,         \
		                           GL_WRITE_ONLY_ARB);                  \
		if (quad_data) {                                                \
			for (i = 0; i < totgrid; ++i) {                             \
				BLI_bitmap *gh = NULL;                                  \
				if (grid_hidden)                                        \
					gh = grid_hidden[(grid_indices)[i]];                \
				                                                        \
				for (j = 0; j < gridsize - 1; ++j) {                    \
					for (k = 0; k < gridsize - 1; ++k) {                \
						/* Skip hidden grid face */                     \
						if (gh &&                                       \
						    paint_is_grid_face_hidden(gh,               \
						                              gridsize, k, j))  \
							continue;                                   \
						                                                \
						*(quad_data++) = offset + j * gridsize + k + 1; \
						*(quad_data++) = offset + j * gridsize + k;     \
						*(quad_data++) = offset + (j + 1) * gridsize + k; \
						*(quad_data++) = offset + (j + 1) * gridsize + k + 1; \
					}                                                   \
				}                                                       \
				                                                        \
				offset += gridsize * gridsize;                          \
			}                                                           \
			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);              \
		}                                                               \
		else {                                                          \
			glDeleteBuffersARB(1, &(buffer_));                          \
			(buffer_) = 0;                                              \
		}                                                               \
	} (void)0
/* end FILL_QUAD_BUFFER */
1808
1809 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1810 {
1811         static int prev_gridsize = -1;
1812         static GLenum prev_index_type = 0;
1813         static GLuint buffer = 0;
1814         static unsigned prev_totquad;
1815
1816         /* used in the FILL_QUAD_BUFFER macro */
1817         BLI_bitmap * const *grid_hidden = NULL;
1818         int *grid_indices = NULL;
1819         int totgrid = 1;
1820
1821         /* VBO is disabled; delete the previous buffer (if it exists) and
1822          * return an invalid handle */
1823         if (!gpu_vbo_enabled()) {
1824                 if (buffer)
1825                         glDeleteBuffersARB(1, &buffer);
1826                 return 0;
1827         }
1828
1829         /* VBO is already built */
1830         if (buffer && prev_gridsize == gridsize) {
1831                 *index_type = prev_index_type;
1832                 *totquad = prev_totquad;
1833                 return buffer;
1834         }
1835
1836         /* Build new VBO */
1837         glGenBuffersARB(1, &buffer);
1838         if (buffer) {
1839                 *totquad = (gridsize - 1) * (gridsize - 1);
1840
1841                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1842
1843                 if (gridsize * gridsize < USHRT_MAX) {
1844                         *index_type = GL_UNSIGNED_SHORT;
1845                         FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1846                 }
1847                 else {
1848                         *index_type = GL_UNSIGNED_INT;
1849                         FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1850                 }
1851
1852                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1853         }
1854
1855         prev_gridsize = gridsize;
1856         prev_index_type = *index_type;
1857         prev_totquad = *totquad;
1858         return buffer;
1859 }
1860
1861 GPU_Buffers *GPU_build_grid_buffers(int *grid_indices, int totgrid,
1862                                     BLI_bitmap **grid_hidden, int gridsize)
1863 {
1864         GPU_Buffers *buffers;
1865         int totquad;
1866         int fully_visible_totquad = (gridsize - 1) * (gridsize - 1) * totgrid;
1867
1868         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1869         buffers->grid_hidden = grid_hidden;
1870         buffers->totgrid = totgrid;
1871
1872         buffers->show_diffuse_color = FALSE;
1873
1874         /* Count the number of quads */
1875         totquad = gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);
1876
1877         if (totquad == fully_visible_totquad) {
1878                 buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
1879                 buffers->has_hidden = 0;
1880         }
1881         else if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO)) {
1882                 /* Build new VBO */
1883                 glGenBuffersARB(1, &buffers->index_buf);
1884                 if (buffers->index_buf) {
1885                         buffers->tot_quad = totquad;
1886
1887                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1888
1889                         if (totgrid * gridsize * gridsize < USHRT_MAX) {
1890                                 buffers->index_type = GL_UNSIGNED_SHORT;
1891                                 FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
1892                         }
1893                         else {
1894                                 buffers->index_type = GL_UNSIGNED_INT;
1895                                 FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
1896                         }
1897
1898                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1899                 }
1900
1901                 buffers->has_hidden = 1;
1902         }
1903
1904         /* Build coord/normal VBO */
1905         if (buffers->index_buf)
1906                 glGenBuffersARB(1, &buffers->vert_buf);
1907
1908         return buffers;
1909 }
1910
1911 #undef FILL_QUAD_BUFFER
1912
1913 /* Output a BMVert into a VertexBufferFormat array
1914  *
1915  * The vertex is skipped if hidden, otherwise the output goes into
1916  * index '*v_index' in the 'vert_data' array and '*v_index' is
1917  * incremented.
1918  */
1919 static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
1920                                           VertexBufferFormat *vert_data,
1921                                           int *v_index,
1922                                           const float fno[3],
1923                                           const float *fmask,
1924                                           const int cd_vert_mask_offset)
1925 {
1926         if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
1927                 VertexBufferFormat *vd = &vert_data[*v_index];
1928
1929                 /* TODO: should use material color */
1930                 float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};
1931
1932                 /* Set coord, normal, and mask */
1933                 copy_v3_v3(vd->co, v->co);
1934                 normal_float_to_short_v3(vd->no, fno ? fno : v->no);
1935
1936                 gpu_color_from_mask_copy(
1937                         fmask ? *fmask :
1938                                 BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset),
1939                         diffuse_color,
1940                         vd->color);
1941                 
1942
1943                 /* Assign index for use in the triangle index buffer */
1944                 BM_elem_index_set(v, (*v_index)); /* set_dirty! */
1945
1946                 (*v_index)++;
1947         }
1948 }
1949
1950 /* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
1951 static int gpu_bmesh_vert_visible_count(GHash *bm_unique_verts,
1952                                                                                 GHash *bm_other_verts)
1953 {
1954         GHashIterator gh_iter;
1955         int totvert = 0;
1956
1957         GHASH_ITER (gh_iter, bm_unique_verts) {
1958                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1959                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1960                         totvert++;
1961         }
1962         GHASH_ITER (gh_iter, bm_other_verts) {
1963                 BMVert *v = BLI_ghashIterator_getKey(&gh_iter);
1964                 if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN))
1965                         totvert++;
1966         }
1967
1968         return totvert;
1969 }
1970
1971 /* Return the total number of visible faces */
1972 static int gpu_bmesh_face_visible_count(GHash *bm_faces)
1973 {
1974         GHashIterator gh_iter;
1975         int totface = 0;
1976
1977         GHASH_ITER (gh_iter, bm_faces) {
1978                 BMFace *f = BLI_ghashIterator_getKey(&gh_iter);
1979
1980                 if (!paint_is_bmesh_face_hidden(f))
1981                         totface++;
1982         }
1983
1984         return totface;
1985 }
1986
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
 * shading, an element index buffer.
 *
 * Only visible (non-hidden) faces and vertices are uploaded.  For
 * smooth shading each visible vertex appears once and triangles index
 * into the vertex buffer; for flat shading every visible triangle
 * gets three dedicated vertices carrying the face normal and averaged
 * mask.  If mapping a buffer fails, that buffer is deleted so drawing
 * can fall back gracefully. */
void GPU_update_bmesh_buffers(GPU_Buffers *buffers,
                              BMesh *bm,
                              GHash *bm_faces,
                              GHash *bm_unique_verts,
                              GHash *bm_other_verts)
{
	VertexBufferFormat *vert_data;
	void *tri_data;
	int tottri, totvert, maxvert = 0;

	/* TODO, make mask layer optional for bmesh buffer */
	const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

	/* nothing to do if the needed buffers were never created */
	if (!buffers->vert_buf || (buffers->smooth && !buffers->index_buf))
		return;

	/* Count visible triangles */
	tottri = gpu_bmesh_face_visible_count(bm_faces);

	if (buffers->smooth) {
		/* Count visible vertices */
		totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
	}
	else
		totvert = tottri * 3;

	/* Initialize vertex buffer */
	glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
	glBufferDataARB(GL_ARRAY_BUFFER_ARB,
	                sizeof(VertexBufferFormat) * totvert,
	                NULL, GL_STATIC_DRAW_ARB);

	/* Fill vertex buffer */
	vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
	if (vert_data) {
		GHashIterator gh_iter;
		int v_index = 0;

		if (buffers->smooth) {
			/* Vertices get an index assigned for use in the triangle
			 * index buffer */
			bm->elem_index_dirty |= BM_VERT;

			GHASH_ITER (gh_iter, bm_unique_verts) {
				gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
				                              vert_data, &v_index, NULL, NULL,
				                              cd_vert_mask_offset);
			}

			GHASH_ITER (gh_iter, bm_other_verts) {
				gpu_bmesh_vert_to_buffer_copy(BLI_ghashIterator_getKey(&gh_iter),
				                              vert_data, &v_index, NULL, NULL,
				                              cd_vert_mask_offset);
			}

			maxvert = v_index;
		}
		else {
			/* flat shading: emit three vertices per visible triangle,
			 * all carrying the face normal and the face-averaged mask */
			GHASH_ITER (gh_iter, bm_faces) {
				BMFace *f = BLI_ghashIterator_getKey(&gh_iter);

				BLI_assert(f->len == 3);

				if (!paint_is_bmesh_face_hidden(f)) {
					BMVert *v[3];
					float fmask = 0;
					int i;

					// BM_iter_as_array(bm, BM_VERTS_OF_FACE, f, (void**)v, 3);
					BM_face_as_array_vert_tri(f, v);

					/* Average mask value */
					for (i = 0; i < 3; i++) {
						fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
					}
					fmask /= 3.0f;

					for (i = 0; i < 3; i++) {
						gpu_bmesh_vert_to_buffer_copy(v[i], vert_data,
						                              &v_index, f->no, &fmask,
						                              cd_vert_mask_offset);
					}
				}
			}

			buffers->tot_tri = tottri;
		}

		glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
	}
	else {
		/* Memory map failed */
		glDeleteBuffersARB(1, &buffers->vert_buf);
		buffers->vert_buf = 0;
		return;
	}

	if (buffers->smooth) {
		/* pick the smallest index type that can address every vertex */
		const int use_short = (maxvert < USHRT_MAX);

		/* Initialize triangle index buffer */
		glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
		glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
		                (use_short ?
		                 sizeof(unsigned short) :
		                 sizeof(unsigned int)) * 3 * tottri,
		                NULL, GL_STATIC_DRAW_ARB);

		/* Fill triangle index buffer */
		tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
		if (tri_data) {
			GHashIterator gh_iter;

			GHASH_ITER (gh_iter, bm_faces) {
				BMFace *f = BLI_ghashIterator_getKey(&gh_iter);

				if (!paint_is_bmesh_face_hidden(f)) {
					BMLoop *l_iter;
					BMLoop *l_first;

					/* emit the vertex indices assigned during the vertex
					 * pass above, one per loop of the triangle */
					l_iter = l_first = BM_FACE_FIRST_LOOP(f);
					do {
						BMVert *v = l_iter->v;
						if (use_short) {
							unsigned short *elem = tri_data;
							(*elem) = BM_elem_index_get(v);
							elem++;
							tri_data = elem;
						}
						else {
							unsigned int *elem = tri_data;
							(*elem) = BM_elem_index_get(v);
							elem++;
							tri_data = elem;
						}
					} while ((l_iter = l_iter->next) != l_first);
				}
			}

			glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);

			buffers->tot_tri = tottri;
			buffers->index_type = (use_short ?
			                       GL_UNSIGNED_SHORT :
			                       GL_UNSIGNED_INT);
		}
		else {
			/* Memory map failed */
			glDeleteBuffersARB(1, &buffers->index_buf);
			buffers->index_buf = 0;
		}
	}
}
2142
2143 GPU_Buffers *GPU_build_bmesh_buffers(int smooth_shading)
2144 {
2145         GPU_Buffers *buffers;
2146
2147         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
2148         if (smooth_shading)
2149                 glGenBuffersARB(1, &buffers->index_buf);
2150         glGenBuffersARB(1, &buffers->vert_buf);
2151         buffers->use_bmesh = TRUE;
2152         buffers->smooth = smooth_shading;
2153
2154         return buffers;
2155 }
2156
/* Immediate-mode fallback drawing of a regular-mesh node, used when
 * VBOs are unavailable or buffer creation/mapping failed.  Draws each
 * visible face as a quad or triangle; mask colors are applied per
 * vertex for smooth shading and per face for flat shading. */
static void gpu_draw_buffers_legacy_mesh(GPU_Buffers *buffers)
{
	const MVert *mvert = buffers->mvert;
	int i, j;
	const int has_mask = (buffers->vmask != NULL);
	const MFace *face = &buffers->mface[buffers->face_indices[0]];
	float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

	if (buffers->show_diffuse_color)
		GPU_material_diffuse_get(face->mat_nr + 1, diffuse_color);

	if (has_mask) {
		gpu_colors_enable(VBO_DISABLED);
	}

	for (i = 0; i < buffers->totface; ++i) {
		MFace *f = buffers->mface + buffers->face_indices[i];
		int S = f->v4 ? 4 : 3;
		/* v1..v4 are consecutive in MFace, so treat them as an array */
		unsigned int *fv = &f->v1;

		if (paint_is_face_hidden(f, buffers->mvert))
			continue;

		glBegin((f->v4) ? GL_QUADS : GL_TRIANGLES);

		if (buffers->smooth) {
			/* per-vertex normal and mask color */
			for (j = 0; j < S; j++) {
				if (has_mask) {
					gpu_color_from_mask_set(buffers->vmask[fv[j]], diffuse_color);
				}
				glNormal3sv(mvert[fv[j]].no);
				glVertex3fv(mvert[fv[j]].co);
			}
		}
		else {
			float fno[3];

			/* calculate face normal */
			if (f->v4) {
				normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
				               mvert[fv[2]].co, mvert[fv[3]].co);
			}
			else
				normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
			glNormal3fv(fno);

			if (has_mask) {
				float fmask;

				/* calculate face mask color (average of corner masks) */
				fmask = (buffers->vmask[fv[0]] +
				         buffers->vmask[fv[1]] +
				         buffers->vmask[fv[2]]);
				if (f->v4)
					fmask = (fmask + buffers->vmask[fv[3]]) * 0.25f;
				else
					fmask /= 3.0f;
				gpu_color_from_mask_set(fmask, diffuse_color);
			}

			for (j = 0; j < S; j++)
				glVertex3fv(mvert[fv[j]].co);
		}

		glEnd();
	}

	if (has_mask) {
		gpu_colors_disable(VBO_DISABLED);
	}
}
2228
/* Immediate-mode fallback for drawing a multires/CCG grids PBVH node.
 *
 * Three drawing paths per grid:
 * - grid has a hidden-face bitmap: individual GL_QUADS, skipping hidden faces;
 * - smooth shading: one GL_QUAD_STRIP per grid row with per-vertex normals;
 * - flat shading: one GL_QUAD_STRIP per grid row with one computed normal per
 *   quad (emitted once the second column of the strip is reached).
 *
 * Mask values, when the CCG key carries them, are blended into the diffuse
 * color via gpu_color_from_mask_set/_quad_set. */
static void gpu_draw_buffers_legacy_grids(GPU_Buffers *buffers)
{
	const CCGKey *key = &buffers->gridkey;
	int i, j, x, y, gridsize = buffers->gridkey.grid_size;
	const int has_mask = key->has_mask;
	const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
	float diffuse_color[4] = {0.8f, 0.8f, 0.8f, 1.0f};

	/* material color comes from the first grid's flags only */
	if (buffers->show_diffuse_color)
		GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);

	if (has_mask) {
		gpu_colors_enable(VBO_DISABLED);
	}

	for (i = 0; i < buffers->totgrid; ++i) {
		int g = buffers->grid_indices[i];
		CCGElem *grid = buffers->grids[g];
		BLI_bitmap *gh = buffers->grid_hidden[g];

		/* TODO: could use strips with hiding as well */

		if (gh) {
			/* hidden faces present: emit quads one by one so individual
			 * faces can be skipped */
			glBegin(GL_QUADS);
			
			for (y = 0; y < gridsize - 1; y++) {
				for (x = 0; x < gridsize - 1; x++) {
					/* corners in the winding order used for drawing */
					CCGElem *e[4] = {
						CCG_grid_elem(key, grid, x + 1, y + 1),
						CCG_grid_elem(key, grid, x + 1, y),
						CCG_grid_elem(key, grid, x, y),
						CCG_grid_elem(key, grid, x, y + 1)
					};

					/* skip face if any of its corners are hidden */
					if (paint_is_grid_face_hidden(gh, gridsize, x, y))
						continue;

					if (buffers->smooth) {
						/* per-corner normal and mask color */
						for (j = 0; j < 4; j++) {
							if (has_mask) {
								gpu_color_from_mask_set(*CCG_elem_mask(key, e[j]), diffuse_color);
							}
							glNormal3fv(CCG_elem_no(key, e[j]));
							glVertex3fv(CCG_elem_co(key, e[j]));
						}
					}
					else {
						/* single face normal and averaged mask color */
						float fno[3];
						normal_quad_v3(fno,
						               CCG_elem_co(key, e[0]),
						               CCG_elem_co(key, e[1]),
						               CCG_elem_co(key, e[2]),
						               CCG_elem_co(key, e[3]));
						glNormal3fv(fno);

						if (has_mask) {
							gpu_color_from_mask_quad_set(key, e[0], e[1], e[2], e[3], diffuse_color);
						}

						for (j = 0; j < 4; j++)
							glVertex3fv(CCG_elem_co(key, e[j]));
					}
				}
			}

			glEnd();
		}
		else if (buffers->smooth) {
			/* no hiding, smooth: one quad strip per row, alternating the
			 * vertices of rows y and y+1 */
			for (y = 0; y < gridsize - 1; y++) {
				glBegin(GL_QUAD_STRIP);
				for (x = 0; x < gridsize; x++) {
					CCGElem *a = CCG_grid_elem(key, grid, x, y);
					CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);

					if (has_mask) {
						gpu_color_from_mask_set(*CCG_elem_mask(key, a), diffuse_color);
					}
					glNormal3fv(CCG_elem_no(key, a));
					glVertex3fv(CCG_elem_co(key, a));
					if (has_mask) {
						gpu_color_from_mask_set(*CCG_elem_mask(key, b), diffuse_color);
					}
					glNormal3fv(CCG_elem_no(key, b));
					glVertex3fv(CCG_elem_co(key, b));
				}
				glEnd();
			}
		}
		else {
			/* no hiding, flat: quad strips where each quad's normal is
			 * computed once its trailing edge (column x-1) is known */
			for (y = 0; y < gridsize - 1; y++) {
				glBegin(GL_QUAD_STRIP);
				for (x = 0; x < gridsize; x++) {
					CCGElem *a = CCG_grid_elem(key, grid, x, y);
					CCGElem *b = CCG_grid_elem(key, grid, x, y + 1);

					if (x > 0) {
						CCGElem *c = CCG_grid_elem(key, grid, x - 1, y);
						CCGElem *d = CCG_grid_elem(key, grid, x - 1, y + 1);

						/* normal of the quad completed by this column */
						float fno[3];
						normal_quad_v3(fno,
						               CCG_elem_co(key, d),
						               CCG_elem_co(key, b),
						               CCG_elem_co(key, a),
						               CCG_elem_co(key, c));
						glNormal3fv(fno);

						if (has_mask) {
							gpu_color_from_mask_quad_set(key, a, b, c, d, diffuse_color);
						}
					}

					glVertex3fv(CCG_elem_co(key, a));
					glVertex3fv(CCG_elem_co(key, b));
				}
				glEnd();
			}
		}
	}

	if (has_mask) {
		gpu_colors_disable(VBO_DISABLED);
	}
}
2354
/* Draw a PBVH node's buffers.
 *
 * Uses the VBO path when vert_buf was successfully created, otherwise falls
 * back to the immediate-mode legacy drawing functions. setMaterial may be
 * NULL (e.g. for wireframe); when given, it can veto drawing by returning
 * false for the node's material. */
void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial,
                                          int wireframe)
{
	/* sets material from the first face, to solve properly face would need to
	 * be sorted in buckets by materials */
	if (setMaterial) {
		if (buffers->totface) {
			const MFace *f = &buffers->mface[buffers->face_indices[0]];
			if (!setMaterial(f->mat_nr + 1, NULL))
				return;
		}
		else if (buffers->totgrid) {
			const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
			if (!setMaterial(f->mat_nr + 1, NULL))
				return;
		}
		else {
			if (!setMaterial(1, NULL))
				return;
		}
	}

	/* mesh faces always get smooth shading here; only grids drawn without
	 * per-vertex smoothing use GL_FLAT */
	glShadeModel((buffers->smooth || buffers->totface) ? GL_SMOOTH : GL_FLAT);

	if (buffers->vert_buf) {
		glEnableClientState(GL_VERTEX_ARRAY);
		if (!wireframe) {
			glEnableClientState(GL_NORMAL_ARRAY);
			gpu_colors_enable(VBO_ENABLED);
		}

		glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);

		if (buffers->index_buf)
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);

		if (wireframe)
			glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

		if (buffers->tot_quad) {
			/* grids path: the vertex buffer holds all grids back to back,
			 * so step the attribute pointers by one grid per draw call.
			 * NOTE(review): when has_hidden is set only one draw is issued
			 * (last == 1) -- presumably the index buffer then covers all
			 * grids at once; confirm against the grid buffer setup code. */
			char *offset = 0;
			int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
			for (i = 0; i < last; i++) {
				glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
				                offset + offsetof(VertexBufferFormat, co));
				glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
				                offset + offsetof(VertexBufferFormat, no));
				glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
				               offset + offsetof(VertexBufferFormat, color));
				
				glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);

				/* advance one grid's worth of vertices */
				offset += buffers->gridkey.grid_area * sizeof(VertexBufferFormat);
			}
		}
		else {
			/* mesh/bmesh triangles path: single draw call, indexed when an
			 * index buffer exists, plain array draw otherwise */
			int totelem = buffers->tot_tri * 3;

			glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat),
			                (void *)offsetof(VertexBufferFormat, co));
			glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat),
			                (void *)offsetof(VertexBufferFormat, no));
			glColorPointer(3, GL_UNSIGNED_BYTE, sizeof(VertexBufferFormat),
			               (void *)offsetof(VertexBufferFormat, color));

			if (buffers->index_buf)
				glDrawElements(GL_TRIANGLES, totelem, buffers->index_type, 0);
			else
				glDrawArrays(GL_TRIANGLES, 0, totelem);
		}

		if (wireframe)
			glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

		/* restore GL state in reverse order of setup */
		glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
		if (buffers->index_buf)
			glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);

		glDisableClientState(GL_VERTEX_ARRAY);
		if (!wireframe) {
			glDisableClientState(GL_NORMAL_ARRAY);
			gpu_colors_disable(VBO_ENABLED);
		}
	}
	/* fallbacks if we are out of memory or VBO is disabled */
	else if (buffers->totface) {
		gpu_draw_buffers_legacy_mesh(buffers);
	}
	else if (buffers->totgrid) {
		gpu_draw_buffers_legacy_grids(buffers);
	}
}
2447
2448 int GPU_buffers_diffuse_changed(GPU_Buffers *buffers, int show_diffuse_color)
2449 {
2450         float diffuse_color[4];
2451
2452         if (buffers->show_diffuse_color != show_diffuse_color)
2453                 return TRUE;
2454
2455         if (buffers->show_diffuse_color == FALSE)
2456                 return FALSE;
2457
2458         if (buffers->mface) {
2459                 MFace *f = buffers->mface + buffers->face_indices[0];
2460
2461                 GPU_material_diffuse_get(f->mat_nr + 1, diffuse_color);
2462         }
2463         else {
2464                 const DMFlagMat *flags = &buffers->grid_flag_mats[buffers->grid_indices[0]];
2465
2466                 GPU_material_diffuse_get(flags->mat_nr + 1, diffuse_color);
2467         }
2468
2469         return diffuse_color[0] != buffers->diffuse_color[0] ||
2470                diffuse_color[1] != buffers->diffuse_color[1] ||
2471                diffuse_color[2] != buffers->diffuse_color[2];
2472 }
2473
2474 void GPU_free_buffers(GPU_Buffers *buffers)
2475 {
2476         if (buffers) {
2477                 if (buffers->vert_buf)
2478                         glDeleteBuffersARB(1, &buffers->vert_buf);
2479                 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
2480                         glDeleteBuffersARB(1, &buffers->index_buf);
2481
2482                 MEM_freeN(buffers);
2483         }
2484 }