1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2005 Blender Foundation.
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): Brecht Van Lommel.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/gpu/intern/gpu_buffers.c
29  *  \ingroup gpu
30  */
31
32
33 #include <limits.h>
34 #include <stddef.h>
35 #include <string.h>
36
37 #include "GL/glew.h"
38
39 #include "MEM_guardedalloc.h"
40
41 #include "BLI_bitmap.h"
42 #include "BLI_math.h"
43 #include "BLI_utildefines.h"
44 #include "BLI_ghash.h"
45 #include "BLI_threads.h"
46
47 #include "DNA_meshdata_types.h"
48
49 #include "BKE_DerivedMesh.h"
50 #include "BKE_paint.h"
51 #include "BKE_subsurf.h"
52
53 #include "DNA_userdef_types.h"
54
55 #include "GPU_buffers.h"
56
57 typedef enum {
58         GPU_BUFFER_VERTEX_STATE = 1,
59         GPU_BUFFER_NORMAL_STATE = 2,
60         GPU_BUFFER_TEXCOORD_STATE = 4,
61         GPU_BUFFER_COLOR_STATE = 8,
62         GPU_BUFFER_ELEMENT_STATE = 16,
63 } GPUBufferState;
64
65 #define MAX_GPU_ATTRIB_DATA 32
66
67 /* material number is a 16-bit signed short; assume it is non-negative and cap the number of materials we handle at 16384 */
68 #define MAX_MATERIALS 16384
69
70 /* -1 - undefined, 0 - vertex arrays, 1 - VBOs */
71 static int useVBOs = -1;
72 static GPUBufferState GLStates = 0;
73 static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
74
75 /* stores recently-deleted buffers so that new buffers won't have to
76    be recreated as often
77
78    only one instance of this pool is created, stored in
79    gpu_buffer_pool
80
81    note that the number of buffers in the pool is usually limited to
82    MAX_FREE_GPU_BUFFERS, but this limit may be exceeded temporarily
83    when a GPUBuffer is released outside the main thread; due to OpenGL
84    restrictions it cannot be immediately released
85  */
86 typedef struct GPUBufferPool {
87         /* number of allocated buffers stored */
88         int totbuf;
89         /* actual allocated length of the array */
90         int maxsize;
91         GPUBuffer **buffers;
92 } GPUBufferPool;
93 #define MAX_FREE_GPU_BUFFERS 8
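/* a typical round trip through the pool (a sketch; assumes the caller runs
   on the main thread with a valid GL context, and `totvert'/`positions'
   stand in for the caller's own data):

       GPUBuffer *buf = GPU_buffer_alloc(sizeof(float) * 3 * totvert);
       float *varray = GPU_buffer_lock(buf);
       if (varray)
               memcpy(varray, positions, sizeof(float) * 3 * totvert);
       GPU_buffer_unlock(buf);
       ... draw with the buffer ...
       GPU_buffer_free(buf);   <- not destroyed, just returned to the pool

   a later GPU_buffer_alloc() of a similar size can then re-use `buf'
   without asking the driver for new storage. */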
94
95 /* create a new GPUBufferPool */
96 static GPUBufferPool *gpu_buffer_pool_new(void)
97 {
98         GPUBufferPool *pool;
99
100         /* enable VBOs if supported */
101         if (useVBOs == -1)
102                 useVBOs = (GLEW_ARB_vertex_buffer_object ? 1 : 0);
103
104         pool = MEM_callocN(sizeof(GPUBufferPool), "GPUBuffer");
105
106         pool->maxsize = MAX_FREE_GPU_BUFFERS;
107         pool->buffers = MEM_callocN(sizeof(GPUBuffer*)*pool->maxsize,
108                                     "GPUBuffer.buffers");
109
110         return pool;
111 }
112
113 /* remove a GPUBuffer from the pool (does not free the GPUBuffer) */
114 static void gpu_buffer_pool_remove_index(GPUBufferPool *pool, int index)
115 {
116         int i;
117
118         if (!pool || index < 0 || index >= pool->totbuf)
119                 return;
120
121         /* shift entries down, overwriting the buffer at `index' */
122         for (i = index; i < pool->totbuf - 1; i++)
123                 pool->buffers[i] = pool->buffers[i+1];
124
125         /* clear the last entry */
126         if (pool->totbuf > 0)
127                 pool->buffers[pool->totbuf - 1] = NULL;
128
129         pool->totbuf--;
130 }
131
132 /* delete the last entry in the pool */
133 static void gpu_buffer_pool_delete_last(GPUBufferPool *pool)
134 {
135         GPUBuffer *last;
136
137         if (pool->totbuf <= 0)
138                 return;
139
140         /* get the last entry */
141         if (!(last = pool->buffers[pool->totbuf - 1]))
142                 return;
143
144         /* delete the buffer's data */
145         if (useVBOs)
146                 glDeleteBuffersARB(1, &last->id);
147         else
148                 MEM_freeN(last->pointer);
149
150         /* delete the buffer and remove from pool */
151         MEM_freeN(last);
152         pool->totbuf--;
153         pool->buffers[pool->totbuf] = NULL;
154 }
155
156 /* free a GPUBufferPool; also frees the data in the pool's
157    GPUBuffers */
158 static void gpu_buffer_pool_free(GPUBufferPool *pool)
159 {
160         if (!pool)
161                 return;
162         
163         while(pool->totbuf)
164                 gpu_buffer_pool_delete_last(pool);
165
166         MEM_freeN(pool->buffers);
167         MEM_freeN(pool);
168 }
169
170 static GPUBufferPool *gpu_buffer_pool = NULL;
171 static GPUBufferPool *gpu_get_global_buffer_pool(void)
172 {
173         /* initialize the pool */
174         if (!gpu_buffer_pool)
175                 gpu_buffer_pool = gpu_buffer_pool_new();
176
177         return gpu_buffer_pool;
178 }
179
180 void GPU_global_buffer_pool_free(void)
181 {
182         gpu_buffer_pool_free(gpu_buffer_pool);
183         gpu_buffer_pool = NULL;
184 }
185
186 /* get a GPUBuffer of at least `size' bytes; uses one from the buffer
187    pool if possible, otherwise creates a new one */
188 GPUBuffer *GPU_buffer_alloc(int size)
189 {
190         GPUBufferPool *pool;
191         GPUBuffer *buf;
192         int i, bufsize, bestfit = -1;
193
194         pool = gpu_get_global_buffer_pool();
195
196         /* not sure if this buffer pool code has been profiled much,
197            seems to me that the graphics driver and system memory
198            management might do this stuff anyway. --nicholas
199         */
200
201         /* check the global buffer pool for a recently-deleted buffer
202            that is at least as big as the request, but not more than
203            twice as big */
204         for (i = 0; i < pool->totbuf; i++) {
205                 bufsize = pool->buffers[i]->size;
206
207                 /* check for an exact size match */
208                 if (bufsize == size) {
209                         bestfit = i;
210                         break;
211                 }
212                 /* smaller buffers won't fit data and buffers at least
213                    twice as big are a waste of memory */
214                 else if (bufsize > size && size > (bufsize / 2)) {
215                         /* is it closer to the required size than the
216                            last appropriate buffer found. try to save
217                            memory */
218                         if (bestfit == -1 || pool->buffers[bestfit]->size > bufsize) {
219                                 bestfit = i;
220                         }
221                 }
222         }
223
224         /* if an acceptable buffer was found in the pool, remove it
225            from the pool and return it */
226         if (bestfit != -1) {
227                 buf = pool->buffers[bestfit];
228                 gpu_buffer_pool_remove_index(pool, bestfit);
229                 return buf;
230         }
231
232         /* no acceptable buffer found in the pool, create a new one */
233         buf = MEM_callocN(sizeof(GPUBuffer), "GPUBuffer");
234         buf->size = size;
235
236         if (useVBOs == 1) {
237                 /* create a new VBO and initialize it to the requested
238                    size */
239                 glGenBuffersARB(1, &buf->id);
240                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buf->id);
241                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, size, NULL, GL_STATIC_DRAW_ARB);
242                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
243         }
244         else {
245                 buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
246                 
247                 /* purpose of this seems to be dealing with
248                    out-of-memory errors? looks a bit iffy to me
249                    though, at least on Linux I expect malloc() would
250                    just overcommit. --nicholas */
251                 while(!buf->pointer && pool->totbuf > 0) {
252                         gpu_buffer_pool_delete_last(pool);
253                         buf->pointer = MEM_mallocN(size, "GPUBuffer.pointer");
254                 }
255                 if (!buf->pointer)
256                         return NULL;
257         }
258
259         return buf;
260 }
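/* worked example of the best-fit search above: with buffers of 100, 260
   and 600 bytes sitting in the pool, a request for 256 bytes rejects 100
   (too small) and 600 (more than twice the request) and picks 260 as the
   closest acceptable fit; a request for 700 bytes matches nothing and a
   fresh buffer is created instead. */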
261
262 /* release a GPUBuffer; does not free the actual buffer or its data,
263    but rather moves it to the pool of recently-freed buffers for
264    possible re-use */
265 void GPU_buffer_free(GPUBuffer *buffer)
266 {
267         GPUBufferPool *pool;
268         int i;
269
270         if (!buffer)
271                 return;
272
273         pool = gpu_get_global_buffer_pool();
274
275         /* free the last used buffer in the queue if no more space, but only
276            if we are in the main thread. for e.g. rendering or baking it can
277            happen that we are in another thread and can't call OpenGL, in that
278            case cleanup will be done by GPU_buffer_pool_free_unused */
279         if (BLI_thread_is_main()) {
280                 /* in main thread, safe to decrease size of pool back
281                    down to MAX_FREE_GPU_BUFFERS */
282                 while(pool->totbuf >= MAX_FREE_GPU_BUFFERS)
283                         gpu_buffer_pool_delete_last(pool);
284         }
285         else {
286                 /* outside of main thread, can't safely delete the
287                    buffer, so increase pool size */
288                 if (pool->maxsize == pool->totbuf) {
289                         pool->maxsize += MAX_FREE_GPU_BUFFERS;
290                         pool->buffers = MEM_reallocN(pool->buffers,
291                                                      sizeof(GPUBuffer*) * pool->maxsize);
292                 }
293         }
294
295         /* shift pool entries up by one */
296         for (i = pool->totbuf; i > 0; i--)
297                 pool->buffers[i] = pool->buffers[i-1];
298
299         /* insert the buffer into the beginning of the pool */
300         pool->buffers[0] = buffer;
301         pool->totbuf++;
302 }
303
304 typedef struct GPUVertPointLink {
305         struct GPUVertPointLink *next;
306         /* -1 means uninitialized */
307         int point_index;
308 } GPUVertPointLink;
309
310 /* add a new point to the list of points related to a particular
311    vertex */
312 static void gpu_drawobject_add_vert_point(GPUDrawObject *gdo, int vert_index, int point_index)
313 {
314         GPUVertPointLink *lnk;
315
316         lnk = &gdo->vert_points[vert_index];
317
318         /* if first link is in use, add a new link at the end */
319         if (lnk->point_index != -1) {
320                 /* get last link */
321                 for (; lnk->next; lnk = lnk->next);
322
323                 /* add a new link from the pool */
324                 lnk = lnk->next = &gdo->vert_points_mem[gdo->vert_points_usage];
325                 gdo->vert_points_usage++;
326         }
327
328         lnk->point_index = point_index;
329 }
330
331 /* update the vert_points and triangle_to_mface fields with a new
332    triangle */
333 static void gpu_drawobject_add_triangle(GPUDrawObject *gdo,
334                                         int base_point_index,
335                                         int face_index,
336                                         int v1, int v2, int v3)
337 {
338         int i, v[3] = {v1, v2, v3};
339         for (i = 0; i < 3; i++)
340                 gpu_drawobject_add_vert_point(gdo, v[i], base_point_index + i);
341         gdo->triangle_to_mface[base_point_index / 3] = face_index;
342 }
343
344 /* for each vertex, build a list of points related to it; these lists
345    are stored in an array sized to the number of vertices */
346 static void gpu_drawobject_init_vert_points(GPUDrawObject *gdo, MFace *f, int totface)
347 {
348         GPUBufferMaterial *mat;
349         int i, mat_orig_to_new[MAX_MATERIALS];
350
351         /* allocate the array and space for links */
352         gdo->vert_points = MEM_callocN(sizeof(GPUVertPointLink) * gdo->totvert,
353                                        "GPUDrawObject.vert_points");
354         gdo->vert_points_mem = MEM_callocN(sizeof(GPUVertPointLink) * gdo->tot_triangle_point,
355                                               "GPUDrawObject.vert_points_mem");
356         gdo->vert_points_usage = 0;
357
358         /* build a map from the original material indices to the new
359            GPUBufferMaterial indices */
360         for (i = 0; i < gdo->totmaterial; i++)
361                 mat_orig_to_new[gdo->materials[i].mat_nr] = i;
362
363         /* -1 indicates the link is not yet used */
364         for (i = 0; i < gdo->totvert; i++)
365                 gdo->vert_points[i].point_index = -1;
366
367         for (i = 0; i < totface; i++, f++) {
368                 mat = &gdo->materials[mat_orig_to_new[f->mat_nr]];
369
370                 /* add triangle */
371                 gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
372                                             i, f->v1, f->v2, f->v3);
373                 mat->totpoint += 3;
374
375                 /* add second triangle for quads */
376                 if (f->v4) {
377                         gpu_drawobject_add_triangle(gdo, mat->start + mat->totpoint,
378                                                     i, f->v3, f->v4, f->v1);
379                         mat->totpoint += 3;
380                 }
381         }
382
383         /* map any unused vertices to loose points */
384         for (i = 0; i < gdo->totvert; i++) {
385                 if (gdo->vert_points[i].point_index == -1) {
386                         gdo->vert_points[i].point_index = gdo->tot_triangle_point + gdo->tot_loose_point;
387                         gdo->tot_loose_point++;
388                 }
389         }
390 }
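/* worked example of the mapping built above: for a mesh that is a single
   quad with vertices (0, 1, 2, 3), the quad becomes the triangles
   (0, 1, 2) and (2, 3, 0) occupying point indices 0-2 and 3-5, so
   vert_points ends up as v0 -> {0, 5}, v1 -> {1}, v2 -> {2, 3},
   v3 -> {4}, and triangle_to_mface[0] == triangle_to_mface[1] == 0. */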
391
392 /* see GPUDrawObject's structure definition for a description of the
393    data being initialized here */
394 GPUDrawObject *GPU_drawobject_new( DerivedMesh *dm )
395 {
396         GPUDrawObject *gdo;
397         MFace *mface;
398         int points_per_mat[MAX_MATERIALS];
399         int i, curmat, curpoint, totface;
400
401         mface = dm->getTessFaceArray(dm);
402         totface= dm->getNumTessFaces(dm);
403
404         /* get the number of points used by each material, treating
405            each quad as two triangles */
406         memset(points_per_mat, 0, sizeof(int)*MAX_MATERIALS);
407         for (i = 0; i < totface; i++)
408                 points_per_mat[mface[i].mat_nr] += mface[i].v4 ? 6 : 3;
409
410         /* create the GPUDrawObject */
411         gdo = MEM_callocN(sizeof(GPUDrawObject),"GPUDrawObject");
412         gdo->totvert = dm->getNumVerts(dm);
413         gdo->totedge = dm->getNumEdges(dm);
414
415         /* count the number of materials used by this DerivedMesh */
416         for (i = 0; i < MAX_MATERIALS; i++) {
417                 if (points_per_mat[i] > 0)
418                         gdo->totmaterial++;
419         }
420
421         /* allocate an array of materials used by this DerivedMesh */
422         gdo->materials = MEM_mallocN(sizeof(GPUBufferMaterial) * gdo->totmaterial,
423                                      "GPUDrawObject.materials");
424
425         /* initialize the materials array */
426         for (i = 0, curmat = 0, curpoint = 0; i < MAX_MATERIALS; i++) {
427                 if (points_per_mat[i] > 0) {
428                         gdo->materials[curmat].start = curpoint;
429                         gdo->materials[curmat].totpoint = 0;
430                         gdo->materials[curmat].mat_nr = i;
431
432                         curpoint += points_per_mat[i];
433                         curmat++;
434                 }
435         }
436
437         /* store total number of points used for triangles */
438         gdo->tot_triangle_point = curpoint;
439
440         gdo->triangle_to_mface = MEM_mallocN(sizeof(int) * (gdo->tot_triangle_point / 3),
441                                      "GPUDrawObject.triangle_to_mface");
442
443         gpu_drawobject_init_vert_points(gdo, mface, totface);
444
445         return gdo;
446 }
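/* worked example of the material bucketing above: a mesh with two
   triangles using material 0 and one quad using material 3 gives
   points_per_mat[0] = 6 and points_per_mat[3] = 6, so totmaterial = 2,
   materials[0] = {start 0, mat_nr 0}, materials[1] = {start 6, mat_nr 3}
   and tot_triangle_point = 12; each material's points end up contiguous
   in every buffer built from this draw object. */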
447
448 void GPU_drawobject_free(DerivedMesh *dm)
449 {
450         GPUDrawObject *gdo;
451
452         if (!dm || !(gdo = dm->drawObject))
453                 return;
454
455         MEM_freeN(gdo->materials);
456         MEM_freeN(gdo->triangle_to_mface);
457         MEM_freeN(gdo->vert_points);
458         MEM_freeN(gdo->vert_points_mem);
459         GPU_buffer_free(gdo->points);
460         GPU_buffer_free(gdo->normals);
461         GPU_buffer_free(gdo->uv);
462         GPU_buffer_free(gdo->colors);
463         GPU_buffer_free(gdo->edges);
464         GPU_buffer_free(gdo->uvedges);
465
466         MEM_freeN(gdo);
467         dm->drawObject = NULL;
468 }
469
470 typedef void (*GPUBufferCopyFunc)(DerivedMesh *dm, float *varray, int *index,
471                                   int *mat_orig_to_new, void *user_data);
472
473 static GPUBuffer *gpu_buffer_setup(DerivedMesh *dm, GPUDrawObject *object,
474                                    int vector_size, int size, GLenum target,
475                                    void *user, GPUBufferCopyFunc copy_f)
476 {
477         GPUBufferPool *pool;
478         GPUBuffer *buffer;
479         float *varray;
480         int mat_orig_to_new[MAX_MATERIALS];
481         int *cur_index_per_mat;
482         int i;
483         int success;
484         GLboolean uploaded;
485
486         pool = gpu_get_global_buffer_pool();
487
488         /* alloc a GPUBuffer; fall back to legacy mode on failure */
489         if (!(buffer = GPU_buffer_alloc(size)))
490                 dm->drawObject->legacy = 1;
491
492         /* nothing to do for legacy mode */
493         if (dm->drawObject->legacy)
494                 return NULL;
495
496         cur_index_per_mat = MEM_mallocN(sizeof(int)*object->totmaterial,
497                                         "GPU_buffer_setup.cur_index_per_mat");
498         for (i = 0; i < object->totmaterial; i++) {
499                 /* for each material, the current index to copy data to */
500                 cur_index_per_mat[i] = object->materials[i].start * vector_size;
501
502                 /* map from original material index to new
503                    GPUBufferMaterial index */
504                 mat_orig_to_new[object->materials[i].mat_nr] = i;
505         }
506
507         if (useVBOs) {
508                 success = 0;
509
510                 while(!success) {
511                         /* bind the buffer and discard previous data,
512                            avoids stalling gpu */
513                         glBindBufferARB(target, buffer->id);
514                         glBufferDataARB(target, buffer->size, NULL, GL_STATIC_DRAW_ARB);
515
516                         /* attempt to map the buffer */
517                         if (!(varray = glMapBufferARB(target, GL_WRITE_ONLY_ARB))) {
518                                 /* failed to map the buffer; delete it */
519                                 GPU_buffer_free(buffer);
520                                 gpu_buffer_pool_delete_last(pool);
521                                 buffer= NULL;
522
523                                 /* try freeing an entry from the pool
524                                    and reallocating the buffer */
525                                 if (pool->totbuf > 0) {
526                                         gpu_buffer_pool_delete_last(pool);
527                                         buffer = GPU_buffer_alloc(size);
528                                 }
529
530                                 /* allocation still failed; fall back
531                                    to legacy mode */
532                                 if (!buffer) {
533                                         dm->drawObject->legacy = 1;
534                                         success = 1;
535                                 }
536                         }
537                         else {
538                                 success = 1;
539                         }
540                 }
541
542                 /* check legacy fallback didn't happen */
543                 if (dm->drawObject->legacy == 0) {
544                         uploaded = GL_FALSE;
545                         /* attempt to upload the data to the VBO */
546                         while(uploaded == GL_FALSE) {
547                                 (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
548                                 /* glUnmapBuffer returns GL_FALSE if
549                                  * the data store is corrupted; retry
550                                  * in that case */
551                                 uploaded = glUnmapBufferARB(target);
552                         }
553                 }
554                 glBindBufferARB(target, 0);
555         }
556         else {
557                 /* VBO not supported, use vertex array fallback */
558                 if (buffer->pointer) {
559                         varray = buffer->pointer;
560                         (*copy_f)(dm, varray, cur_index_per_mat, mat_orig_to_new, user);
561                 }
562                 else {
563                         dm->drawObject->legacy = 1;
564                 }
565         }
566
567         MEM_freeN(cur_index_per_mat);
568
569         return buffer;
570 }
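/* sketch of how a copy callback plugs into gpu_buffer_setup(); the callback
   below is hypothetical (the real ones follow: GPU_buffer_copy_vertex,
   GPU_buffer_copy_normal, ...), but it shows the expected contract: write
   the per-point data starting at index[] for the face's material, then
   advance index[] by the number of floats written:

       static void copy_example_cb(DerivedMesh *dm, float *varray, int *index,
                                   int *mat_orig_to_new, void *user)
       {
               ... for each tessface f: write to
                   varray[index[mat_orig_to_new[f->mat_nr]]] and advance
                   index[mat_orig_to_new[f->mat_nr]] ...
       }

       buf = gpu_buffer_setup(dm, dm->drawObject, 3,
                              sizeof(float) * 3 * dm->drawObject->tot_triangle_point,
                              GL_ARRAY_BUFFER_ARB, NULL, copy_example_cb);
*/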
571
572 static void GPU_buffer_copy_vertex(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
573 {
574         MVert *mvert;
575         MFace *f;
576         int i, j, start, totface;
577
578         mvert = dm->getVertArray(dm);
579         f = dm->getTessFaceArray(dm);
580
581         totface= dm->getNumTessFaces(dm);
582         for (i = 0; i < totface; i++, f++) {
583                 start = index[mat_orig_to_new[f->mat_nr]];
584
585                 /* v1 v2 v3 */
586                 copy_v3_v3(&varray[start], mvert[f->v1].co);
587                 copy_v3_v3(&varray[start+3], mvert[f->v2].co);
588                 copy_v3_v3(&varray[start+6], mvert[f->v3].co);
589                 index[mat_orig_to_new[f->mat_nr]] += 9;
590
591                 if (f->v4) {
592                         /* v3 v4 v1 */
593                         copy_v3_v3(&varray[start+9], mvert[f->v3].co);
594                         copy_v3_v3(&varray[start+12], mvert[f->v4].co);
595                         copy_v3_v3(&varray[start+15], mvert[f->v1].co);
596                         index[mat_orig_to_new[f->mat_nr]] += 9;
597                 }
598         }
599
600         /* copy loose points */
601         j = dm->drawObject->tot_triangle_point*3;
602         for (i = 0; i < dm->drawObject->totvert; i++) {
603                 if (dm->drawObject->vert_points[i].point_index >= dm->drawObject->tot_triangle_point) {
604                         copy_v3_v3(&varray[j],mvert[i].co);
605                         j+=3;
606                 }
607         }
608 }
609
610 static void GPU_buffer_copy_normal(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
611 {
612         int i, totface;
613         int start;
614         float f_no[3];
615
616         float *nors= dm->getTessFaceDataArray(dm, CD_NORMAL);
617         MVert *mvert = dm->getVertArray(dm);
618         MFace *f = dm->getTessFaceArray(dm);
619
620         totface= dm->getNumTessFaces(dm);
621         for (i = 0; i < totface; i++, f++) {
622                 const int smoothnormal = (f->flag & ME_SMOOTH);
623
624                 start = index[mat_orig_to_new[f->mat_nr]];
625                 index[mat_orig_to_new[f->mat_nr]] += f->v4 ? 18 : 9;
626
627                 if (smoothnormal) {
628                         /* copy vertex normal */
629                         normal_short_to_float_v3(&varray[start], mvert[f->v1].no);
630                         normal_short_to_float_v3(&varray[start+3], mvert[f->v2].no);
631                         normal_short_to_float_v3(&varray[start+6], mvert[f->v3].no);
632
633                         if (f->v4) {
634                                 normal_short_to_float_v3(&varray[start+9], mvert[f->v3].no);
635                                 normal_short_to_float_v3(&varray[start+12], mvert[f->v4].no);
636                                 normal_short_to_float_v3(&varray[start+15], mvert[f->v1].no);
637                         }
638                 }
639                 else if (nors) {
640                         /* copy cached face normal */
641                         copy_v3_v3(&varray[start], &nors[i*3]);
642                         copy_v3_v3(&varray[start+3], &nors[i*3]);
643                         copy_v3_v3(&varray[start+6], &nors[i*3]);
644
645                         if (f->v4) {
646                                 copy_v3_v3(&varray[start+9], &nors[i*3]);
647                                 copy_v3_v3(&varray[start+12], &nors[i*3]);
648                                 copy_v3_v3(&varray[start+15], &nors[i*3]);
649                         }
650                 }
651                 else {
652                         /* calculate face normal */
653                         if (f->v4)
654                                 normal_quad_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co, mvert[f->v4].co);
655                         else
656                                 normal_tri_v3(f_no, mvert[f->v1].co, mvert[f->v2].co, mvert[f->v3].co);
657
658                         copy_v3_v3(&varray[start], f_no);
659                         copy_v3_v3(&varray[start+3], f_no);
660                         copy_v3_v3(&varray[start+6], f_no);
661
662                         if (f->v4) {
663                                 copy_v3_v3(&varray[start+9], f_no);
664                                 copy_v3_v3(&varray[start+12], f_no);
665                                 copy_v3_v3(&varray[start+15], f_no);
666                         }
667                 }
668         }
669 }
670
671 static void GPU_buffer_copy_uv(DerivedMesh *dm, float *varray, int *index, int *mat_orig_to_new, void *UNUSED(user))
672 {
673         int start;
674         int i, totface;
675
676         MTFace *mtface;
677         MFace *f;
678
679         if (!(mtface = DM_get_tessface_data_layer(dm, CD_MTFACE)))
680                 return;
681         f = dm->getTessFaceArray(dm);
682                 
683         totface = dm->getNumTessFaces(dm);
684         for (i = 0; i < totface; i++, f++) {
685                 start = index[mat_orig_to_new[f->mat_nr]];
686
687                 /* v1 v2 v3 */
688                 copy_v2_v2(&varray[start],mtface[i].uv[0]);
689                 copy_v2_v2(&varray[start+2],mtface[i].uv[1]);
690                 copy_v2_v2(&varray[start+4],mtface[i].uv[2]);
691                 index[mat_orig_to_new[f->mat_nr]] += 6;
692
693                 if (f->v4) {
694                         /* v3 v4 v1 */
695                         copy_v2_v2(&varray[start+6],mtface[i].uv[2]);
696                         copy_v2_v2(&varray[start+8],mtface[i].uv[3]);
697                         copy_v2_v2(&varray[start+10],mtface[i].uv[0]);
698                         index[mat_orig_to_new[f->mat_nr]] += 6;
699                 }
700         }
701 }
702
703
704 static void GPU_buffer_copy_color3(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
705 {
706         int i, totface;
707         char *varray = (char *)varray_;
708         char *mcol = (char *)user;
709         MFace *f = dm->getTessFaceArray(dm);
710
711         totface= dm->getNumTessFaces(dm);
712         for (i=0; i < totface; i++, f++) {
713                 int start = index[mat_orig_to_new[f->mat_nr]];
714
715                 /* v1 v2 v3 */
716                 copy_v3_v3_char(&varray[start], &mcol[i*12]);
717                 copy_v3_v3_char(&varray[start+3], &mcol[i*12+3]);
718                 copy_v3_v3_char(&varray[start+6], &mcol[i*12+6]);
719                 index[mat_orig_to_new[f->mat_nr]] += 9;
720
721                 if (f->v4) {
722                         /* v3 v4 v1 */
723                         copy_v3_v3_char(&varray[start+9], &mcol[i*12+6]);
724                         copy_v3_v3_char(&varray[start+12], &mcol[i*12+9]);
725                         copy_v3_v3_char(&varray[start+15], &mcol[i*12]);
726                         index[mat_orig_to_new[f->mat_nr]] += 9;
727                 }
728         }
729 }
730
731 static void copy_mcol_uc3(unsigned char *v, unsigned char *col)
732 {
733         v[0] = col[3];
734         v[1] = col[2];
735         v[2] = col[1];
736 }
737
738 /* treat varray_ as an array of MCol, four MCols per face */
739 static void GPU_buffer_copy_mcol(DerivedMesh *dm, float *varray_, int *index, int *mat_orig_to_new, void *user)
740 {
741         int i, totface;
742         unsigned char *varray = (unsigned char *)varray_;
743         unsigned char *mcol = (unsigned char *)user;
744         MFace *f = dm->getTessFaceArray(dm);
745
746         totface= dm->getNumTessFaces(dm);
747         for (i=0; i < totface; i++, f++) {
748                 int start = index[mat_orig_to_new[f->mat_nr]];
749
750                 /* v1 v2 v3 */
751                 copy_mcol_uc3(&varray[start], &mcol[i*16]);
752                 copy_mcol_uc3(&varray[start+3], &mcol[i*16+4]);
753                 copy_mcol_uc3(&varray[start+6], &mcol[i*16+8]);
754                 index[mat_orig_to_new[f->mat_nr]] += 9;
755
756                 if (f->v4) {
757                         /* v3 v4 v1 */
758                         copy_mcol_uc3(&varray[start+9], &mcol[i*16+8]);
759                         copy_mcol_uc3(&varray[start+12], &mcol[i*16+12]);
760                         copy_mcol_uc3(&varray[start+15], &mcol[i*16]);
761                         index[mat_orig_to_new[f->mat_nr]] += 9;
762                 }
763         }
764 }
765
766 static void GPU_buffer_copy_edge(DerivedMesh *dm, float *varray_, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
767 {
768         MEdge *medge;
769         unsigned int *varray = (unsigned int *)varray_;
770         int i, totedge;
771  
772         medge = dm->getEdgeArray(dm);
773         totedge = dm->getNumEdges(dm);
774
775         for (i = 0; i < totedge; i++, medge++) {
776                 varray[i*2] = dm->drawObject->vert_points[medge->v1].point_index;
777                 varray[i*2+1] = dm->drawObject->vert_points[medge->v2].point_index;
778         }
779 }
780
781 static void GPU_buffer_copy_uvedge(DerivedMesh *dm, float *varray, int *UNUSED(index), int *UNUSED(mat_orig_to_new), void *UNUSED(user))
782 {
783         MTFace *tf = DM_get_tessface_data_layer(dm, CD_MTFACE);
784         int i, j=0;
785
786         if (!tf)
787                 return;
788
789         for (i = 0; i < dm->numTessFaceData; i++, tf++) {
790                 MFace mf;
791                 dm->getTessFace(dm,i,&mf);
792
793                 copy_v2_v2(&varray[j],tf->uv[0]);
794                 copy_v2_v2(&varray[j+2],tf->uv[1]);
795
796                 copy_v2_v2(&varray[j+4],tf->uv[1]);
797                 copy_v2_v2(&varray[j+6],tf->uv[2]);
798
799                 if (!mf.v4) {
800                         copy_v2_v2(&varray[j+8],tf->uv[2]);
801                         copy_v2_v2(&varray[j+10],tf->uv[0]);
802                         j+=12;
803                 }
804                 else {
805                         copy_v2_v2(&varray[j+8],tf->uv[2]);
806                         copy_v2_v2(&varray[j+10],tf->uv[3]);
807
808                         copy_v2_v2(&varray[j+12],tf->uv[3]);
809                         copy_v2_v2(&varray[j+14],tf->uv[0]);
810                         j+=16;
811                 }
812         }
813 }
814
815 /* get the DerivedMesh's MCols; choose (in decreasing order of
816    preference) from CD_ID_MCOL, CD_PREVIEW_MCOL, or CD_MCOL */
817 static MCol *gpu_buffer_color_type(DerivedMesh *dm)
818 {
819         MCol *c;
820         int type;
821
822         type = CD_ID_MCOL;
823         c = DM_get_tessface_data_layer(dm, type);
824         if (!c) {
825                 type = CD_PREVIEW_MCOL;
826                 c = DM_get_tessface_data_layer(dm, type);
827                 if (!c) {
828                         type = CD_MCOL;
829                         c = DM_get_tessface_data_layer(dm, type);
830                 }
831         }
832
833         dm->drawObject->colType = type;
834         return c;
835 }
836
837 typedef enum {
838         GPU_BUFFER_VERTEX = 0,
839         GPU_BUFFER_NORMAL,
840         GPU_BUFFER_COLOR,
841         GPU_BUFFER_UV,
842         GPU_BUFFER_EDGE,
843         GPU_BUFFER_UVEDGE,
844 } GPUBufferType;
845
846 typedef struct {
847         GPUBufferCopyFunc copy;
848         GLenum gl_buffer_type;
849         int vector_size;
850 } GPUBufferTypeSettings;
851
852 const GPUBufferTypeSettings gpu_buffer_type_settings[] = {
853         {GPU_buffer_copy_vertex, GL_ARRAY_BUFFER_ARB, 3},
854         {GPU_buffer_copy_normal, GL_ARRAY_BUFFER_ARB, 3},
855         {GPU_buffer_copy_mcol, GL_ARRAY_BUFFER_ARB, 3},
856         {GPU_buffer_copy_uv, GL_ARRAY_BUFFER_ARB, 2},
857         {GPU_buffer_copy_edge, GL_ELEMENT_ARRAY_BUFFER_ARB, 2},
858         {GPU_buffer_copy_uvedge, GL_ELEMENT_ARRAY_BUFFER_ARB, 4}
859 };
860
861 /* get the GPUDrawObject buffer associated with a type */
862 static GPUBuffer **gpu_drawobject_buffer_from_type(GPUDrawObject *gdo, GPUBufferType type)
863 {
864         switch(type) {
865         case GPU_BUFFER_VERTEX:
866                 return &gdo->points;
867         case GPU_BUFFER_NORMAL:
868                 return &gdo->normals;
869         case GPU_BUFFER_COLOR:
870                 return &gdo->colors;
871         case GPU_BUFFER_UV:
872                 return &gdo->uv;
873         case GPU_BUFFER_EDGE:
874                 return &gdo->edges;
875         case GPU_BUFFER_UVEDGE:
876                 return &gdo->uvedges;
877         default:
878                 return NULL;
879         }
880 }
881
882 /* get the amount of space to allocate for a buffer of a particular type */
883 static int gpu_buffer_size_from_type(DerivedMesh *dm, GPUBufferType type)
884 {
885         switch(type) {
886         case GPU_BUFFER_VERTEX:
887                 return sizeof(float)*3 * (dm->drawObject->tot_triangle_point + dm->drawObject->tot_loose_point);
888         case GPU_BUFFER_NORMAL:
889                 return sizeof(float)*3*dm->drawObject->tot_triangle_point;
890         case GPU_BUFFER_COLOR:
891                 return sizeof(char)*3*dm->drawObject->tot_triangle_point;
892         case GPU_BUFFER_UV:
893                 return sizeof(float)*2*dm->drawObject->tot_triangle_point;
894         case GPU_BUFFER_EDGE:
895                 return sizeof(int)*2*dm->drawObject->totedge;
896         case GPU_BUFFER_UVEDGE:
897                 /* every triangle contributes 3 points and 3 edges, and
898                    each edge stores its own, non-shared pair of UV coords,
899                    so each tri corner needs 4 floats; quads need less, so
900                    over-allocating as if everything were triangles is
901                    safe. */
902                 return sizeof(float) * 4 * dm->drawObject->tot_triangle_point;
903         default:
904                 return -1;
905         }
906 }
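/* sizing example: a mesh of 10 triangles and 5 quads (and no loose
   vertices) has tot_triangle_point = 10*3 + 5*6 = 60, so the vertex and
   normal buffers hold 60*3 floats, the color buffer 60*3 bytes, the UV
   buffer 60*2 floats and the UV-edge buffer 60*4 floats. */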
907
908 /* call gpu_buffer_setup with settings for a particular type of buffer */
909 static GPUBuffer *gpu_buffer_setup_type(DerivedMesh *dm, GPUBufferType type)
910 {
911         const GPUBufferTypeSettings *ts;
912         void *user_data = NULL;
913         GPUBuffer *buf;
914
915         ts = &gpu_buffer_type_settings[type];
916
917         /* special handling for MCol and UV buffers */
918         if (type == GPU_BUFFER_COLOR) {
919                 if (!(user_data = gpu_buffer_color_type(dm)))
920                         return NULL;
921         }
922         else if (type == GPU_BUFFER_UV) {
923                 if (!DM_get_tessface_data_layer(dm, CD_MTFACE))
924                         return NULL;
925         }
926
927         buf = gpu_buffer_setup(dm, dm->drawObject, ts->vector_size,
928                                gpu_buffer_size_from_type(dm, type),
929                                ts->gl_buffer_type, user_data, ts->copy);
930
931         return buf;
932 }
933
934 /* get the buffer of `type', initializing the GPUDrawObject and
935    buffer if needed */
936 static GPUBuffer *gpu_buffer_setup_common(DerivedMesh *dm, GPUBufferType type)
937 {
938         GPUBuffer **buf;
939         
940         if (!dm->drawObject)
941                 dm->drawObject = GPU_drawobject_new(dm);
942
943         buf = gpu_drawobject_buffer_from_type(dm->drawObject, type);
944         if (!(*buf))
945                 *buf = gpu_buffer_setup_type(dm, type);
946
947         return *buf;
948 }
949
950 void GPU_vertex_setup(DerivedMesh *dm)
951 {
952         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
953                 return;
954
955         glEnableClientState(GL_VERTEX_ARRAY);
956         if (useVBOs) {
957                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
958                 glVertexPointer(3, GL_FLOAT, 0, 0);
959         }
960         else {
961                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
962         }
963         
964         GLStates |= GPU_BUFFER_VERTEX_STATE;
965 }
966
967 void GPU_normal_setup(DerivedMesh *dm)
968 {
969         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_NORMAL))
970                 return;
971
972         glEnableClientState(GL_NORMAL_ARRAY);
973         if (useVBOs) {
974                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->normals->id);
975                 glNormalPointer(GL_FLOAT, 0, 0);
976         }
977         else {
978                 glNormalPointer(GL_FLOAT, 0, dm->drawObject->normals->pointer);
979         }
980
981         GLStates |= GPU_BUFFER_NORMAL_STATE;
982 }
983
984 void GPU_uv_setup(DerivedMesh *dm)
985 {
986         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UV))
987                 return;
988
989         glEnableClientState(GL_TEXTURE_COORD_ARRAY);
990         if (useVBOs) {
991                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uv->id);
992                 glTexCoordPointer(2, GL_FLOAT, 0, 0);
993         }
994         else {
995                 glTexCoordPointer(2, GL_FLOAT, 0, dm->drawObject->uv->pointer);
996         }
997
998         GLStates |= GPU_BUFFER_TEXCOORD_STATE;
999 }
1000
1001 void GPU_color_setup(DerivedMesh *dm)
1002 {
1003         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_COLOR))
1004                 return;
1005
1006         glEnableClientState(GL_COLOR_ARRAY);
1007         if (useVBOs) {
1008                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->colors->id);
1009                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, 0);
1010         }
1011         else {
1012                 glColorPointer(3, GL_UNSIGNED_BYTE, 0, dm->drawObject->colors->pointer);
1013         }
1014
1015         GLStates |= GPU_BUFFER_COLOR_STATE;
1016 }
1017
1018 void GPU_edge_setup(DerivedMesh *dm)
1019 {
1020         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_EDGE))
1021                 return;
1022
1023         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_VERTEX))
1024                 return;
1025
1026         glEnableClientState(GL_VERTEX_ARRAY);
1027         if (useVBOs) {
1028                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->points->id);
1029                 glVertexPointer(3, GL_FLOAT, 0, 0);
1030         }
1031         else {
1032                 glVertexPointer(3, GL_FLOAT, 0, dm->drawObject->points->pointer);
1033         }
1034         
1035         GLStates |= GPU_BUFFER_VERTEX_STATE;
1036
1037         if (useVBOs)
1038                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, dm->drawObject->edges->id);
1039
1040         GLStates |= GPU_BUFFER_ELEMENT_STATE;
1041 }
1042
1043 void GPU_uvedge_setup(DerivedMesh *dm)
1044 {
1045         if (!gpu_buffer_setup_common(dm, GPU_BUFFER_UVEDGE))
1046                 return;
1047
1048         glEnableClientState(GL_VERTEX_ARRAY);
1049         if (useVBOs) {
1050                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, dm->drawObject->uvedges->id);
1051                 glVertexPointer(2, GL_FLOAT, 0, 0);
1052         }
1053         else {
1054                 glVertexPointer(2, GL_FLOAT, 0, dm->drawObject->uvedges->pointer);
1055         }
1056         
1057         GLStates |= GPU_BUFFER_VERTEX_STATE;
1058 }
1059
1060 static int GPU_typesize(int type)
1061 {
1062         switch(type) {
1063         case GL_FLOAT:
1064                 return sizeof(float);
1065         case GL_INT:
1066                 return sizeof(int);
1067         case GL_UNSIGNED_INT:
1068                 return sizeof(unsigned int);
1069         case GL_BYTE:
1070                 return sizeof(char);
1071         case GL_UNSIGNED_BYTE:
1072                 return sizeof(unsigned char);
1073         default:
1074                 return 0;
1075         }
1076 }
1077
1078 int GPU_attrib_element_size(GPUAttrib data[], int numdata)
1079 {
1080         int i, elementsize = 0;
1081
1082         for (i = 0; i < numdata; i++) {
1083                 int typesize = GPU_typesize(data[i].type);
1084                 if (typesize != 0)
1085                         elementsize += typesize*data[i].size;
1086         }
1087         return elementsize;
1088 }
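/* e.g. one 3-component GL_FLOAT attribute plus one 4-component
   GL_UNSIGNED_BYTE attribute gives an element size of 3*4 + 4*1 = 16
   bytes, which is used as the stride below. */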
1089
1090 void GPU_interleaved_attrib_setup(GPUBuffer *buffer, GPUAttrib data[], int numdata)
1091 {
1092         int i;
1093         int elementsize;
1094         intptr_t offset = 0;
1095
1096         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1097                 if (attribData[i].index != -1) {
1098                         glDisableVertexAttribArrayARB(attribData[i].index);
1099                 }
1100                 else
1101                         break;
1102         }
1103         elementsize = GPU_attrib_element_size(data, numdata);
1104
1105         if (useVBOs) {
1106                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1107                 for (i = 0; i < numdata; i++) {
1108                         glEnableVertexAttribArrayARB(data[i].index);
1109                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1110                                                  GL_FALSE, elementsize, (void *)offset);
1111                         offset += data[i].size*GPU_typesize(data[i].type);
1112
1113                         attribData[i].index = data[i].index;
1114                         attribData[i].size = data[i].size;
1115                         attribData[i].type = data[i].type;
1116                 }
1117                 attribData[numdata].index = -1;
1118         }
1119         else {
1120                 for (i = 0; i < numdata; i++) {
1121                         glEnableVertexAttribArrayARB(data[i].index);
1122                         glVertexAttribPointerARB(data[i].index, data[i].size, data[i].type,
1123                                                  GL_FALSE, elementsize, (char *)buffer->pointer + offset);
1124                         offset += data[i].size*GPU_typesize(data[i].type);
1125                 }
1126         }
1127 }
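/* usage sketch (the attribute indices and sizes are illustrative only):

       GPUAttrib attribs[2];
       attribs[0].index = 1; attribs[0].size = 3; attribs[0].type = GL_FLOAT;
       attribs[1].index = 2; attribs[1].size = 4; attribs[1].type = GL_UNSIGNED_BYTE;
       GPU_interleaved_attrib_setup(buffer, attribs, 2);
       ... draw ...
       GPU_buffer_unbind();   <- also disables the attrib arrays

   the buffer is expected to store the attributes interleaved per vertex,
   i.e. with the stride computed by GPU_attrib_element_size(). */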
1128
1129
1130 void GPU_buffer_unbind(void)
1131 {
1132         int i;
1133
1134         if (GLStates & GPU_BUFFER_VERTEX_STATE)
1135                 glDisableClientState(GL_VERTEX_ARRAY);
1136         if (GLStates & GPU_BUFFER_NORMAL_STATE)
1137                 glDisableClientState(GL_NORMAL_ARRAY);
1138         if (GLStates & GPU_BUFFER_TEXCOORD_STATE)
1139                 glDisableClientState(GL_TEXTURE_COORD_ARRAY);
1140         if (GLStates & GPU_BUFFER_COLOR_STATE)
1141                 glDisableClientState(GL_COLOR_ARRAY);
1142         if (GLStates & GPU_BUFFER_ELEMENT_STATE) {
1143                 if (useVBOs) {
1144                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1145                 }
1146         }
1147         GLStates &= ~(GPU_BUFFER_VERTEX_STATE | GPU_BUFFER_NORMAL_STATE |
1148                       GPU_BUFFER_TEXCOORD_STATE | GPU_BUFFER_COLOR_STATE |
1149                       GPU_BUFFER_ELEMENT_STATE);
1150
1151         for (i = 0; i < MAX_GPU_ATTRIB_DATA; i++) {
1152                 if (attribData[i].index != -1) {
1153                         glDisableVertexAttribArrayARB(attribData[i].index);
1154                 }
1155                 else
1156                         break;
1157         }
1158
1159         if (useVBOs)
1160                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1161 }
1162
1163 /* confusion: code in cdderivedmesh calls both GPU_color_setup and
1164    GPU_color3_upload; both of these set the `colors' buffer, so seems
1165    like it will just needlessly overwrite? --nicholas */
1166 void GPU_color3_upload(DerivedMesh *dm, unsigned char *data)
1167 {
1168         if (dm->drawObject == 0)
1169                 dm->drawObject = GPU_drawobject_new(dm);
1170         GPU_buffer_free(dm->drawObject->colors);
1171
1172         dm->drawObject->colors = gpu_buffer_setup(dm, dm->drawObject, 3,
1173                                                   sizeof(char)*3*dm->drawObject->tot_triangle_point,
1174                                                   GL_ARRAY_BUFFER_ARB, data, GPU_buffer_copy_color3);
1175 }
1176
1177 void GPU_color_switch(int mode)
1178 {
1179         if (mode) {
1180                 if (!(GLStates & GPU_BUFFER_COLOR_STATE))
1181                         glEnableClientState(GL_COLOR_ARRAY);
1182                 GLStates |= GPU_BUFFER_COLOR_STATE;
1183         }
1184         else {
1185                 if (GLStates & GPU_BUFFER_COLOR_STATE)
1186                         glDisableClientState(GL_COLOR_ARRAY);
1187                 GLStates &= ~GPU_BUFFER_COLOR_STATE;
1188         }
1189 }
1190
1191 /* return 1 if drawing should be done using old immediate-mode
1192    code, 0 otherwise */
1193 int GPU_buffer_legacy(DerivedMesh *dm)
1194 {
1195         int test= (U.gameflags & USER_DISABLE_VBO);
1196         if (test)
1197                 return 1;
1198
1199         if (dm->drawObject == 0)
1200                 dm->drawObject = GPU_drawobject_new(dm);
1201         return dm->drawObject->legacy;
1202 }
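/* typical caller pattern (a sketch; the real callers live in
   cdderivedmesh.c and related draw code):

       if (GPU_buffer_legacy(dm)) {
               ... draw with immediate mode ...
       }
       else {
               GPU_vertex_setup(dm);
               GPU_normal_setup(dm);
               ... glDrawArrays() per material range ...
               GPU_buffer_unbind();
       }
*/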
1203
1204 void *GPU_buffer_lock(GPUBuffer *buffer)
1205 {
1206         float *varray;
1207
1208         if (!buffer)
1209                 return 0;
1210
1211         if (useVBOs) {
1212                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1213                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1214                 return varray;
1215         }
1216         else {
1217                 return buffer->pointer;
1218         }
1219 }
1220
1221 void *GPU_buffer_lock_stream(GPUBuffer *buffer)
1222 {
1223         float *varray;
1224
1225         if (!buffer)
1226                 return 0;
1227
1228         if (useVBOs) {
1229                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffer->id);
1230                 /* discard previous data, avoid stalling gpu */
1231                 glBufferDataARB(GL_ARRAY_BUFFER_ARB, buffer->size, 0, GL_STREAM_DRAW_ARB);
1232                 varray = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1233                 return varray;
1234         }
1235         else {
1236                 return buffer->pointer;
1237         }
1238 }
1239
1240 void GPU_buffer_unlock(GPUBuffer *buffer)
1241 {
1242         if (useVBOs) {
1243                 if (buffer) {
1244                         /* note: this operation can fail, could return
1245                            an error code from this function? */
1246                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1247                 }
1248                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1249         }
1250 }
1251
1252 /* used for drawing edges */
1253 void GPU_buffer_draw_elements(GPUBuffer *elements, unsigned int mode, int start, int count)
1254 {
1255         glDrawElements(mode, count, GL_UNSIGNED_INT,
1256                        (useVBOs ?
1257                         (void*)(start * sizeof(unsigned int)) :
1258                         ((int*)elements->pointer) + start));
1259 }
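/* usage sketch for drawing all edges of a DerivedMesh (the indices in the
   `edges' buffer refer to points in the `points' buffer, which is why
   GPU_edge_setup() binds both):

       GPU_edge_setup(dm);
       if (!GPU_buffer_legacy(dm))
               GPU_buffer_draw_elements(dm->drawObject->edges, GL_LINES, 0,
                                        dm->drawObject->totedge * 2);
       GPU_buffer_unbind();
*/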
1260
1261
1262 /* XXX: the rest of the code in this file is used for optimized PBVH
1263    drawing and doesn't interact at all with the buffer code above */
1264
1265 /* Convenience struct for building the VBO. */
1266 typedef struct {
1267         float co[3];
1268         short no[3];
1269 } VertexBufferFormat;
1270
1271 struct GPU_Buffers {
1272         /* opengl buffer handles */
1273         GLuint vert_buf, index_buf;
1274         GLenum index_type;
1275
1276         /* mesh pointers in case buffer allocation fails */
1277         MFace *mface;
1278         MVert *mvert;
1279         int *face_indices;
1280         int totface;
1281
1282         /* grid pointers */
1283         DMGridData **grids;
1284         const DMFlagMat *grid_flag_mats;
1285         const BLI_bitmap *grid_hidden;
1286         int *grid_indices;
1287         int totgrid;
1288         int gridsize;
1289         int has_hidden;
1290
1291         unsigned int tot_tri, tot_quad;
1292 };
1293
1294 void GPU_update_mesh_buffers(GPU_Buffers *buffers, MVert *mvert,
1295                         int *vert_indices, int totvert)
1296 {
1297         VertexBufferFormat *vert_data;
1298         int i;
1299
1300         if (buffers->vert_buf) {
1301                 /* Build VBO */
1302                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1303                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1304                                 sizeof(VertexBufferFormat) * totvert,
1305                                 NULL, GL_STATIC_DRAW_ARB);
1306                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1307
1308                 if (vert_data) {
1309                         for (i = 0; i < totvert; ++i) {
1310                                 MVert *v = mvert + vert_indices[i];
1311                                 VertexBufferFormat *out = vert_data + i;
1312
1313                                 copy_v3_v3(out->co, v->co);
1314                                 memcpy(out->no, v->no, sizeof(short) * 3);
1315                         }
1316
1317                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1318                 }
1319                 else {
1320                         glDeleteBuffersARB(1, &buffers->vert_buf);
1321                         buffers->vert_buf = 0;
1322                 }
1323
1324                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1325         }
1326
1327         buffers->mvert = mvert;
1328 }
1329
1330 GPU_Buffers *GPU_build_mesh_buffers(int (*face_vert_indices)[4],
1331                                                                         MFace *mface, MVert *mvert,
1332                                                                         int *face_indices,
1333                                                                         int totface)
1334 {
1335         GPU_Buffers *buffers;
1336         unsigned short *tri_data;
1337         int i, j, k, tottri;
1338
1339         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1340         buffers->index_type = GL_UNSIGNED_SHORT;
1341
1342         /* Count the number of visible triangles */
1343         for (i = 0, tottri = 0; i < totface; ++i) {
1344                 const MFace *f = &mface[face_indices[i]];
1345                 if (!paint_is_face_hidden(f, mvert))
1346                         tottri += f->v4 ? 2 : 1;
1347         }
1348         
1349         if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO))
1350                 glGenBuffersARB(1, &buffers->index_buf);
1351
1352         if (buffers->index_buf) {
1353                 /* Generate index buffer object */
1354                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1355                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,
1356                                 sizeof(unsigned short) * tottri * 3, NULL, GL_STATIC_DRAW_ARB);
1357
1358                 /* Fill the triangle buffer */
1359                 tri_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1360                 if (tri_data) {
1361                         for (i = 0; i < totface; ++i) {
1362                                 const MFace *f = mface + face_indices[i];
1363                                 int v[3];
1364
1365                                 /* Skip hidden faces */
1366                                 if (paint_is_face_hidden(f, mvert))
1367                                         continue;
1368
1369                                 v[0]= 0;
1370                                 v[1]= 1;
1371                                 v[2]= 2;
1372
1373                                 for (j = 0; j < (f->v4 ? 2 : 1); ++j) {
1374                                         for (k = 0; k < 3; ++k) {
1375                                                 *tri_data = face_vert_indices[i][v[k]];
1376                                                 ++tri_data;
1377                                         }
1378                                         v[0] = 3;
1379                                         v[1] = 0;
1380                                         v[2] = 2;
1381                                 }
1382                         }
1383                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);
1384                 }
1385                 else {
1386                         glDeleteBuffersARB(1, &buffers->index_buf);
1387                         buffers->index_buf = 0;
1388                 }
1389
1390                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1391         }
1392
1393         if (buffers->index_buf)
1394                 glGenBuffersARB(1, &buffers->vert_buf);
1395
1396         buffers->tot_tri = tottri;
1397
1398         buffers->mface = mface;
1399         buffers->face_indices = face_indices;
1400         buffers->totface = totface;
1401
1402         return buffers;
1403 }
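/* note on the v[] rotation in the loop above: a triangle emits only the
   corners (0, 1, 2); for a quad the first pass emits (0, 1, 2) and the
   second pass, after v[] becomes {3, 0, 2}, emits (3, 0, 2), i.e. the
   quad is split into the triangles v1-v2-v3 and v4-v1-v3. */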
1404
1405 void GPU_update_grid_buffers(GPU_Buffers *buffers, DMGridData **grids,
1406         const DMFlagMat *grid_flag_mats, int *grid_indices, int totgrid, int gridsize)
1407 {
1408         DMGridData *vert_data;
1409         int i, j, k, totvert;
1410
1411         totvert= gridsize*gridsize*totgrid;
1412
1413         /* Build VBO */
1414         if (buffers->vert_buf) {
1415                 int smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
1416
1417                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1418                 glBufferDataARB(GL_ARRAY_BUFFER_ARB,
1419                                 sizeof(DMGridData) * totvert,
1420                                 NULL, GL_STATIC_DRAW_ARB);
1421                 vert_data = glMapBufferARB(GL_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB);
1422                 if (vert_data) {
1423                         for (i = 0; i < totgrid; ++i) {
1424                                 DMGridData *grid= grids[grid_indices[i]];
1425                                 memcpy(vert_data, grid, sizeof(DMGridData)*gridsize*gridsize);
1426
1427                                 if (!smooth) {
1428                                         /* for flat shading, recalc normals and set the last vertex of
1429                                            each quad in the index buffer to have the flat normal as
1430                                            that is what opengl will use */
1431                                         for (j = 0; j < gridsize-1; ++j) {
1432                                                 for (k = 0; k < gridsize-1; ++k) {
1433                                                         float fno[3];
1434                                                         normal_quad_v3(fno,
1435                                                                 grid[(j+1)*gridsize + k].co,
1436                                                                 grid[(j+1)*gridsize + k+1].co,
1437                                                                 grid[j*gridsize + k+1].co,
1438                                                                 grid[j*gridsize + k].co);
1439
1440                                                         copy_v3_v3(vert_data[(j+1)*gridsize + (k+1)].no, fno);
1441                                                 }
1442                                         }
1443                                 }
1444
1445                                 vert_data += gridsize*gridsize;
1446                         }
1447                         glUnmapBufferARB(GL_ARRAY_BUFFER_ARB);
1448                 }
1449                 else {
1450                         glDeleteBuffersARB(1, &buffers->vert_buf);
1451                         buffers->vert_buf = 0;
1452                 }
1453                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1454         }
1455
1456         buffers->grids = grids;
1457         buffers->grid_indices = grid_indices;
1458         buffers->totgrid = totgrid;
1459         buffers->gridsize = gridsize;
1460         buffers->grid_flag_mats = grid_flag_mats;
1461
1462         //printf("node updated %p\n", buffers);
1463 }
1464
1465 /* Returns the number of visible quads in the node's grids. */
1466 static int gpu_count_grid_quads(BLI_bitmap *grid_hidden,
1467                                                                 int *grid_indices, int totgrid,
1468                                                                 int gridsize)
1469 {
1470         int gridarea = (gridsize-1) * (gridsize-1);
1471         int i, x, y, totquad;
1472
1473         /* a grid hidden layer may be present, so each grid has to be
1474            checked for visibility */
1475
1476         for (i = 0, totquad = 0; i < totgrid; i++) {
1477                 const BLI_bitmap gh = grid_hidden[grid_indices[i]];
1478
1479                 if (gh) {
1480                         /* grid hidden flags are present, check each quad individually */
1481                         for (y = 0; y < gridsize-1; y++) {
1482                                 for (x = 0; x < gridsize-1; x++) {
1483                                         if (!paint_is_grid_face_hidden(gh, gridsize, x, y))
1484                                                 totquad++;
1485                                 }
1486                         }
1487                 }
1488                 else
1489                         totquad += gridarea;
1490         }
1491
1492         return totquad;
1493 }
1494
1495 /* Build the element array buffer of grid indices using either
1496    unsigned shorts or unsigned ints. */
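     /* Note: this is a macro so the same fill loop can be expanded for both
      * index types; it expects 'totgrid', 'gridsize', 'grid_indices' and
      * 'grid_hidden' to be visible in the enclosing scope. */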
1497 #define FILL_QUAD_BUFFER(type_, tot_quad_, buffer_)                                             \
1498         {                                                                   \
1499                 type_ *quad_data;                                               \
1500                 int offset = 0;                                                 \
1501                 int i, j, k;                                                    \
1502                                                                         \
1503                 glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB,                    \
1504                                                 sizeof(type_) * (tot_quad_) * 4, NULL,                  \
1505                                                 GL_STATIC_DRAW_ARB);                            \
1506                                                                         \
1507                 /* Fill the quad buffer */                                      \
1508                 quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB,         \
1509                                                                    GL_WRITE_ONLY_ARB);                  \
1510                 if (quad_data) {                                                 \
1511                         for (i = 0; i < totgrid; ++i) {                                                         \
1512                                 BLI_bitmap gh = NULL;                                                                   \
1513                                 if (grid_hidden)                                                                                        \
1514                                         gh = grid_hidden[(grid_indices)[i]];                            \
1515                                                                                                                                                 \
1516                                 for (j = 0; j < gridsize-1; ++j) {                       \
1517                                         for (k = 0; k < gridsize-1; ++k) {                   \
1518                                                 /* Skip hidden grid face */                                             \
1519                                                 if (gh &&                                                                               \
1520                                                    paint_is_grid_face_hidden(gh,                                \
1521                                                                                                          gridsize, k, j))       \
1522                                                         continue;                                                                       \
1523                                                                                                                                                 \
1524                                                 *(quad_data++)= offset + j*gridsize + k+1;      \
1525                                                 *(quad_data++)= offset + j*gridsize + k;        \
1526                                                 *(quad_data++)= offset + (j+1)*gridsize + k;    \
1527                                                 *(quad_data++)= offset + (j+1)*gridsize + k+1;  \
1528                                         }                                                   \
1529                                 }                                                       \
1530                                                                                                                                                 \
1531                                 offset += gridsize*gridsize;                            \
1532                         }                                                           \
1533                         glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB);              \
1534                 }                                                               \
1535                 else {                                                          \
1536                         glDeleteBuffersARB(1, &(buffer_));                                                      \
1537                         (buffer_) = 0;                                                                                          \
1538                 }                                                               \
1539         }
1540 /* end FILL_QUAD_BUFFER */
1541
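     /* Return a shared element buffer for fully visible grids of the given
      * size. All such grids have the same topology, so a single buffer is
      * cached in static variables and only rebuilt when the grid size
      * changes (or deleted when VBOs are unavailable or disabled). */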
1542 static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
1543 {
1544         static int prev_gridsize = -1;
1545         static GLenum prev_index_type = 0;
1546         static GLuint buffer = 0;
1547         static unsigned prev_totquad;
1548
1549         /* used in the FILL_QUAD_BUFFER macro */
1550         const BLI_bitmap *grid_hidden = NULL;
1551         int *grid_indices = NULL;
1552         int totgrid = 1;
1553
1554         /* VBO is disabled; delete the previous buffer (if it exists) and
1555            return an invalid handle */
1556         if (!GLEW_ARB_vertex_buffer_object || (U.gameflags & USER_DISABLE_VBO)) {
1557                 if (buffer) {
1558                         glDeleteBuffersARB(1, &buffer);
                             buffer = 0; /* don't keep a stale buffer name around */
                     }
1559                 return 0;
1560         }
1561
1562         /* VBO is already built */
1563         if (buffer && prev_gridsize == gridsize) {
1564                 *index_type = prev_index_type;
1565                 *totquad = prev_totquad;
1566                 return buffer;
1567         }
1568
1569         /* Build new VBO */
1570         glGenBuffersARB(1, &buffer);
1571         if (buffer) {
1572                 *totquad= (gridsize-1)*(gridsize-1);
1573
1574                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
1575
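                     /* 16-bit indices are enough when a single grid's vertex
                      * indices fit in an unsigned short, otherwise use 32-bit */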
1576                 if (gridsize * gridsize < USHRT_MAX) {
1577                         *index_type = GL_UNSIGNED_SHORT;
1578                         FILL_QUAD_BUFFER(unsigned short, *totquad, buffer);
1579                 }
1580                 else {
1581                         *index_type = GL_UNSIGNED_INT;
1582                         FILL_QUAD_BUFFER(unsigned int, *totquad, buffer);
1583                 }
1584
1585                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1586         }
1587
1588         prev_gridsize = gridsize;
1589         prev_index_type = *index_type;
1590         prev_totquad = *totquad;
1591         return buffer;
1592 }
1593
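     /* Build the element buffer for a node's grids. When every quad is
      * visible, the shared buffer from gpu_get_grid_buffer() is reused;
      * otherwise a node-specific buffer is filled, skipping hidden quads.
      * 'has_hidden' records which case applies, so drawing and freeing know
      * whether the index buffer belongs to this node. */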
1594 GPU_Buffers *GPU_build_grid_buffers(int *grid_indices, int totgrid,
1595                                                                         BLI_bitmap *grid_hidden, int gridsize)
1596 {
1597         GPU_Buffers *buffers;
1598         int totquad;
1599         int fully_visible_totquad = (gridsize-1) * (gridsize-1) * totgrid;
1600
1601         buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
1602         buffers->grid_hidden = grid_hidden;
1603         buffers->gridsize = gridsize;
1604         buffers->totgrid = totgrid;
1605
1606         /* Count the number of quads */
1607         totquad= gpu_count_grid_quads(grid_hidden, grid_indices, totgrid, gridsize);
1608
1609         if (totquad == fully_visible_totquad) {
1610                 buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
1611                 buffers->has_hidden = 0;
1612         }
1613         else if (GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO)) {
1614                 /* Build new VBO */
1615                 glGenBuffersARB(1, &buffers->index_buf);
1616                 if (buffers->index_buf) {
1617                         buffers->tot_quad= totquad;
1618
1619                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1620
1621                         if (totgrid * gridsize * gridsize < USHRT_MAX) {
1622                                 buffers->index_type = GL_UNSIGNED_SHORT;
1623                                 FILL_QUAD_BUFFER(unsigned short, totquad, buffers->index_buf);
1624                         }
1625                         else {
1626                                 buffers->index_type = GL_UNSIGNED_INT;
1627                                 FILL_QUAD_BUFFER(unsigned int, totquad, buffers->index_buf);
1628                         }
1629
1630                         glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1631                 }
1632
1633                 buffers->has_hidden = 1;
1634         }
1635
1636         /* Build coord/normal VBO */
1637         if (buffers->index_buf)
1638                 glGenBuffersARB(1, &buffers->vert_buf);
1639
1640         return buffers;
1641 }
1642
1643 #undef FILL_QUAD_BUFFER
1644
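     /* Immediate-mode fallback for mesh nodes, used when VBOs are
      * unavailable or buffer creation/mapping failed. */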
1645 static void gpu_draw_buffers_legacy_mesh(GPU_Buffers *buffers, int smooth)
1646 {
1647         const MVert *mvert = buffers->mvert;
1648         int i, j;
1649
1650         for (i = 0; i < buffers->totface; ++i) {
1651                 MFace *f = buffers->mface + buffers->face_indices[i];
1652                 int S = f->v4 ? 4 : 3;
1653                 unsigned int *fv = &f->v1;
1654
1655                 if (paint_is_face_hidden(f, buffers->mvert))
1656                         continue;
1657
1658                 glBegin((f->v4)? GL_QUADS: GL_TRIANGLES);
1659
1660                 if (smooth) {
1661                         for (j = 0; j < S; j++) {
1662                                 glNormal3sv(mvert[fv[j]].no);
1663                                 glVertex3fv(mvert[fv[j]].co);
1664                         }
1665                 }
1666                 else {
1667                         float fno[3];
1668
1669                         /* calculate face normal */
1670                         if (f->v4) {
1671                                 normal_quad_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co,
1672                                                            mvert[fv[2]].co, mvert[fv[3]].co);
1673                         }
1674                         else
1675                                 normal_tri_v3(fno, mvert[fv[0]].co, mvert[fv[1]].co, mvert[fv[2]].co);
1676                         glNormal3fv(fno);
1677                         
1678                         for (j = 0; j < S; j++)
1679                                 glVertex3fv(mvert[fv[j]].co);
1680                 }
1681                 
1682                 glEnd();
1683         }
1684 }
1685
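     /* Immediate-mode fallback for multires grids: grids with hidden
      * elements are drawn as individual quads, fully visible grids as quad
      * strips. */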
1686 static void gpu_draw_buffers_legacy_grids(GPU_Buffers *buffers, int smooth)
1687 {
1688         int i, j, x, y, gridsize = buffers->gridsize;
1689
1690         for (i = 0; i < buffers->totgrid; ++i) {
1691                 int g = buffers->grid_indices[i];
1692                 const DMGridData *grid = buffers->grids[g];
1693                 BLI_bitmap gh = buffers->grid_hidden[g];
1694
1695                 /* TODO: could use strips with hiding as well */
1696
1697                 if (gh) {
1698                         glBegin(GL_QUADS);
1699                         
1700                         for (y = 0; y < gridsize-1; y++) {
1701                                 for (x = 0; x < gridsize-1; x++) {
1702                                         const DMGridData *e[4] = {
1703                                                 &grid[y*gridsize + x],
1704                                                 &grid[(y+1)*gridsize + x],
1705                                                 &grid[(y+1)*gridsize + x+1],
1706                                                 &grid[y*gridsize + x+1]
1707                                         };
1708
1709                                         /* skip face if any of its corners are hidden */
1710                                         if (paint_is_grid_face_hidden(gh, gridsize, x, y))
1711                                                 continue;
1712
1713                                         if (smooth) {
1714                                                 for (j = 0; j < 4; j++) {
1715                                                         glNormal3fv(e[j]->no);
1716                                                         glVertex3fv(e[j]->co);
1717                                                 }
1718                                         }
1719                                         else {
1720                                                 float fno[3];
1721                                                 normal_quad_v3(fno, e[0]->co, e[1]->co, e[2]->co, e[3]->co);
1722                                                 glNormal3fv(fno);
1723
1724                                                 for (j = 0; j < 4; j++)
1725                                                         glVertex3fv(e[j]->co);
1726                                         }
1727                                 }
1728                         }
1729
1730                         glEnd();
1731                 }
1732                 else if (smooth) {
1733                         for (y = 0; y < gridsize-1; y++) {
1734                                 glBegin(GL_QUAD_STRIP);
1735                                 for (x = 0; x < gridsize; x++) {
1736                                         const DMGridData *a = &grid[y*gridsize + x];
1737                                         const DMGridData *b = &grid[(y+1)*gridsize + x];
1738
1739                                         glNormal3fv(a->no);
1740                                         glVertex3fv(a->co);
1741                                         glNormal3fv(b->no);
1742                                         glVertex3fv(b->co);
1743                                 }
1744                                 glEnd();
1745                         }
1746                 }
1747                 else {
1748                         for (y = 0; y < gridsize-1; y++) {
1749                                 glBegin(GL_QUAD_STRIP);
1750                                 for (x = 0; x < gridsize; x++) {
1751                                         const DMGridData *a = &grid[y*gridsize + x];
1752                                         const DMGridData *b = &grid[(y+1)*gridsize + x];
1753
1754                                         if (x > 0) {
1755                                                 const DMGridData *c = &grid[y*gridsize + x-1];
1756                                                 const DMGridData *d = &grid[(y+1)*gridsize + x-1];
1757                                                 float fno[3];
1758                                                 normal_quad_v3(fno, d->co, b->co, a->co, c->co);
1759                                                 glNormal3fv(fno);
1760                                         }
1761
1762                                         glVertex3fv(a->co);
1763                                         glVertex3fv(b->co);
1764                                 }
1765                                 glEnd();
1766                         }
1767                 }
1768         }
1769 }
1770
1771 void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial)
1772 {
1773         int smooth = 0;
1774
1775         if (buffers->totface) {
1776                 const MFace *f = &buffers->mface[buffers->face_indices[0]];
1777                 if (!setMaterial(f->mat_nr+1, NULL))
1778                         return;
1779
1780                 smooth = f->flag & ME_SMOOTH;
1781                 glShadeModel(smooth ? GL_SMOOTH: GL_FLAT);
1782         }
1783         else if (buffers->totgrid) {
1784                 const DMFlagMat *f = &buffers->grid_flag_mats[buffers->grid_indices[0]];
1785                 if (!setMaterial(f->mat_nr+1, NULL))
1786                         return;
1787
1788                 smooth = f->flag & ME_SMOOTH;
1789                 glShadeModel(smooth ? GL_SMOOTH: GL_FLAT);
1790         }
1791
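             /* VBO path: with no hidden quads the shared single-grid element
              * buffer is drawn once per grid, offsetting the vertex/normal
              * pointers into the vertex VBO; with hidden quads the node's own
              * element buffer already covers all grids and is drawn once. */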
1792         if (buffers->vert_buf && buffers->index_buf) {
1793                 glEnableClientState(GL_VERTEX_ARRAY);
1794                 glEnableClientState(GL_NORMAL_ARRAY);
1795
1796                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, buffers->vert_buf);
1797                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
1798
1799                 if (buffers->tot_quad) {
1800                         unsigned offset = 0;
1801                         int i, last = buffers->has_hidden ? 1 : buffers->totgrid;
1802                         for (i = 0; i < last; i++) {
1803                                 glVertexPointer(3, GL_FLOAT, sizeof(DMGridData), offset + (char*)offsetof(DMGridData, co));
1804                                 glNormalPointer(GL_FLOAT, sizeof(DMGridData), offset + (char*)offsetof(DMGridData, no));
1805                                 
1806                                 glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
1807
1808                                 offset += buffers->gridsize * buffers->gridsize * sizeof(DMGridData);
1809                         }
1810                 }
1811                 else {
1812                         glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat), (void*)offsetof(VertexBufferFormat, co));
1813                         glNormalPointer(GL_SHORT, sizeof(VertexBufferFormat), (void*)offsetof(VertexBufferFormat, no));
1814
1815                         glDrawElements(GL_TRIANGLES, buffers->tot_tri * 3, buffers->index_type, 0);
1816                 }
1817
1818                 glBindBufferARB(GL_ARRAY_BUFFER_ARB, 0);
1819                 glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
1820
1821                 glDisableClientState(GL_VERTEX_ARRAY);
1822                 glDisableClientState(GL_NORMAL_ARRAY);
1823         }
1824         /* fallbacks if we are out of memory or VBO is disabled */
1825         else if (buffers->totface) {
1826                 gpu_draw_buffers_legacy_mesh(buffers, smooth);
1827         }
1828         else if (buffers->totgrid) {
1829                 gpu_draw_buffers_legacy_grids(buffers, smooth);
1830         }
1831 }
1832
1833 void GPU_free_buffers(GPU_Buffers *buffers)
1834 {
1835         if (buffers) {
1836                 if (buffers->vert_buf)
1837                         glDeleteBuffersARB(1, &buffers->vert_buf);
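                     /* the index buffer is only deleted when this node owns it
                      * (mesh triangles, or grids with hidden quads); fully
                      * visible grids share the static buffer cached in
                      * gpu_get_grid_buffer() */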
1838                 if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
1839                         glDeleteBuffersARB(1, &buffers->index_buf);
1840
1841                 MEM_freeN(buffers);
1842         }
1843 }