1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager_exec.c
23  *  \ingroup draw
24  */
25
26 #include "draw_manager.h"
27
28 #include "BLI_mempool.h"
29
30 #include "BIF_glutil.h"
31
32 #include "BKE_global.h"
33 #include "BKE_object.h"
34
35 #include "GPU_draw.h"
36 #include "GPU_extensions.h"
37
38 #ifdef USE_GPU_SELECT
39 #  include "ED_view3d.h"
40 #  include "ED_armature.h"
41 #  include "GPU_select.h"
42 #endif
43
44 #ifdef USE_GPU_SELECT
45 void DRW_select_load_id(uint id)
46 {
47         BLI_assert(G.f & G_PICKSEL);
48         DST.select_id = id;
49 }
50 #endif
51
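/* Note (added): DRW_select_load_id() above only stores the id in DST.select_id; the
 * id is presumably picked up when draw calls are created (outside this file) and is
 * later passed to GPU_select_load_id() by the GPU_SELECT_LOAD_IF_PICKSEL* macros in
 * draw_shgroup() below. This only happens while GPU picking runs (G.f & G_PICKSEL). */
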
52 struct GPUUniformBuffer *view_ubo;
53
54 /* -------------------------------------------------------------------- */
55
56 /** \name Draw State (DRW_state)
57  * \{ */
58
59 void drw_state_set(DRWState state)
60 {
61         if (DST.state == state) {
62                 return;
63         }
64
65 #define CHANGED_TO(f) \
66         ((DST.state_lock & (f)) ? 0 : \
67          (((DST.state & (f)) ? \
68            ((state & (f)) ?  0 : -1) : \
69            ((state & (f)) ?  1 :  0))))
70
71 #define CHANGED_ANY(f) \
72         (((DST.state & (f)) != (state & (f))) && \
73          ((DST.state_lock & (f)) == 0))
74
75 #define CHANGED_ANY_STORE_VAR(f, enabled) \
76         (((DST.state & (f)) != (enabled = (state & (f)))) && \
77          (((DST.state_lock & (f)) == 0)))
78
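/* Worked example for the macros above (added note): CHANGED_TO(f) compares the
 * previous state (DST.state) with the requested state for the bits in `f`, ignoring
 * locked bits, and returns:
 *   -1 if the flag was set and is now being cleared,
 *    1 if the flag was cleared and is now being set,
 *    0 if the flag did not change (or is locked).
 * E.g. with DST.state == DRW_STATE_WRITE_COLOR and state == DRW_STATE_WRITE_DEPTH:
 *   CHANGED_TO(DRW_STATE_WRITE_DEPTH) ==  1  ->  glDepthMask(GL_TRUE) below,
 *   CHANGED_TO(DRW_STATE_WRITE_COLOR) == -1  ->  glColorMask(GL_FALSE, ...) below. */
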
79         /* Depth Write */
80         {
81                 int test;
82                 if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
83                         if (test == 1) {
84                                 glDepthMask(GL_TRUE);
85                         }
86                         else {
87                                 glDepthMask(GL_FALSE);
88                         }
89                 }
90         }
91
92         /* Color Write */
93         {
94                 int test;
95                 if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
96                         if (test == 1) {
97                                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
98                         }
99                         else {
100                                 glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
101                         }
102                 }
103         }
104
105         /* Raster Discard */
106         {
107                 if (CHANGED_ANY(DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR |
108                                 DRW_STATE_WRITE_STENCIL |
109                                 DRW_STATE_WRITE_STENCIL_SHADOW_PASS |
110                                 DRW_STATE_WRITE_STENCIL_SHADOW_FAIL))
111                 {
112                         if ((state & (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR |
113                                       DRW_STATE_WRITE_STENCIL | DRW_STATE_WRITE_STENCIL_SHADOW_PASS |
114                                       DRW_STATE_WRITE_STENCIL_SHADOW_FAIL)) != 0)
115                         {
116                                 glDisable(GL_RASTERIZER_DISCARD);
117                         }
118                         else {
119                                 glEnable(GL_RASTERIZER_DISCARD);
120                         }
121                 }
122         }
123
124         /* Cull */
125         {
126                 DRWState test;
127                 if (CHANGED_ANY_STORE_VAR(
128                         DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
129                         test))
130                 {
131                         if (test) {
132                                 glEnable(GL_CULL_FACE);
133
134                                 if ((state & DRW_STATE_CULL_BACK) != 0) {
135                                         glCullFace(GL_BACK);
136                                 }
137                                 else if ((state & DRW_STATE_CULL_FRONT) != 0) {
138                                         glCullFace(GL_FRONT);
139                                 }
140                                 else {
141                                         BLI_assert(0);
142                                 }
143                         }
144                         else {
145                                 glDisable(GL_CULL_FACE);
146                         }
147                 }
148         }
149
150         /* Depth Test */
151         {
152                 DRWState test;
153                 if (CHANGED_ANY_STORE_VAR(
154                         DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_DEPTH_EQUAL |
155                         DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_DEPTH_ALWAYS,
156                         test))
157                 {
158                         if (test) {
159                                 glEnable(GL_DEPTH_TEST);
160
161                                 if (state & DRW_STATE_DEPTH_LESS) {
162                                         glDepthFunc(GL_LESS);
163                                 }
164                                 else if (state & DRW_STATE_DEPTH_LESS_EQUAL) {
165                                         glDepthFunc(GL_LEQUAL);
166                                 }
167                                 else if (state & DRW_STATE_DEPTH_EQUAL) {
168                                         glDepthFunc(GL_EQUAL);
169                                 }
170                                 else if (state & DRW_STATE_DEPTH_GREATER) {
171                                         glDepthFunc(GL_GREATER);
172                                 }
173                                 else if (state & DRW_STATE_DEPTH_GREATER_EQUAL) {
174                                         glDepthFunc(GL_GEQUAL);
175                                 }
176                                 else if (state & DRW_STATE_DEPTH_ALWAYS) {
177                                         glDepthFunc(GL_ALWAYS);
178                                 }
179                                 else {
180                                         BLI_assert(0);
181                                 }
182                         }
183                         else {
184                                 glDisable(GL_DEPTH_TEST);
185                         }
186                 }
187         }
188
189         /* Wire Width */
190         {
191                 if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_SMOOTH)) {
192                         if ((state & DRW_STATE_WIRE_SMOOTH) != 0) {
193                                 glLineWidth(2.0f);
194                                 glEnable(GL_LINE_SMOOTH);
195                         }
196                         else if ((state & DRW_STATE_WIRE) != 0) {
197                                 glLineWidth(1.0f);
198                         }
199                         else {
200                                 glDisable(GL_LINE_SMOOTH);
201                         }
202                 }
203         }
204
205         /* Point Size */
206         {
207                 int test;
208                 if ((test = CHANGED_TO(DRW_STATE_POINT))) {
209                         if (test == 1) {
210                                 GPU_enable_program_point_size();
211                                 glPointSize(5.0f);
212                         }
213                         else {
214                                 GPU_disable_program_point_size();
215                         }
216                 }
217         }
218
219         /* Blending (all buffers) */
220         {
221                 int test;
222                 if (CHANGED_ANY_STORE_VAR(
223                         DRW_STATE_BLEND | DRW_STATE_BLEND_PREMUL | DRW_STATE_ADDITIVE |
224                         DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION | DRW_STATE_ADDITIVE_FULL |
225                         DRW_STATE_TRANSPARENT_REVEALAGE,
226                         test))
227                 {
228                         if (test) {
229                                 glEnable(GL_BLEND);
230
231                                 if ((state & DRW_STATE_BLEND) != 0) {
232                                         glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
233                                                             GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
234                                 }
235                                 else if ((state & DRW_STATE_BLEND_PREMUL) != 0) {
236                                         glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
237                                 }
238                                 else if ((state & DRW_STATE_MULTIPLY) != 0) {
239                                         glBlendFunc(GL_DST_COLOR, GL_ZERO);
240                                 }
241                                 else if ((state & DRW_STATE_TRANSMISSION) != 0) {
242                                         glBlendFunc(GL_ONE, GL_SRC_ALPHA);
243                                 }
244                                 else if ((state & DRW_STATE_TRANSPARENT_REVEALAGE) != 0) {
245                                         glBlendFunc(GL_ZERO, GL_ONE_MINUS_SRC_COLOR);
246                                 }
247                                 else if ((state & DRW_STATE_ADDITIVE) != 0) {
248                                         /* Do not let alpha accumulate but premult the source RGB by it. */
249                                         glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
250                                                             GL_ZERO, GL_ONE); /* Alpha */
251                                 }
252                                 else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
253                                         /* Let alpha accumulate. */
254                                         glBlendFunc(GL_ONE, GL_ONE);
255                                 }
256                                 else {
257                                         BLI_assert(0);
258                                 }
259                         }
260                         else {
261                                 glDisable(GL_BLEND);
262                                 glBlendFunc(GL_ONE, GL_ONE); /* Don't multiply incoming color by alpha. */
263                         }
264                 }
265         }
266
267         /* Clip Planes */
268         {
269                 int test;
270                 if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
271                         if (test == 1) {
272                                 for (int i = 0; i < DST.num_clip_planes; ++i) {
273                                         glEnable(GL_CLIP_DISTANCE0 + i);
274                                 }
275                         }
276                         else {
277                                 for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
278                                         glDisable(GL_CLIP_DISTANCE0 + i);
279                                 }
280                         }
281                 }
282         }
283
284         /* Line Stipple */
285         {
286                 int test;
287                 if (CHANGED_ANY_STORE_VAR(
288                         DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
289                         test))
290                 {
291                         if (test) {
292                                 if ((state & DRW_STATE_STIPPLE_2) != 0) {
293                                         setlinestyle(2);
294                                 }
295                                 else if ((state & DRW_STATE_STIPPLE_3) != 0) {
296                                         setlinestyle(3);
297                                 }
298                                 else if ((state & DRW_STATE_STIPPLE_4) != 0) {
299                                         setlinestyle(4);
300                                 }
301                                 else {
302                                         BLI_assert(0);
303                                 }
304                         }
305                         else {
306                                 setlinestyle(0);
307                         }
308                 }
309         }
310
311         /* Stencil */
312         {
313                 DRWState test;
314                 if (CHANGED_ANY_STORE_VAR(
315                         DRW_STATE_WRITE_STENCIL |
316                         DRW_STATE_WRITE_STENCIL_SHADOW_PASS |
317                         DRW_STATE_WRITE_STENCIL_SHADOW_FAIL |
318                         DRW_STATE_STENCIL_EQUAL |
319                         DRW_STATE_STENCIL_NEQUAL,
320                         test))
321                 {
322                         if (test) {
323                                 glEnable(GL_STENCIL_TEST);
324                                 /* Stencil Write */
325                                 if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
326                                         glStencilMask(0xFF);
327                                         glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
328                                 }
329                                 else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_PASS) != 0) {
330                                         glStencilMask(0xFF);
331                                         glStencilOpSeparate(GL_BACK,  GL_KEEP, GL_KEEP, GL_INCR_WRAP);
332                                         glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
333                                 }
334                                 else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_FAIL) != 0) {
335                                         glStencilMask(0xFF);
336                                         glStencilOpSeparate(GL_BACK,  GL_KEEP, GL_DECR_WRAP, GL_KEEP);
337                                         glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
338                                 }
339                                 /* Stencil Test */
340                                 else if ((state & (DRW_STATE_STENCIL_EQUAL | DRW_STATE_STENCIL_NEQUAL)) != 0) {
341                                         glStencilMask(0x00); /* disable write */
342                                         DST.stencil_mask = STENCIL_UNDEFINED;
343                                 }
344                                 else {
345                                         BLI_assert(0);
346                                 }
347                         }
348                         else {
349                                 /* disable write & test */
350                                 DST.stencil_mask = 0;
351                                 glStencilMask(0x00);
352                                 glStencilFunc(GL_ALWAYS, 1, 0xFF);
353                                 glDisable(GL_STENCIL_TEST);
354                         }
355                 }
356         }
357
358 #undef CHANGED_TO
359 #undef CHANGED_ANY
360 #undef CHANGED_ANY_STORE_VAR
361
362         DST.state = state;
363 }
364
365 static void drw_stencil_set(uint mask)
366 {
367         if (DST.stencil_mask != mask) {
368                 DST.stencil_mask = mask;
369                 /* Stencil Write */
370                 if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
371                         glStencilFunc(GL_ALWAYS, mask, 0xFF);
372                 }
373                 /* Stencil Test */
374                 else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
375                         glStencilFunc(GL_EQUAL, mask, 0xFF);
376                 }
377                 else if ((DST.state & DRW_STATE_STENCIL_NEQUAL) != 0) {
378                         glStencilFunc(GL_NOTEQUAL, mask, 0xFF);
379                 }
380         }
381 }
382
383 /* Reset state to not interfere with other UI draw calls. */
384 void DRW_state_reset_ex(DRWState state)
385 {
386         DST.state = ~state;
387         drw_state_set(state);
388 }
389
390 /**
391  * Use with care: intended so the selection code can override pass depth settings,
392  * which is important for selection to work properly.
393  *
394  * Should be set in the main draw loop and cleared afterwards.
395  */
396 void DRW_state_lock(DRWState state)
397 {
398         DST.state_lock = state;
399 }
400
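/* Illustrative usage sketch (the calling code is hypothetical, only DRW_state_lock()
 * and the DRW_STATE_* flags are defined by the draw manager):
 *
 *   DRW_state_lock(DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL);
 *   ...draw the passes used for selection...
 *   DRW_state_lock(0);  // clear the lock afterwards, as noted above
 */
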
401 void DRW_state_reset(void)
402 {
403         DRW_state_reset_ex(DRW_STATE_DEFAULT);
404
405         /* Reset blending function */
406         glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
407 }
408
409 /* NOTE: Make sure to reset after use! */
410 void DRW_state_invert_facing(void)
411 {
412         SWAP(GLenum, DST.backface, DST.frontface);
413         glFrontFace(DST.frontface);
414 }
415
416 /**
417  * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
418  * and if the shaders have support for it (see usage of gl_ClipDistance).
419  * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
420  **/
421 void DRW_state_clip_planes_count_set(uint plane_ct)
422 {
423         BLI_assert(plane_ct <= MAX_CLIP_PLANES);
424         DST.num_clip_planes = plane_ct;
425 }
426
427 void DRW_state_clip_planes_reset(void)
428 {
429         DST.num_clip_planes = 0;
430 }
431
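/* Illustrative usage sketch (the caller is hypothetical): an engine clipping against
 * user planes would typically do
 *
 *   DRW_state_clip_planes_count_set(plane_ct);      // plane_ct <= MAX_CLIP_PLANES
 *   ...draw passes tagged with DRW_STATE_CLIP_PLANES...
 *   DRW_state_clip_planes_reset();
 */
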
432 /** \} */
433
434 /* -------------------------------------------------------------------- */
435
436 /** \name Clipping (DRW_clipping)
437  * \{ */
438
439 /* Extract the 8 corners from a Projection Matrix.
440  * A simpler but less accurate alternative is:
441  * BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
442  * for (int i = 0; i < 8; i++) {mul_project_m4_v3(projinv, bbox.vec[i]);}
443  */
444 static void draw_frustum_boundbox_calc(const float(*projmat)[4], BoundBox *r_bbox)
445 {
446         float near, far, left, right, bottom, top;
447         bool is_persp = projmat[3][3] == 0.0f;
448
449         if (is_persp) {
450                 near   = projmat[3][2] / (projmat[2][2] - 1.0f);
451                 far    = projmat[3][2] / (projmat[2][2] + 1.0f);
452                 left   = near * (projmat[2][0] - 1.0f) / projmat[0][0];
453                 right  = near * (projmat[2][0] + 1.0f) / projmat[0][0];
454                 bottom = near * (projmat[2][1] - 1.0f) / projmat[1][1];
455                 top    = near * (projmat[2][1] + 1.0f) / projmat[1][1];
456         }
457         else {
458                 near   = ( projmat[3][2] + 1.0f) / projmat[2][2];
459                 far    = ( projmat[3][2] - 1.0f) / projmat[2][2];
460                 left   = (-projmat[3][0] - 1.0f) / projmat[0][0];
461                 right  = (-projmat[3][0] + 1.0f) / projmat[0][0];
462                 bottom = (-projmat[3][1] - 1.0f) / projmat[1][1];
463                 top    = (-projmat[3][1] + 1.0f) / projmat[1][1];
464         }
465
466         r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
467         r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
468         r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
469         r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
470         r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
471
472         /* Get the coordinates of the far plane. */
473         if (is_persp) {
474                 float sca_far = far / near;
475                 left   *= sca_far;
476                 bottom *= sca_far;
477                 right  *= sca_far;
478                 top    *= sca_far;
479         }
480
481         r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
482         r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
483         r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
484         r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
485         r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
486 }
487
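/* Added sanity-check note for the perspective branch above, assuming the standard
 * symmetric glFrustum-style matrix (column-major, as used by Blender) where
 * projmat[2][2] = -(f + n) / (f - n) and projmat[3][2] = -2 f n / (f - n):
 *
 *   projmat[3][2] / (projmat[2][2] - 1.0f) = (-2fn/(f-n)) / (-2f/(f-n)) = n
 *   projmat[3][2] / (projmat[2][2] + 1.0f) = (-2fn/(f-n)) / (-2n/(f-n)) = f
 *
 * which are the positive near/far distances used (negated) for the Z of the frustum
 * corners, since the camera looks down -Z. */
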
488 static void draw_clipping_setup_from_view(void)
489 {
490         if (DST.clipping.updated)
491                 return;
492
493         float (*viewinv)[4] = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
494         float (*projmat)[4] = DST.view_data.matstate.mat[DRW_MAT_WIN];
495         float (*projinv)[4] = DST.view_data.matstate.mat[DRW_MAT_WININV];
496         BoundSphere *bsphere = &DST.clipping.frustum_bsphere;
497
498         /* Extract Clipping Planes */
499         BoundBox bbox;
500 #if 0 /* It has accuracy problems. */
501         BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
502         for (int i = 0; i < 8; i++) {
503                 mul_project_m4_v3(projinv, bbox.vec[i]);
504         }
505 #else
506         draw_frustum_boundbox_calc(projmat, &bbox);
507 #endif
508         /* Transform into world space. */
509         for (int i = 0; i < 8; i++) {
510                 mul_m4_v3(viewinv, bbox.vec[i]);
511         }
512
513         memcpy(&DST.clipping.frustum_corners, &bbox, sizeof(BoundBox));
514
515         /* Compute clip planes using the world space frustum corners. */
516         for (int p = 0; p < 6; p++) {
517                 int q, r;
518                 switch (p) {
519                         case 0:  q = 1; r = 2; break;
520                         case 1:  q = 0; r = 5; break;
521                         case 2:  q = 1; r = 5; break;
522                         case 3:  q = 2; r = 6; break;
523                         case 4:  q = 0; r = 3; break;
524                         default: q = 4; r = 7; break;
525                 }
526                 if (DST.frontface == GL_CW) {
527                         SWAP(int, q, r);
528                 }
529
530                 normal_tri_v3(DST.clipping.frustum_planes[p], bbox.vec[p], bbox.vec[q], bbox.vec[r]);
531                 DST.clipping.frustum_planes[p][3] = -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[p]);
532         }
533
534         /* Extract Bounding Sphere */
535         if (projmat[3][3] != 0.0f) {
536                 /* Orthographic */
537                 /* The two opposite extreme corners of the near and far plane (in world space). */
538                 float *nearpoint = bbox.vec[0];
539                 float *farpoint = bbox.vec[6];
540
541                 /* just use the midpoint between them */
542                 mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
543                 bsphere->radius = len_v3v3(bsphere->center, farpoint);
544         }
545         else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
546                 /* Perspective with symmetrical frustum. */
547
548                 /* We obtain the center and radius of the circumscribed circle of the
549                  * isosceles trapezoid formed by the diagonals of the near and far clipping planes. */
550
551                 /* center of each clipping plane */
552                 float mid_min[3], mid_max[3];
553                 mid_v3_v3v3(mid_min, bbox.vec[3], bbox.vec[4]);
554                 mid_v3_v3v3(mid_max, bbox.vec[2], bbox.vec[5]);
555
556                 /* squared length of the diagonal of each clipping plane */
557                 float a_sq = len_squared_v3v3(bbox.vec[3], bbox.vec[4]);
558                 float b_sq = len_squared_v3v3(bbox.vec[2], bbox.vec[5]);
559
560                 /* distance squared between clipping planes */
561                 float h_sq = len_squared_v3v3(mid_min, mid_max);
562
563                 float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
564
565                 /* The goal is to get the smallest sphere,
566                  * not the sphere that passes through each corner */
567                 CLAMP(fac, 0.0f, 1.0f);
568
569                 interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
570
571                 /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
572                 bsphere->radius = len_v3v3(bsphere->center, bbox.vec[1]);
573         }
574         else {
575                 /* Perspective with asymmetrical frustum. */
576
577                 /* We put the sphere center on the line that goes from origin
578                  * to the center of the far clipping plane. */
579
580                 /* Detect which corner of the far clipping plane is the farthest from the origin. */
581                 float nfar[4];       /* most extreme far point in NDC space */
582                 float farxy[2];      /* farpoint projection onto the near plane */
583                 float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinates */
584                 float nearpoint[3];  /* most extreme near point in camera coordinates */
585                 float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinates */
586                 float F = -1.0f, N;  /* squared distance of far and near point to origin */
587                 float f, n;          /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
588                 float e, s;          /* far and near clipping distance (<0) */
589                 float c;             /* slope of center line = distance of far clipping center to z axis / far clipping distance */
590                 float z;             /* projection of sphere center on z axis (<0) */
591
592                 /* Find farthest corner and center of far clip plane. */
593                 float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
594                 for (int i = 0; i < 4; i++) {
595                         float point[3];
596                         mul_v3_project_m4_v3(point, projinv, corner);
597                         float len = len_squared_v3(point);
598                         if (len > F) {
599                                 copy_v3_v3(nfar, corner);
600                                 copy_v3_v3(farpoint, point);
601                                 F = len;
602                         }
603                         add_v3_v3(farcenter, point);
604                         /* rotate by 90 degrees to walk through the 4 points of the far clip plane */
605                         float tmp = corner[0];
606                         corner[0] = -corner[1];
607                         corner[1] = tmp;
608                 }
609
610                 /* the far center is the average of the far clipping points */
611                 mul_v3_fl(farcenter, 0.25f);
612                 /* the extreme near point is the opposite point on the near clipping plane */
613                 copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
614                 mul_v3_project_m4_v3(nearpoint, projinv, nfar);
615                 /* this is a frustum projection */
616                 N = len_squared_v3(nearpoint);
617                 e = farpoint[2];
618                 s = nearpoint[2];
619                 /* distance to view Z axis */
620                 f = len_v2(farpoint);
621                 /* get corresponding point on the near plane */
622                 mul_v2_v2fl(farxy, farpoint, s / e);
623                 /* this formula preserves the sign of n */
624                 sub_v2_v2(nearpoint, farxy);
625                 n = f * s / e - len_v2(nearpoint);
626                 c = len_v2(farcenter) / e;
627                 /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
628                 z = (F - N) / (2.0f * (e - s + c * (f - n)));
629
630                 bsphere->center[0] = farcenter[0] * z / e;
631                 bsphere->center[1] = farcenter[1] * z / e;
632                 bsphere->center[2] = z;
633                 bsphere->radius = len_v3v3(bsphere->center, farpoint);
634
635                 /* Transform to world space. */
636                 mul_m4_v3(viewinv, bsphere->center);
637         }
638
639         DST.clipping.updated = true;
640 }
641
642 /* Return True if the given BoundSphere intersects the current view frustum. */
643 bool DRW_culling_sphere_test(BoundSphere *bsphere)
644 {
645         draw_clipping_setup_from_view();
646
647         /* Bypass test if radius is negative. */
648         if (bsphere->radius < 0.0f)
649                 return true;
650
651         /* Do a rough test first: Sphere VS Sphere intersect. */
652         BoundSphere *frustum_bsphere = &DST.clipping.frustum_bsphere;
653         float center_dist = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
654         if (center_dist > SQUARE(bsphere->radius + frustum_bsphere->radius))
655                 return false;
656
657         /* Test against the 6 frustum planes. */
658         for (int p = 0; p < 6; p++) {
659                 float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bsphere->center);
660                 if (dist < -bsphere->radius) {
661                         return false;
662                 }
663         }
664
665         return true;
666 }
667
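/* Illustrative sketch of how an engine may use the test above (the object handling
 * is hypothetical, only BoundSphere and DRW_culling_sphere_test() come from here):
 *
 *   BoundSphere bsphere;
 *   ...fill bsphere.center / bsphere.radius from the object bounds, in world space...
 *   if (DRW_culling_sphere_test(&bsphere)) {
 *           ...add the object's draw calls, it is at least partially visible...
 *   }
 */
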
668 /* Return True if the given BoundBox intersects the current view frustum.
669  * bbox must be in world space. */
670 bool DRW_culling_box_test(BoundBox *bbox)
671 {
672         draw_clipping_setup_from_view();
673
674         /* 6 view frustum planes */
675         for (int p = 0; p < 6; p++) {
676                 /* 8 box vertices. */
677                 for (int v = 0; v < 8 ; v++) {
678                         float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bbox->vec[v]);
679                         if (dist > 0.0f) {
680                                 /* At least one point in front of this plane.
681                                  * Go to next plane. */
682                                 break;
683                         }
684                         else if (v == 7) {
685                                 /* 8 points behind this plane. */
686                                 return false;
687                         }
688                 }
689         }
690
691         return true;
692 }
693
694 /* Return True if the current view frustum is inside or intersects the given plane. */
695 bool DRW_culling_plane_test(float plane[4])
696 {
697         draw_clipping_setup_from_view();
698
699         /* Test against the 8 frustum corners. */
700         for (int c = 0; c < 8; c++) {
701                 float dist = plane_point_side_v3(plane, DST.clipping.frustum_corners.vec[c]);
702                 if (dist < 0.0f) {
703                         return true;
704                 }
705         }
706
707         return false;
708 }
709
710 void DRW_culling_frustum_corners_get(BoundBox *corners)
711 {
712         draw_clipping_setup_from_view();
713         memcpy(corners, &DST.clipping.frustum_corners, sizeof(BoundBox));
714 }
715
716 /** \} */
717
718 /* -------------------------------------------------------------------- */
719
720 /** \name Draw (DRW_draw)
721  * \{ */
722
723 static void draw_visibility_eval(DRWCallState *st)
724 {
725         bool culled = st->flag & DRW_CALL_CULLED;
726
727         if (st->cache_id != DST.state_cache_id) {
728                 /* Update culling result for this view. */
729                 culled = !DRW_culling_sphere_test(&st->bsphere);
730         }
731
732         if (st->visibility_cb) {
733                 culled = !st->visibility_cb(!culled, st->user_data);
734         }
735
736         SET_FLAG_FROM_TEST(st->flag, culled, DRW_CALL_CULLED);
737 }
738
739 static void draw_matrices_model_prepare(DRWCallState *st)
740 {
741         if (st->cache_id == DST.state_cache_id) {
742                 /* Values are already updated for this view. */
743                 return;
744         }
745         else {
746                 st->cache_id = DST.state_cache_id;
747         }
748
749         /* No need to go further, the call will not be used. */
750         if ((st->flag & DRW_CALL_CULLED) != 0 &&
751             (st->flag & DRW_CALL_BYPASS_CULLING) == 0)
752         {
753                 return;
754         }
755         /* Order matters */
756         if (st->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE |
757                            DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC))
758         {
759                 mul_m4_m4m4(st->modelview, DST.view_data.matstate.mat[DRW_MAT_VIEW], st->model);
760         }
761         if (st->matflag & DRW_CALL_MODELVIEWINVERSE) {
762                 invert_m4_m4(st->modelviewinverse, st->modelview);
763         }
764         if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) {
765                 mul_m4_m4m4(st->modelviewprojection, DST.view_data.matstate.mat[DRW_MAT_PERS], st->model);
766         }
767         if (st->matflag & (DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC)) {
768                 copy_m3_m4(st->normalview, st->modelview);
769                 invert_m3(st->normalview);
770                 transpose_m3(st->normalview);
771         }
772         if (st->matflag & DRW_CALL_EYEVEC) {
773                 /* Used by orthographic wires */
774                 float tmp[3][3];
775                 copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f);
776                 invert_m3_m3(tmp, st->normalview);
777                 /* set eye vector, transformed to object coords */
778                 mul_m3_v3(tmp, st->eyevec);
779         }
780         /* Non view-dependent */
781         if (st->matflag & DRW_CALL_MODELINVERSE) {
782                 invert_m4_m4(st->modelinverse, st->model);
783                 st->matflag &= ~DRW_CALL_MODELINVERSE;
784         }
785         if (st->matflag & DRW_CALL_NORMALWORLD) {
786                 copy_m3_m4(st->normalworld, st->model);
787                 invert_m3(st->normalworld);
788                 transpose_m3(st->normalworld);
789                 st->matflag &= ~DRW_CALL_NORMALWORLD;
790         }
791 }
792
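/* Added note on the matrices prepared above: "normalview" and "normalworld" are the
 * usual normal matrices, i.e. the inverse transpose of the upper 3x3 of the modelview
 * (resp. model) matrix, so normals stay perpendicular to surfaces under non-uniform
 * scaling:
 *
 *   normalview  = transpose(inverse(mat3(view * model)))
 *   normalworld = transpose(inverse(mat3(model)))
 */
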
793 static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallState *state)
794 {
795         /* step 1 : bind object dependent matrices */
796         if (state != NULL) {
797                 GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
798                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
799                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
800                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
801                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
802                 GPU_shader_uniform_vector(shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
803                 GPU_shader_uniform_vector(shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
804                 GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
805                 GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
806         }
807         else {
808                 BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) && (shgroup->eye == -1));
809                 /* For instancing and batching. */
810                 float unitmat[4][4];
811                 unit_m4(unitmat);
812                 GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
813                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
814                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEW]);
815                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEWINV]);
816                 GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_PERS]);
817                 GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
818         }
819 }
820
821 static void draw_geometry_execute_ex(
822         DRWShadingGroup *shgroup, Gwn_Batch *geom, uint start, uint count, bool draw_instance)
823 {
824         /* Special case: empty drawcall, placement is done via shader, don't bind anything. */
825         /* TODO use DRW_CALL_PROCEDURAL instead */
826         if (geom == NULL) {
827                 BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
828                 /* Shader is already bound. */
829                 GWN_draw_primitive(GWN_PRIM_TRIS, count);
830                 return;
831         }
832
833         /* step 2 : bind vertex array & draw */
834         GWN_batch_program_set_no_use(
835                 geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
836         /* XXX hacking gawain: we don't want to call glUseProgram! (huge performance loss) */
837         geom->program_in_use = true;
838
839         GWN_batch_draw_range_ex(geom, start, count, draw_instance);
840
841         geom->program_in_use = false; /* XXX hacking gawain */
842 }
843
844 static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
845 {
846         draw_geometry_execute_ex(shgroup, geom, 0, 0, false);
847 }
848
849 enum {
850         BIND_NONE = 0,
851         BIND_TEMP = 1,         /* Release slot after this shading group. */
852         BIND_PERSIST = 2,      /* Release slot only after the next shader change. */
853 };
854
855 static void bind_texture(GPUTexture *tex, char bind_type)
856 {
857         int index;
858         char *slot_flags = DST.RST.bound_tex_slots;
859         int bind_num = GPU_texture_bound_number(tex);
860         if (bind_num == -1) {
861                 for (int i = 0; i < GPU_max_textures(); ++i) {
862                         index = DST.RST.bind_tex_inc = (DST.RST.bind_tex_inc + 1) % GPU_max_textures();
863                         if (slot_flags[index] == BIND_NONE) {
864                                 if (DST.RST.bound_texs[index] != NULL) {
865                                         GPU_texture_unbind(DST.RST.bound_texs[index]);
866                                 }
867                                 GPU_texture_bind(tex, index);
868                                 DST.RST.bound_texs[index] = tex;
869                                 slot_flags[index] = bind_type;
870                                 // printf("Binds Texture %d %p\n", DST.RST.bind_tex_inc, tex);
871                                 return;
872                         }
873                 }
874                 printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
875         }
876         slot_flags[bind_num] = bind_type;
877 }
878
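/* How the slot allocation above works (added note): if the texture is already bound
 * (bind_num != -1) only its slot flag is refreshed; otherwise the texture units are
 * walked round-robin starting after DST.RST.bind_tex_inc and the first slot flagged
 * BIND_NONE is taken, unbinding whatever texture occupied it. BIND_TEMP slots are
 * released after each shading group while BIND_PERSIST slots survive until the next
 * shader change (see release_texture_slots() / release_ubo_slots() below).
 * bind_ubo() below follows the same scheme for uniform buffer binding points. */
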
879 static void bind_ubo(GPUUniformBuffer *ubo, char bind_type)
880 {
881         int index;
882         char *slot_flags = DST.RST.bound_ubo_slots;
883         int bind_num = GPU_uniformbuffer_bindpoint(ubo);
884         if (bind_num == -1) {
885                 for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
886                         index = DST.RST.bind_ubo_inc = (DST.RST.bind_ubo_inc + 1) % GPU_max_ubo_binds();
887                         if (slot_flags[index] == BIND_NONE) {
888                                 if (DST.RST.bound_ubos[index] != NULL) {
889                                         GPU_uniformbuffer_unbind(DST.RST.bound_ubos[index]);
890                                 }
891                                 GPU_uniformbuffer_bind(ubo, index);
892                                 DST.RST.bound_ubos[index] = ubo;
893                                 slot_flags[index] = bind_type;
894                                 return;
895                         }
896                 }
897                 /* printf so the user can report bad behaviour */
898                 printf("Not enough ubo slots! This should not happen!\n");
899                 /* This does not depend on user input.
900                  * It is our responsibility to make sure there are enough slots. */
901                 BLI_assert(0);
902         }
903         slot_flags[bind_num] = bind_type;
904 }
905
906 static void release_texture_slots(bool with_persist)
907 {
908         if (with_persist) {
909                 memset(DST.RST.bound_tex_slots, 0x0, sizeof(*DST.RST.bound_tex_slots) * GPU_max_textures());
910         }
911         else {
912                 for (int i = 0; i < GPU_max_textures(); ++i) {
913                         if (DST.RST.bound_tex_slots[i] != BIND_PERSIST)
914                                 DST.RST.bound_tex_slots[i] = BIND_NONE;
915                 }
916         }
917
918         /* Reset so that slots are consistently assigned for different shader
919          * draw calls, to avoid shader specialization/patching by the driver. */
920         DST.RST.bind_tex_inc = 0;
921 }
922
923 static void release_ubo_slots(bool with_persist)
924 {
925         if (with_persist) {
926                 memset(DST.RST.bound_ubo_slots, 0x0, sizeof(*DST.RST.bound_ubo_slots) * GPU_max_ubo_binds());
927         }
928         else {
929                 for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
930                         if (DST.RST.bound_ubo_slots[i] != BIND_PERSIST)
931                                 DST.RST.bound_ubo_slots[i] = BIND_NONE;
932                 }
933         }
934
935         /* Reset so that slots are consistently assigned for different shader
936          * draw calls, to avoid shader specialization/patching by the driver. */
937         DST.RST.bind_ubo_inc = 0;
938 }
939
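/* Added overview: draw_shgroup() below performs, in order:
 *   1. bind the shading group's shader if it differs from the currently bound one,
 *   2. enable transform feedback when the pass and shading group request it,
 *   3. release non-persistent texture/UBO slots (all slots if the shader changed),
 *   4. apply the merged pass + shgroup state and the stencil mask,
 *   5. upload every DRWUniform (values, textures, UBOs),
 *   6. issue the draw calls, either as a single instanced/batched draw or by walking
 *      the call list with per-call visibility, matrices and select ids. */
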
940 static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
941 {
942         BLI_assert(shgroup->shader);
943
944         GPUTexture *tex;
945         GPUUniformBuffer *ubo;
946         int val;
947         float fval;
948         const bool shader_changed = (DST.shader != shgroup->shader);
949         bool use_tfeedback = false;
950
951         if (shader_changed) {
952                 if (DST.shader) GPU_shader_unbind();
953                 GPU_shader_bind(shgroup->shader);
954                 DST.shader = shgroup->shader;
955         }
956
957         if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 &&
958             (shgroup->type == DRW_SHG_FEEDBACK_TRANSFORM))
959         {
960                 use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
961                                                                      shgroup->tfeedback_target->vbo_id);
962         }
963
964         release_ubo_slots(shader_changed);
965         release_texture_slots(shader_changed);
966
967         drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
968         drw_stencil_set(shgroup->stencil_mask);
969
970         /* Binding Uniform */
971         for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
972                 switch (uni->type) {
973                         case DRW_UNIFORM_SHORT_TO_INT:
974                                 val = (int)*((short *)uni->pvalue);
975                                 GPU_shader_uniform_vector_int(
976                                         shgroup->shader, uni->location, uni->length, uni->arraysize, &val);
977                                 break;
978                         case DRW_UNIFORM_SHORT_TO_FLOAT:
979                                 fval = (float)*((short *)uni->pvalue);
980                                 GPU_shader_uniform_vector(
981                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
982                                 break;
983                         case DRW_UNIFORM_BOOL_COPY:
984                         case DRW_UNIFORM_INT_COPY:
985                                 GPU_shader_uniform_vector_int(
986                                         shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->ivalue);
987                                 break;
988                         case DRW_UNIFORM_BOOL:
989                         case DRW_UNIFORM_INT:
990                                 GPU_shader_uniform_vector_int(
991                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->pvalue);
992                                 break;
993                         case DRW_UNIFORM_FLOAT_COPY:
994                                 GPU_shader_uniform_vector(
995                                         shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->fvalue);
996                                 break;
997                         case DRW_UNIFORM_FLOAT:
998                                 GPU_shader_uniform_vector(
999                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->pvalue);
1000                                 break;
1001                         case DRW_UNIFORM_TEXTURE:
1002                                 tex = (GPUTexture *)uni->pvalue;
1003                                 BLI_assert(tex);
1004                                 bind_texture(tex, BIND_TEMP);
1005                                 GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
1006                                 break;
1007                         case DRW_UNIFORM_TEXTURE_PERSIST:
1008                                 tex = (GPUTexture *)uni->pvalue;
1009                                 BLI_assert(tex);
1010                                 bind_texture(tex, BIND_PERSIST);
1011                                 GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
1012                                 break;
1013                         case DRW_UNIFORM_TEXTURE_REF:
1014                                 tex = *((GPUTexture **)uni->pvalue);
1015                                 BLI_assert(tex);
1016                                 bind_texture(tex, BIND_TEMP);
1017                                 GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
1018                                 break;
1019                         case DRW_UNIFORM_BLOCK:
1020                                 ubo = (GPUUniformBuffer *)uni->pvalue;
1021                                 bind_ubo(ubo, BIND_TEMP);
1022                                 GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
1023                                 break;
1024                         case DRW_UNIFORM_BLOCK_PERSIST:
1025                                 ubo = (GPUUniformBuffer *)uni->pvalue;
1026                                 bind_ubo(ubo, BIND_PERSIST);
1027                                 GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
1028                                 break;
1029                 }
1030         }
1031
1032 #ifdef USE_GPU_SELECT
1033 #  define GPU_SELECT_LOAD_IF_PICKSEL(_select_id) \
1034         if (G.f & G_PICKSEL) { \
1035                 GPU_select_load_id(_select_id); \
1036         } ((void)0)
1037
1038 #  define GPU_SELECT_LOAD_IF_PICKSEL_CALL(_call) \
1039         if ((G.f & G_PICKSEL) && (_call)) { \
1040                 GPU_select_load_id((_call)->select_id); \
1041         } ((void)0)
1042
1043 #  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count)  \
1044         _start = 0;                                                      \
1045         _count = _shgroup->instance_count;                     \
1046         int *select_id = NULL;                                           \
1047         if (G.f & G_PICKSEL) {                                           \
1048                 if (_shgroup->override_selectid == -1) {                        \
1049                         /* Hack : get vbo data without actually drawing. */     \
1050                         Gwn_VertBufRaw raw;                   \
1051                         GWN_vertbuf_attr_get_raw_data(_shgroup->inst_selectid, 0, &raw); \
1052                         select_id = GWN_vertbuf_raw_step(&raw);                               \
1053                         switch (_shgroup->type) {                                             \
1054                                 case DRW_SHG_TRIANGLE_BATCH: _count = 3; break;                   \
1055                                 case DRW_SHG_LINE_BATCH: _count = 2; break;                       \
1056                                 default: _count = 1; break;                                       \
1057                         }                                                                     \
1058                 }                                                                         \
1059                 else {                                                                    \
1060                         GPU_select_load_id(_shgroup->override_selectid);            \
1061                 }                                                                         \
1062         }                                                                \
1063         while (_start < _shgroup->instance_count) {            \
1064                 if (select_id) {                                             \
1065                         GPU_select_load_id(select_id[_start]);                   \
1066                 }
1067
1068 # define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
1069                 _start += _count;                                    \
1070         }
1071
1072 #else
1073 #  define GPU_SELECT_LOAD_IF_PICKSEL(select_id)
1074 #  define GPU_SELECT_LOAD_IF_PICKSEL_CALL(call)
1075 #  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
1076 #  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
1077         _start = 0;                                                     \
1078         _count = _shgroup->instance_count;
1079
1080 #endif
1081
1082         /* Rendering Calls */
1083         if (!ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM)) {
1084                 /* Replacing multiple calls with only one */
1085                 if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
1086                         if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
1087                                 if (shgroup->instance_geom != NULL) {
1088                                         GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
1089                                         draw_geometry_prepare(shgroup, NULL);
1090                                         draw_geometry_execute_ex(shgroup, shgroup->instance_geom, 0, 0, true);
1091                                 }
1092                         }
1093                         else {
1094                                 if (shgroup->instance_count > 0) {
1095                                         uint count, start;
1096                                         draw_geometry_prepare(shgroup, NULL);
1097                                         GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
1098                                         {
1099                                                 draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count, true);
1100                                         }
1101                                         GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
1102                                 }
1103                         }
1104                 }
1105                 else { /* DRW_SHG_***_BATCH */
1106                         /* Some dynamic batches can have no geom (no call to aggregate). */
1107                         if (shgroup->instance_count > 0) {
1108                                 uint count, start;
1109                                 draw_geometry_prepare(shgroup, NULL);
1110                                 GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
1111                                 {
1112                                         draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count, false);
1113                                 }
1114                                 GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
1115                         }
1116                 }
1117         }
1118         else {
1119                 bool prev_neg_scale = false;
1120                 int callid = 0;
1121                 for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
1122
1123                         /* OPTI/IDEA(clem): Do this preparation in another thread. */
1124                         draw_visibility_eval(call->state);
1125                         draw_matrices_model_prepare(call->state);
1126
1127                         if ((call->state->flag & DRW_CALL_CULLED) != 0 &&
1128                             (call->state->flag & DRW_CALL_BYPASS_CULLING) == 0)
1129                         {
1130                                 continue;
1131                         }
1132
1133                         /* XXX small exception/optimisation for outline rendering. */
1134                         if (shgroup->callid != -1) {
1135                                 GPU_shader_uniform_vector_int(shgroup->shader, shgroup->callid, 1, 1, &callid);
1136                                 callid += 1;
1137                         }
1138
1139                         /* Negative scale objects */
1140                         bool neg_scale = call->state->flag & DRW_CALL_NEGSCALE;
1141                         if (neg_scale != prev_neg_scale) {
1142                                 glFrontFace((neg_scale) ? DST.backface : DST.frontface);
1143                                 prev_neg_scale = neg_scale;
1144                         }
1145
1146                         GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
1147                         draw_geometry_prepare(shgroup, call->state);
1148
1149                         switch (call->type) {
1150                                 case DRW_CALL_SINGLE:
1151                                         draw_geometry_execute(shgroup, call->single.geometry);
1152                                         break;
1153                                 case DRW_CALL_RANGE:
1154                                         draw_geometry_execute_ex(shgroup, call->range.geometry, call->range.start, call->range.count, false);
1155                                         break;
1156                                 case DRW_CALL_INSTANCES:
1157                                         draw_geometry_execute_ex(shgroup, call->instances.geometry, 0, *call->instances.count, true);
1158                                         break;
1159                                 case DRW_CALL_GENERATE:
1160                                         call->generate.geometry_fn(shgroup, draw_geometry_execute, call->generate.user_data);
1161                                         break;
1162                                 case DRW_CALL_PROCEDURAL:
1163                                         GWN_draw_primitive(call->procedural.prim_type, call->procedural.vert_count);
1164                                         break;
1165                                 default:
1166                                         BLI_assert(0);
1167                         }
1168                 }
1169                 /* Reset state */
1170                 glFrontFace(DST.frontface);
1171         }
1172
1173         if (use_tfeedback) {
1174                 GPU_shader_transform_feedback_disable(shgroup->shader);
1175         }
1176
1177         /* TODO: remove (currently causes an alpha issue with sculpt, need to investigate). */
1178         DRW_state_reset();
1179 }
1180
1181 static void drw_update_view(void)
1182 {
1183         if (DST.dirty_mat) {
1184                 DST.state_cache_id++;
1185                 DST.dirty_mat = false;
1186
1187                 DRW_uniformbuffer_update(view_ubo, &DST.view_data);
1188
1189                 /* Catch integer wrap around. */
1190                 if (UNLIKELY(DST.state_cache_id == 0)) {
1191                         DST.state_cache_id = 1;
1192                         /* We must reset all CallStates to ensure that none
1193                          * of them is left with a stale cache_id equal to 1. */
1194                         BLI_mempool_iter iter;
1195                         DRWCallState *state;
1196                         BLI_mempool_iternew(DST.vmempool->states, &iter);
1197                         while ((state = BLI_mempool_iterstep(&iter))) {
1198                                 state->cache_id = 0;
1199                         }
1200                 }
1201
1202                 /* TODO dispatch threads to compute matrices/culling */
1203         }
1204
1205         draw_clipping_setup_from_view();
1206 }
1207
1208 static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
1209 {
1210         if (start_group == NULL)
1211                 return;
1212
1213         DST.shader = NULL;
1214
1215         BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish had not been called before drawing");
1216
1217         drw_update_view();
1218
1219         drw_state_set(pass->state);
1220
1221         DRW_stats_query_start(pass->name);
1222
1223         for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
1224                 draw_shgroup(shgroup, pass->state);
1225                 /* break if we reached the upper limit */
1226                 if (shgroup == end_group) {
1227                         break;
1228                 }
1229         }
1230
1231         /* Clear Bound textures */
1232         for (int i = 0; i < GPU_max_textures(); i++) {
1233                 if (DST.RST.bound_texs[i] != NULL) {
1234                         GPU_texture_unbind(DST.RST.bound_texs[i]);
1235                         DST.RST.bound_texs[i] = NULL;
1236                 }
1237         }
1238
1239         /* Clear Bound Ubos */
1240         for (int i = 0; i < GPU_max_ubo_binds(); i++) {
1241                 if (DST.RST.bound_ubos[i] != NULL) {
1242                         GPU_uniformbuffer_unbind(DST.RST.bound_ubos[i]);
1243                         DST.RST.bound_ubos[i] = NULL;
1244                 }
1245         }
1246
1247         if (DST.shader) {
1248                 GPU_shader_unbind();
1249                 DST.shader = NULL;
1250         }
1251
1252         DRW_stats_query_end();
1253 }
1254
1255 void DRW_draw_pass(DRWPass *pass)
1256 {
1257         drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
1258 }
1259
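/* Hypothetical usage sketch from a draw engine (the engine data and pass names are
 * illustrative, only DRW_draw_pass() comes from this file):
 *
 *   static void MY_ENGINE_draw_scene(void *vedata)
 *   {
 *           MY_ENGINE_PassList *psl = ((MY_ENGINE_Data *)vedata)->psl;
 *           DRW_draw_pass(psl->depth_pass);
 *           DRW_draw_pass(psl->color_pass);
 *   }
 */
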
1260 /* Draw only a subset of shgroups. Used in special situations such as grease pencil strokes. */
1261 void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
1262 {
1263         drw_draw_pass_ex(pass, start_group, end_group);
1264 }
1265
1266 /** \} */