Draw manager: Cleanup, use full name for depsgraph variable
[blender.git] / source / blender / draw / intern / draw_manager.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager.c
23  *  \ingroup draw
24  */
25
26 #include <stdio.h>
27
28 #include "BLI_dynstr.h"
29 #include "BLI_listbase.h"
30 #include "BLI_mempool.h"
31 #include "BLI_rect.h"
32 #include "BLI_string.h"
33
34 #include "BIF_glutil.h"
35
36 #include "BKE_curve.h"
37 #include "BKE_global.h"
38 #include "BKE_mesh.h"
39 #include "BKE_object.h"
40 #include "BKE_pbvh.h"
41 #include "BKE_paint.h"
42 #include "BKE_workspace.h"
43
44 #include "BLT_translation.h"
45 #include "BLF_api.h"
46
47 #include "DRW_engine.h"
48 #include "DRW_render.h"
49
50 #include "DNA_camera_types.h"
51 #include "DNA_curve_types.h"
52 #include "DNA_view3d_types.h"
53 #include "DNA_screen_types.h"
54 #include "DNA_mesh_types.h"
55 #include "DNA_meshdata_types.h"
56 #include "DNA_meta_types.h"
57
58 #include "ED_space_api.h"
59 #include "ED_screen.h"
60
61 #include "intern/gpu_codegen.h"
62 #include "GPU_batch.h"
63 #include "GPU_draw.h"
64 #include "GPU_extensions.h"
65 #include "GPU_framebuffer.h"
66 #include "GPU_immediate.h"
67 #include "GPU_lamp.h"
68 #include "GPU_material.h"
69 #include "GPU_shader.h"
70 #include "GPU_texture.h"
71 #include "GPU_uniformbuffer.h"
72 #include "GPU_viewport.h"
73 #include "GPU_matrix.h"
74
75 #include "IMB_colormanagement.h"
76
77 #include "RE_engine.h"
78
79 #include "UI_interface.h"
80 #include "UI_resources.h"
81
82 #include "WM_api.h"
83 #include "WM_types.h"
84
85 #include "draw_manager_text.h"
86 #include "draw_manager_profiling.h"
87
88 /* only for callbacks */
89 #include "draw_cache_impl.h"
90
91 #include "draw_instance_data.h"
92
93 #include "draw_mode_engines.h"
94 #include "engines/clay/clay_engine.h"
95 #include "engines/eevee/eevee_engine.h"
96 #include "engines/basic/basic_engine.h"
97 #include "engines/external/external_engine.h"
98
99 #include "DEG_depsgraph.h"
100 #include "DEG_depsgraph_query.h"
101
102 /* -------------------------------------------------------------------- */
103 /** \name Local Features
104  * \{ */
105
106 #define USE_PROFILE
107
108 #ifdef USE_PROFILE
109 #  include "PIL_time.h"
110
111 #  define PROFILE_TIMER_FALLOFF 0.1
112
113 #  define PROFILE_START(time_start) \
114         double time_start = PIL_check_seconds_timer();
115
116 #  define PROFILE_END_ACCUM(time_accum, time_start) { \
117         time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
118 } ((void)0)
119
120 /* exp average */
121 #  define PROFILE_END_UPDATE(time_update, time_start) { \
122         double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
123         time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
124                       (_time_delta * PROFILE_TIMER_FALLOFF); \
125 } ((void)0)
126
127 #else  /* USE_PROFILE */
128
129 #  define PROFILE_START(time_start) ((void)0)
130 #  define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
131 #  define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
132
133 #endif  /* USE_PROFILE */
134
135
136 /* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
137 #define USE_GPU_SELECT
138
139 #ifdef USE_GPU_SELECT
140 #  include "ED_view3d.h"
141 #  include "ED_armature.h"
142 #  include "GPU_select.h"
143 #endif
144
145 /** \} */
146
147
148 #define MAX_ATTRIB_NAME 32
149 #define MAX_ATTRIB_COUNT 6 /* Can be adjusted for more */
150 #define MAX_PASS_NAME 32
151 #define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
152
153 extern char datatoc_gpu_shader_2D_vert_glsl[];
154 extern char datatoc_gpu_shader_3D_vert_glsl[];
155 extern char datatoc_gpu_shader_fullscreen_vert_glsl[];
156
157 /* Prototypes. */
158 static void drw_engines_enable_external(void);
159
160 /* Structures */
/* Data type of a DRWUniform; decides how DRWUniform.value is
 * interpreted and uploaded to the shader at bind time. */
typedef enum {
	DRW_UNIFORM_BOOL,
	DRW_UNIFORM_SHORT_TO_INT,
	DRW_UNIFORM_SHORT_TO_FLOAT,
	DRW_UNIFORM_INT,
	DRW_UNIFORM_FLOAT,
	DRW_UNIFORM_TEXTURE,
	DRW_UNIFORM_BUFFER,
	DRW_UNIFORM_MAT3,
	DRW_UNIFORM_MAT4,
	DRW_UNIFORM_BLOCK
} DRWUniformType;
173
/* Component type of a per-instance vertex attribute
 * (see drw_interface_attrib). */
typedef enum {
	DRW_ATTRIB_INT,
	DRW_ATTRIB_FLOAT,
} DRWAttribType;
178
/* One uniform binding recorded on a shading group.
 * Stored in a single linked list owned by DRWInterface. */
struct DRWUniform {
	struct DRWUniform *next;
	DRWUniformType type;
	int location;   /* GL uniform (or UBO block) location in the shader */
	int length;     /* vector/matrix dimension (e.g. 3 for vec3/mat3) */
	int arraysize;  /* number of array elements, >= 1 */
	const void *value;  /* not owned; must outlive the draw */
};
187
/* Per-shading-group shader interface: builtin uniform locations,
 * registered uniforms/attributes and instancing buffers. */
struct DRWInterface {
	DRWUniform *uniforms;   /* DRWUniform, single-linked list */
	int attribs_count;
	int attribs_stride;
	int attribs_size[16];
	int attribs_loc[16];
	/* matrices locations */
	int model;
	int modelinverse;
	int modelview;
	int modelviewinverse;
	int projection;
	int projectioninverse;
	int view;
	int viewinverse;
	int modelviewprojection;
	int viewprojection;
	int viewprojectioninverse;
	int normal;
	int worldnormal;
	int camtexfac;
	int orcotexfac;
	int eye;
	int clipplanes;
	/* Dynamic batch */
	Gwn_Batch *instance_batch; /* contains instances attributes */
	GLuint instance_vbo; /* same as instance_batch but generated from DRWCalls */
	struct DRWInstanceData *inst_data;
#ifdef USE_GPU_SELECT
	struct DRWInstanceData *inst_selectid;
	/* Override for single object instances. */
	int override_selectid;
#endif
	int instance_count;  /* accumulated count for *_BATCH / INSTANCE shgroups */
	Gwn_VertFormat vbo_format;
};
224
/* A draw pass: an ordered list of shading groups sharing a GPU state. */
struct DRWPass {
	/* Single linked list with last member to append */
	DRWShadingGroup *shgroups;
	DRWShadingGroup *shgroups_last;

	DRWState state;            /* GPU state applied before drawing the groups */
	char name[MAX_PASS_NAME];
};
233
/* Common header shared by all call structs so they can live in the
 * same linked list and be dispatched on `type`. */
typedef struct DRWCallHeader {
	void *prev;  /* previous call in the shgroup's list */

#ifdef USE_GPU_SELECT
	int select_id;
#endif
	uchar type;  /* DRW_CALL_SINGLE / DRW_CALL_GENERATE / DRW_CALL_DYNAMIC */
} DRWCallHeader;
242
/* A single-batch draw call (DRW_CALL_SINGLE). */
typedef struct DRWCall {
	DRWCallHeader head;

	float obmat[4][4];     /* object matrix used as model matrix */
	Gwn_Batch *geometry;

	Object *ob; /* Optional */
	ID *ob_data; /* Optional. */
} DRWCall;
252
/* A callback-based draw call (DRW_CALL_GENERATE): geometry_fn emits
 * any number of batches at draw time. */
typedef struct DRWCallGenerate {
	DRWCallHeader head;

	float obmat[4][4];

	DRWCallGenerateFn *geometry_fn;
	void *user_data;  /* opaque payload passed back to geometry_fn */
} DRWCallGenerate;
261
/* A group of draw calls sharing one shader and one set of uniforms. */
struct DRWShadingGroup {
	struct DRWShadingGroup *next;  /* next group in the pass */

	GPUShader *shader;               /* Shader to bind */
	DRWInterface interface;          /* Uniforms pointers */

	/* DRWCall or DRWCallDynamic depending of type */
	void *calls;
	void *calls_first; /* To be able to traverse the list in the order of addition */

	DRWState state_extra;            /* State changes for this batch only (or'd with the pass's state) */
	DRWState state_extra_disable;    /* State changes for this batch only (and'd with the pass's state) */
	unsigned int stencil_mask;       /* Stencil mask to use for stencil test / write operations */
	int type;                        /* DRW_SHG_* enum below */

	ID *instance_data;         /* Object->data to instance */
	Gwn_Batch *instance_geom;  /* Geometry to instance */
	Gwn_Batch *batch_geom;     /* Result of call batching */

#ifdef USE_GPU_SELECT
	/* backlink to pass we're in */
	DRWPass *pass_parent;
#endif
};
286
/* Used by DRWShadingGroup.type */
enum {
	DRW_SHG_NORMAL,          /* regular batch of DRWCall */
	DRW_SHG_POINT_BATCH,     /* dynamic point batch built from attributes */
	DRW_SHG_LINE_BATCH,      /* dynamic line batch built from attributes */
	DRW_SHG_TRIANGLE_BATCH,  /* fixed-size triangle batch, verts made in shader */
	DRW_SHG_INSTANCE,        /* instancing of a single geometry */
};
295
/* Used by DRWCall.type */
enum {
	/* A single batch */
	DRW_CALL_SINGLE,
	/* Uses a callback to draw with any number of batches. */
	DRW_CALL_GENERATE,
	/* Arbitrary number of multiple args. */
	DRW_CALL_DYNAMIC,
};
305
/** Render State: No persistent data between draw calls. */
static struct DRWGlobalState {
	/* Cache generation */
	ViewportMemoryPool *vmempool;  /* pools for uniforms/calls/shgroups, owned by viewport */
	DRWUniform *last_uniform;
	DRWCall *last_call;
	DRWCallGenerate *last_callgenerate;
	DRWShadingGroup *last_shgroup;
	DRWInstanceDataList *idatalist;

	/* Rendering state */
	GPUShader *shader;  /* currently bound shader, NULL if none */

	/* Managed by `DRW_state_set`, `DRW_state_reset` */
	DRWState state;
	unsigned int stencil_mask;

	/* Per viewport */
	GPUViewport *viewport;
	struct GPUFrameBuffer *default_framebuffer;
	float size[2];           /* viewport size in pixels */
	float screenvecs[2][3];
	float pixsize;

	GLenum backface, frontface;  /* current face culling winding */

	/* Clip planes */
	int num_clip_planes;
	float clip_planes_eq[MAX_CLIP_PLANES][4];

	struct {
		unsigned int is_select : 1;
		unsigned int is_depth : 1;
		unsigned int is_image_render : 1;
		unsigned int is_scene_render : 1;
		unsigned int draw_background : 1;
	} options;

	/* Current rendering context */
	DRWContextState draw_ctx;

	/* Convenience pointer to text_store owned by the viewport */
	struct DRWTextStore **text_store_p;

	ListBase enabled_engines; /* RenderEngineType */

	/* Profiling */
	double cache_time;
} DST = {NULL};
355
/** GPU Resource State: Memory storage between drawing. */
static struct DRWResourceState {
	GPUTexture **bound_texs;  /* per-slot record of currently bound textures */

	bool *bound_tex_slots;    /* true while a slot is in use this draw */

	int bind_tex_inc;  /* next texture slot to allocate */
	int bind_ubo_inc;  /* next UBO binding point to allocate */
} RST = {NULL};
365
/* Optional per-viewport overrides of the builtin view/projection
 * matrices (one slot per DRWViewportMatrixType). */
static struct DRWMatrixOveride {
	float mat[6][4][4];
	bool override[6];  /* true when the matching mat slot is active */
} viewport_matrix_override = {{{{0}}}};
370
371 ListBase DRW_engines = {NULL, NULL};
372
373 #ifdef USE_GPU_SELECT
374 static unsigned int g_DRW_select_id = (unsigned int)-1;
375
/* Set the GPU-select id that following draw calls will be tagged with.
 * Only valid while doing a selection pass (G_PICKSEL set). */
void DRW_select_load_id(unsigned int id)
{
	BLI_assert(G.f & G_PICKSEL);
	g_DRW_select_id = id;
}
381 #endif
382
383
384 /* -------------------------------------------------------------------- */
385
386 /** \name Textures (DRW_texture)
387  * \{ */
388
389 static void drw_texture_get_format(
390         DRWTextureFormat format,
391         GPUTextureFormat *r_data_type, int *r_channels)
392 {
393         switch (format) {
394                 case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
395                 case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
396                 case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
397                 case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
398                 case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
399                 case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
400                 case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break;
401                 case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
402                 case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
403                 case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
404                 case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
405 #if 0
406                 case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
407                 case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
408                 case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
409 #endif
410                 case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
411                 case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
412                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
413                 case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
414                 default :
415                         /* file type not supported you must uncomment it from above */
416                         BLI_assert(false);
417                         break;
418         }
419
420         switch (format) {
421                 case DRW_TEX_RGBA_8:
422                 case DRW_TEX_RGBA_16:
423                 case DRW_TEX_RGBA_32:
424                         *r_channels = 4;
425                         break;
426                 case DRW_TEX_RGB_8:
427                 case DRW_TEX_RGB_16:
428                 case DRW_TEX_RGB_32:
429                 case DRW_TEX_RGB_11_11_10:
430                         *r_channels = 3;
431                         break;
432                 case DRW_TEX_RG_8:
433                 case DRW_TEX_RG_16:
434                 case DRW_TEX_RG_16I:
435                 case DRW_TEX_RG_32:
436                         *r_channels = 2;
437                         break;
438                 default:
439                         *r_channels = 1;
440                         break;
441         }
442 }
443
444 static void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
445 {
446         GPU_texture_bind(tex, 0);
447         if (flags & DRW_TEX_MIPMAP) {
448                 GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
449                 DRW_texture_generate_mipmaps(tex);
450         }
451         else {
452                 GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
453         }
454         GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
455         GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
456         GPU_texture_unbind(tex);
457 }
458
459 GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
460 {
461         GPUTexture *tex;
462         GPUTextureFormat data_type;
463         int channels;
464
465         drw_texture_get_format(format, &data_type, &channels);
466         tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
467         drw_texture_set_parameters(tex, flags);
468
469         return tex;
470 }
471
472 GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
473 {
474         GPUTexture *tex;
475         GPUTextureFormat data_type;
476         int channels;
477
478         drw_texture_get_format(format, &data_type, &channels);
479         tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
480         drw_texture_set_parameters(tex, flags);
481
482         return tex;
483 }
484
485 GPUTexture *DRW_texture_create_2D_array(
486         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
487 {
488         GPUTexture *tex;
489         GPUTextureFormat data_type;
490         int channels;
491
492         drw_texture_get_format(format, &data_type, &channels);
493         tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
494         drw_texture_set_parameters(tex, flags);
495
496         return tex;
497 }
498
499 GPUTexture *DRW_texture_create_3D(
500         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
501 {
502         GPUTexture *tex;
503         GPUTextureFormat data_type;
504         int channels;
505
506         drw_texture_get_format(format, &data_type, &channels);
507         tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
508         drw_texture_set_parameters(tex, flags);
509
510         return tex;
511 }
512
513 GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
514 {
515         GPUTexture *tex;
516         GPUTextureFormat data_type;
517         int channels;
518
519         drw_texture_get_format(format, &data_type, &channels);
520         tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
521         drw_texture_set_parameters(tex, flags);
522
523         return tex;
524 }
525
/* Regenerate the full mipmap chain of \a tex.
 * Binds the texture to slot 0 and unbinds it afterwards. */
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
	GPU_texture_bind(tex, 0);
	GPU_texture_generate_mipmap(tex);
	GPU_texture_unbind(tex);
}
532
/* Re-upload \a pixels into \a tex (thin wrapper over the GPU module). */
void DRW_texture_update(GPUTexture *tex, const float *pixels)
{
	GPU_texture_update(tex, pixels);
}
537
/* Free a texture created by one of the DRW_texture_create_* functions. */
void DRW_texture_free(GPUTexture *tex)
{
	GPU_texture_free(tex);
}
542
543 /** \} */
544
545
546 /* -------------------------------------------------------------------- */
547
548 /** \name Uniform Buffer Object (DRW_uniformbuffer)
549  * \{ */
550
/* Create a UBO of \a size bytes, optionally initialized from \a data.
 * (Passes NULL for the error-output parameter of the GPU module.) */
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
	return GPU_uniformbuffer_create(size, data, NULL);
}
555
/* Re-upload \a data into an existing UBO (size fixed at creation). */
void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
	GPU_uniformbuffer_update(ubo, data);
}
560
/* Free a UBO created with DRW_uniformbuffer_create. */
void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
	GPU_uniformbuffer_free(ubo);
}
565
566 /** \} */
567
568
569 /* -------------------------------------------------------------------- */
570
571 /** \name Shaders (DRW_shader)
572  * \{ */
573
/* Create a shader from GLSL sources. \a geom and \a defines may be NULL.
 * Note the argument order differs from GPU_shader_create (vert/geom/frag
 * here vs vert/frag/geom there). */
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
	return GPU_shader_create(vert, frag, geom, NULL, defines);
}
578
579 GPUShader *DRW_shader_create_with_lib(
580         const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
581 {
582         GPUShader *sh;
583         char *vert_with_lib = NULL;
584         char *frag_with_lib = NULL;
585         char *geom_with_lib = NULL;
586
587         DynStr *ds_vert = BLI_dynstr_new();
588         BLI_dynstr_append(ds_vert, lib);
589         BLI_dynstr_append(ds_vert, vert);
590         vert_with_lib = BLI_dynstr_get_cstring(ds_vert);
591         BLI_dynstr_free(ds_vert);
592
593         DynStr *ds_frag = BLI_dynstr_new();
594         BLI_dynstr_append(ds_frag, lib);
595         BLI_dynstr_append(ds_frag, frag);
596         frag_with_lib = BLI_dynstr_get_cstring(ds_frag);
597         BLI_dynstr_free(ds_frag);
598
599         if (geom) {
600                 DynStr *ds_geom = BLI_dynstr_new();
601                 BLI_dynstr_append(ds_geom, lib);
602                 BLI_dynstr_append(ds_geom, geom);
603                 geom_with_lib = BLI_dynstr_get_cstring(ds_geom);
604                 BLI_dynstr_free(ds_geom);
605         }
606
607         sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
608
609         MEM_freeN(vert_with_lib);
610         MEM_freeN(frag_with_lib);
611         if (geom) {
612                 MEM_freeN(geom_with_lib);
613         }
614
615         return sh;
616 }
617
/* Create a shader pairing \a frag with the builtin 2D vertex shader. */
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
}
622
/* Create a shader pairing \a frag with the builtin 3D vertex shader. */
GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
}
627
/* Create a shader pairing \a frag with the builtin fullscreen-triangle
 * vertex shader (for screen-space post-process passes). */
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines);
}
632
/* Return the shared builtin depth-only shader.
 * NOTE: builtin shaders are owned by the GPU module — do not free. */
GPUShader *DRW_shader_create_3D_depth_only(void)
{
	return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
}
637
/* Free a shader created by one of the DRW_shader_create_* functions. */
void DRW_shader_free(GPUShader *shader)
{
	GPU_shader_free(shader);
}
642
643 /** \} */
644
645
646 /* -------------------------------------------------------------------- */
647
648 /** \name Interface (DRW_interface)
649  * \{ */
650
651 static void drw_interface_create(DRWInterface *interface, GPUShader *shader)
652 {
653         interface->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
654         interface->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
655         interface->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
656         interface->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
657         interface->projection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION);
658         interface->projectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION_INV);
659         interface->view = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW);
660         interface->viewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW_INV);
661         interface->viewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION);
662         interface->viewprojectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION_INV);
663         interface->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
664         interface->normal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
665         interface->worldnormal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
666         interface->camtexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CAMERATEXCO);
667         interface->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
668         interface->clipplanes = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CLIPPLANES);
669         interface->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
670         interface->instance_count = 0;
671         interface->attribs_count = 0;
672         interface->attribs_stride = 0;
673         interface->instance_vbo = 0;
674         interface->instance_batch = NULL;
675         interface->inst_data = NULL;
676         interface->uniforms = NULL;
677 #ifdef USE_GPU_SELECT
678         interface->inst_selectid = NULL;
679         interface->override_selectid = -1;
680 #endif
681
682         memset(&interface->vbo_format, 0, sizeof(Gwn_VertFormat));
683 }
684
685
686 static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name,
687                                   DRWUniformType type, const void *value, int length, int arraysize)
688 {
689         int location;
690         if (type == DRW_UNIFORM_BLOCK) {
691                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
692         }
693         else {
694                 location = GPU_shader_get_uniform(shgroup->shader, name);
695         }
696
697         if (location == -1) {
698                 if (G.debug & G_DEBUG)
699                         fprintf(stderr, "Uniform '%s' not found!\n", name);
700                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
701                 // BLI_assert(0);
702                 return;
703         }
704
705         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
706
707         BLI_assert(arraysize > 0);
708
709         uni->location = location;
710         uni->type = type;
711         uni->value = value;
712         uni->length = length;
713         uni->arraysize = arraysize;
714
715         /* Prepend */
716         uni->next = shgroup->interface.uniforms;
717         shgroup->interface.uniforms = uni;
718 }
719
720 static void drw_interface_attrib(DRWShadingGroup *shgroup, const char *name, DRWAttribType UNUSED(type), int size, bool dummy)
721 {
722         unsigned int attrib_id = shgroup->interface.attribs_count;
723         GLuint program = GPU_shader_get_program(shgroup->shader);
724
725         shgroup->interface.attribs_loc[attrib_id] = glGetAttribLocation(program, name);
726         shgroup->interface.attribs_size[attrib_id] = size;
727         shgroup->interface.attribs_stride += size;
728         shgroup->interface.attribs_count += 1;
729
730         if (shgroup->type != DRW_SHG_INSTANCE) {
731                 BLI_assert(size <= 4); /* Matrices are not supported by Gawain. */
732                 GWN_vertformat_attr_add(&shgroup->interface.vbo_format, name, GWN_COMP_F32, size, GWN_FETCH_FLOAT);
733         }
734
735         BLI_assert(shgroup->interface.attribs_count < MAX_ATTRIB_COUNT);
736
737 /* Adding attribute even if not found for now (to keep memory alignment).
738  * Should ideally take vertex format automatically from batch eventually */
739 #if 0
740         if (attrib->location == -1 && !dummy) {
741                 if (G.debug & G_DEBUG)
742                         fprintf(stderr, "Attribute '%s' not found!\n", name);
743                 BLI_assert(0);
744                 MEM_freeN(attrib);
745                 return;
746         }
747 #else
748         UNUSED_VARS(dummy);
749 #endif
750 }
751
752 /** \} */
753
754
755 /* -------------------------------------------------------------------- */
756
757 /** \name Shading Group (DRW_shgroup)
758  * \{ */
759
760 DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
761 {
762         DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
763
764         /* Append */
765         if (pass->shgroups != NULL) {
766                 pass->shgroups_last->next = shgroup;
767         }
768         else {
769                 pass->shgroups = shgroup;
770         }
771         pass->shgroups_last = shgroup;
772         shgroup->next = NULL;
773
774         drw_interface_create(&shgroup->interface, shader);
775
776         shgroup->type = DRW_SHG_NORMAL;
777         shgroup->shader = shader;
778         shgroup->state_extra = 0;
779         shgroup->state_extra_disable = ~0x0;
780         shgroup->stencil_mask = 0;
781         shgroup->batch_geom = NULL;
782         shgroup->instance_geom = NULL;
783         shgroup->instance_data = NULL;
784
785         shgroup->calls = NULL;
786         shgroup->calls_first = NULL;
787
788 #ifdef USE_GPU_SELECT
789         shgroup->pass_parent = pass;
790 #endif
791
792         return shgroup;
793 }
794
/**
 * Create a shading group in \a pass from \a material's generated pass.
 * Every dynamic GPUInput (image texture, color ramp, float/vec/mat
 * uniform) is converted into a DRWUniform on the new group.
 *
 * \return the new group, or NULL if the material's shader failed to compile.
 */
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
{
	double time = 0.0; /* TODO make time variable */

	/* TODO : Ideally we should not convert. But since the whole codegen
	 * is relying on GPUPass we keep it as is for now. */
	GPUPass *gpupass = GPU_material_get_pass(material);

	if (!gpupass) {
		/* Shader compilation error */
		return NULL;
	}

	struct GPUShader *shader = GPU_pass_shader(gpupass);

	DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);

	/* Converting dynamic GPUInput to DRWUniform */
	ListBase *inputs = &gpupass->inputs;

	for (GPUInput *input = inputs->first; input; input = input->next) {
		/* Textures */
		if (input->ima) {
			GPUTexture *tex = GPU_texture_from_blender(
				input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);

			if (input->bindtex) {
				DRW_shgroup_uniform_texture(grp, input->shadername, tex);
			}
		}
		/* Color Ramps */
		else if (input->tex) {
			DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
		}
		/* Floats */
		else {
			switch (input->type) {
				case GPU_FLOAT:
					DRW_shgroup_uniform_float(grp, input->shadername, (float *)input->dynamicvec, 1);
					break;
				case GPU_VEC2:
					DRW_shgroup_uniform_vec2(grp, input->shadername, (float *)input->dynamicvec, 1);
					break;
				case GPU_VEC3:
					DRW_shgroup_uniform_vec3(grp, input->shadername, (float *)input->dynamicvec, 1);
					break;
				case GPU_VEC4:
					DRW_shgroup_uniform_vec4(grp, input->shadername, (float *)input->dynamicvec, 1);
					break;
				case GPU_MAT3:
					DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
					break;
				case GPU_MAT4:
					DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
					break;
				default:
					/* Other input types carry no dynamic uniform data. */
					break;
			}
		}
	}

	/* Material parameters packed in a UBO by the codegen, if any. */
	GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
	if (ubo != NULL) {
		DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
	}

	return grp;
}
863
864 DRWShadingGroup *DRW_shgroup_material_instance_create(
865         struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob)
866 {
867         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
868
869         if (shgroup) {
870                 shgroup->type = DRW_SHG_INSTANCE;
871                 shgroup->instance_geom = geom;
872                 shgroup->instance_data = ob->data;
873         }
874
875         return shgroup;
876 }
877
878 DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
879         struct GPUMaterial *material, DRWPass *pass, int size)
880 {
881         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
882
883         if (shgroup) {
884                 shgroup->type = DRW_SHG_TRIANGLE_BATCH;
885                 shgroup->interface.instance_count = size * 3;
886                 drw_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
887         }
888
889         return shgroup;
890 }
891
892 DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom)
893 {
894         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
895
896         shgroup->type = DRW_SHG_INSTANCE;
897         shgroup->instance_geom = geom;
898
899         return shgroup;
900 }
901
902 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
903 {
904         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
905
906         shgroup->type = DRW_SHG_POINT_BATCH;
907         DRW_shgroup_attrib_float(shgroup, "pos", 3);
908
909         return shgroup;
910 }
911
912 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
913 {
914         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
915
916         shgroup->type = DRW_SHG_LINE_BATCH;
917         DRW_shgroup_attrib_float(shgroup, "pos", 3);
918
919         return shgroup;
920 }
921
922 /* Very special batch. Use this if you position
923  * your vertices with the vertex shader
924  * and dont need any VBO attrib */
925 DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int size)
926 {
927         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
928
929         shgroup->type = DRW_SHG_TRIANGLE_BATCH;
930         shgroup->interface.instance_count = size * 3;
931         drw_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
932
933         return shgroup;
934 }
935
936 void DRW_shgroup_free(struct DRWShadingGroup *shgroup)
937 {
938         if (shgroup->interface.instance_vbo &&
939             (shgroup->interface.instance_batch == 0))
940         {
941                 glDeleteBuffers(1, &shgroup->interface.instance_vbo);
942         }
943
944         GWN_BATCH_DISCARD_SAFE(shgroup->batch_geom);
945 }
946
/* Link a new call into the shgroup's call chain.
 * 'calls' points to the most recently added call, 'calls_first' to the oldest.
 * Note the unusual link direction: each call's 'head.prev' points to the call
 * added AFTER it, so walking from 'calls_first' via 'prev' visits calls in
 * insertion order; the newest call's 'prev' is NULL. */
#define CALL_PREPEND(shgroup, call) { \
	if (shgroup->calls == NULL) { \
		shgroup->calls = call; \
		shgroup->calls_first = call; \
	} \
	else { \
		((DRWCall *)(shgroup->calls))->head.prev = call; \
		shgroup->calls = call; \
	} \
	call->head.prev = NULL; \
} ((void)0)
958
/**
 * Use an externally owned batch as the instancing data source.
 *
 * The batch is only referenced, never copied; DRW_shgroup_free() will not
 * delete it (it only deletes the internally created instance VBO).
 * May be set at most once per shgroup (see assert).
 */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *instances)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	BLI_assert(shgroup->interface.instance_batch == NULL);

	shgroup->interface.instance_batch = instances;

#ifdef USE_GPU_SELECT
	/* Register a dummy call so GPU selection has a select id for this batch. */
	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
	call->head.select_id = g_DRW_select_id;

	CALL_PREPEND(shgroup, call);
#endif
}
973
974 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
975 {
976         BLI_assert(geom != NULL);
977         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
978
979         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
980
981         CALL_PREPEND(shgroup, call);
982
983         call->head.type = DRW_CALL_SINGLE;
984 #ifdef USE_GPU_SELECT
985         call->head.select_id = g_DRW_select_id;
986 #endif
987
988         if (obmat != NULL) {
989                 copy_m4_m4(call->obmat, obmat);
990         }
991
992         call->geometry = geom;
993         call->ob_data = NULL;
994 }
995
996 void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
997 {
998         BLI_assert(geom != NULL);
999         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1000
1001         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
1002
1003         CALL_PREPEND(shgroup, call);
1004
1005         call->head.type = DRW_CALL_SINGLE;
1006 #ifdef USE_GPU_SELECT
1007         call->head.select_id = g_DRW_select_id;
1008 #endif
1009
1010         copy_m4_m4(call->obmat, ob->obmat);
1011         call->geometry = geom;
1012         call->ob_data = ob->data;
1013 }
1014
1015 void DRW_shgroup_call_generate_add(
1016         DRWShadingGroup *shgroup,
1017         DRWCallGenerateFn *geometry_fn, void *user_data,
1018         float (*obmat)[4])
1019 {
1020         BLI_assert(geometry_fn != NULL);
1021         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1022
1023         DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);
1024
1025         CALL_PREPEND(shgroup, call);
1026
1027         call->head.type = DRW_CALL_GENERATE;
1028 #ifdef USE_GPU_SELECT
1029         call->head.select_id = g_DRW_select_id;
1030 #endif
1031
1032         if (obmat != NULL) {
1033                 copy_m4_m4(call->obmat, obmat);
1034         }
1035
1036         call->geometry_fn = geometry_fn;
1037         call->user_data = user_data;
1038 }
1039
1040 static void sculpt_draw_cb(
1041         DRWShadingGroup *shgroup,
1042         void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
1043         void *user_data)
1044 {
1045         Object *ob = user_data;
1046         PBVH *pbvh = ob->sculpt->pbvh;
1047
1048         if (pbvh) {
1049                 BKE_pbvh_draw_cb(
1050                         pbvh, NULL, NULL, false,
1051                         (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
1052         }
1053 }
1054
/* Register a generated draw call that renders 'ob' through its sculpt PBVH
 * (thin wrapper over DRW_shgroup_call_generate_add with sculpt_draw_cb). */
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
	DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
1059
/**
 * Append one dynamic instance: copy the per-instance attribute values into
 * the interface's instance data buffer and increment the instance count.
 *
 * 'attr' must hold 'attr_len' pointers, one per attrib previously declared on
 * this shgroup (count asserted below); each points to attribs_size[i] floats.
 */
void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
{
	DRWInterface *interface = &shgroup->interface;

#ifdef USE_GPU_SELECT
	if (G.f & G_PICKSEL) {
		/* Lazily create the per-instance select-id buffer, then store one id. */
		if (interface->inst_selectid == NULL) {
			interface->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
		}

		int *select_id = DRW_instance_data_next(interface->inst_selectid);
		*select_id = g_DRW_select_id;
	}
#endif

	BLI_assert(attr_len == interface->attribs_count);
	UNUSED_VARS_NDEBUG(attr_len);

	if (interface->attribs_stride > 0) {
		/* Lazily create the instance data buffer (one stride per instance). */
		if (interface->inst_data == NULL) {
			interface->inst_data = DRW_instance_data_request(DST.idatalist, interface->attribs_stride, 16);
		}

		float *data = DRW_instance_data_next(interface->inst_data);

		/* Pack all attribs contiguously, one after the other. */
		for (int i = 0; i < interface->attribs_count; ++i) {
			memcpy(data, attr[i], sizeof(float) * interface->attribs_size[i]);
			data = data + interface->attribs_size[i];
		}
	}

	interface->instance_count += 1;
}
1093
/* Used for instancing with no attributes: directly set how many instances to
 * draw. Only valid when no instances or attribs were added yet (asserted). */
void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, int count)
{
	DRWInterface *interface = &shgroup->interface;

	BLI_assert(interface->instance_count == 0);
	BLI_assert(interface->attribs_count == 0);

#ifdef USE_GPU_SELECT
	if (G.f & G_PICKSEL) {
		/* No per-instance data, so all instances share a single select id. */
		interface->override_selectid = g_DRW_select_id;
	}
#endif

	interface->instance_count = count;
}
1110
1111 /**
1112  * State is added to #Pass.state while drawing.
1113  * Use to temporarily enable draw options.
1114  */
1115 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
1116 {
1117         shgroup->state_extra |= state;
1118 }
1119
/* Remove 'state' from the pass state while drawing this shgroup.
 * NOTE(review): this clears bits in 'state_extra_disable', which appears to be
 * applied as an AND-mask over the pass state at draw time — confirm against
 * the state application code (not visible in this chunk). */
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra_disable &= ~state;
}
1124
/* Set the stencil reference value used when this shgroup is drawn
 * (applied via drw_stencil_set() during drawing). */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
	shgroup->stencil_mask = mask;
}
1129
/* Declare a per-instance float attribute with 'size' components. */
void DRW_shgroup_attrib_float(DRWShadingGroup *shgroup, const char *name, int size)
{
	drw_interface_attrib(shgroup, name, DRW_ATTRIB_FLOAT, size, false);
}
1134
/* ---------- Uniform declaration helpers ----------
 * Each helper registers a named shader uniform on the shading group via
 * drw_interface_uniform(type, value, component-count, array-size).
 * NOTE(review): the value appears to be stored by pointer, not copied —
 * confirm lifetime requirements in drw_interface_uniform(). */

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Note: takes a GPUTexture** so the texture can be swapped after binding setup. */
void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

/* The two short_to_* variants take short storage, uploaded as int/float. */
void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT3, value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT4, value, 16, 1);
}
1209
1210 /* Creates a VBO containing OGL primitives for all DRWCallDynamic */
1211 static void shgroup_dynamic_batch(DRWShadingGroup *shgroup)
1212 {
1213         DRWInterface *interface = &shgroup->interface;
1214         int nbr = interface->instance_count;
1215
1216         Gwn_PrimType type = (shgroup->type == DRW_SHG_POINT_BATCH) ? GWN_PRIM_POINTS :
1217                              (shgroup->type == DRW_SHG_TRIANGLE_BATCH) ? GWN_PRIM_TRIS : GWN_PRIM_LINES;
1218
1219         if (nbr == 0)
1220                 return;
1221
1222         /* Upload Data */
1223         Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&interface->vbo_format);
1224         if (interface->inst_data) {
1225                 GWN_vertbuf_data_set(vbo, nbr, DRW_instance_data_get(interface->inst_data), false);
1226         } else {
1227                 /* Use unitialized memory. This is for dummy vertex buffers. */
1228                 /* XXX TODO do not alloc at all. */
1229                 GWN_vertbuf_data_alloc(vbo, nbr);
1230         }
1231
1232         /* TODO make the batch dynamic instead of freeing it every times */
1233         if (shgroup->batch_geom)
1234                 GWN_batch_discard(shgroup->batch_geom);
1235
1236         shgroup->batch_geom = GWN_batch_create_ex(type, vbo, NULL, GWN_BATCH_OWNS_VBO);
1237 }
1238
1239 static void shgroup_dynamic_instance(DRWShadingGroup *shgroup)
1240 {
1241         DRWInterface *interface = &shgroup->interface;
1242         int buffer_size = 0;
1243         void *data = NULL;
1244
1245         if (interface->instance_batch != NULL) {
1246                 return;
1247         }
1248
1249         /* TODO We still need this because gawain does not support Matrix attribs. */
1250         if (interface->instance_count == 0) {
1251                 if (interface->instance_vbo) {
1252                         glDeleteBuffers(1, &interface->instance_vbo);
1253                         interface->instance_vbo = 0;
1254                 }
1255                 return;
1256         }
1257
1258         /* Gather Data */
1259         buffer_size = sizeof(float) * interface->attribs_stride * interface->instance_count;
1260
1261         /* TODO poke mike to add this to gawain */
1262         if (interface->instance_vbo) {
1263                 glDeleteBuffers(1, &interface->instance_vbo);
1264                 interface->instance_vbo = 0;
1265         }
1266
1267         if (interface->inst_data) {
1268                 data = DRW_instance_data_get(interface->inst_data);
1269         }
1270
1271         glGenBuffers(1, &interface->instance_vbo);
1272         glBindBuffer(GL_ARRAY_BUFFER, interface->instance_vbo);
1273         glBufferData(GL_ARRAY_BUFFER, buffer_size, data, GL_STATIC_DRAW);
1274 }
1275
1276 static void shgroup_dynamic_batch_from_calls(DRWShadingGroup *shgroup)
1277 {
1278         if ((shgroup->interface.instance_vbo || shgroup->batch_geom) &&
1279             (G.debug_value == 667))
1280         {
1281                 return;
1282         }
1283
1284         if (shgroup->type == DRW_SHG_INSTANCE) {
1285                 shgroup_dynamic_instance(shgroup);
1286         }
1287         else {
1288                 shgroup_dynamic_batch(shgroup);
1289         }
1290 }
1291
1292 /** \} */
1293
1294
1295 /* -------------------------------------------------------------------- */
1296
1297 /** \name Passes (DRW_pass)
1298  * \{ */
1299
1300 DRWPass *DRW_pass_create(const char *name, DRWState state)
1301 {
1302         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
1303         pass->state = state;
1304         BLI_strncpy(pass->name, name, MAX_PASS_NAME);
1305
1306         pass->shgroups = NULL;
1307         pass->shgroups_last = NULL;
1308
1309         return pass;
1310 }
1311
1312 void DRW_pass_free(DRWPass *pass)
1313 {
1314         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1315                 DRW_shgroup_free(shgroup);
1316         }
1317
1318         pass->shgroups = NULL;
1319         pass->shgroups_last = NULL;
1320 }
1321
1322 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
1323 {
1324         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1325                 callback(userData, shgroup);
1326         }
1327 }
1328
/* Thunk data for pass_shgroup_dist_sort(): sorting axis and origin
 * (in practice the view Z axis and view origin, see DRW_pass_sort_shgroup_z). */
typedef struct ZSortData {
	float *axis;
	float *origin;
} ZSortData;
1333
1334 static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
1335 {
1336         const ZSortData *zsortdata = (ZSortData *)thunk;
1337         const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
1338         const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
1339
1340         const DRWCall *call_a;
1341         const DRWCall *call_b;
1342
1343         call_a = shgrp_a->calls_first;
1344         call_b = shgrp_b->calls_first;
1345
1346         if (call_a == NULL) return -1;
1347         if (call_b == NULL) return -1;
1348
1349         float tmp[3];
1350         sub_v3_v3v3(tmp, zsortdata->origin, call_a->obmat[3]);
1351         const float a_sq = dot_v3v3(zsortdata->axis, tmp);
1352         sub_v3_v3v3(tmp, zsortdata->origin, call_b->obmat[3]);
1353         const float b_sq = dot_v3v3(zsortdata->axis, tmp);
1354
1355         if      (a_sq < b_sq) return  1;
1356         else if (a_sq > b_sq) return -1;
1357         else {
1358                 /* If there is a depth prepass put it before */
1359                 if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1360                         return -1;
1361                 }
1362                 else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1363                         return  1;
1364                 }
1365                 else return  0;
1366         }
1367 }
1368
/* ------------------ Shading group sorting --------------------- */

/* Instantiate the generic linked-list merge sort for DRWShadingGroup lists.
 * The include below generates the function shgroup_sort_fn_r() (thunk-taking
 * variant), used by DRW_pass_sort_shgroup_z(). */
#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE
1380
1381 /**
1382  * Sort Shading groups by decreasing Z of their first draw call.
1383  * This is usefull for order dependant effect such as transparency.
1384  **/
1385 void DRW_pass_sort_shgroup_z(DRWPass *pass)
1386 {
1387         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1388
1389         float (*viewinv)[4];
1390         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1391                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1392
1393         ZSortData zsortdata = {viewinv[2], viewinv[3]};
1394
1395         if (pass->shgroups && pass->shgroups->next) {
1396                 pass->shgroups = shgroup_sort_fn_r(pass->shgroups, pass_shgroup_dist_sort, &zsortdata);
1397
1398                 /* Find the next last */
1399                 DRWShadingGroup *last = pass->shgroups;
1400                 while ((last = last->next)) {
1401                         /* Do nothing */
1402                 }
1403                 pass->shgroups_last = last;
1404         }
1405 }
1406
1407 /** \} */
1408
1409
1410 /* -------------------------------------------------------------------- */
1411
1412 /** \name Draw (DRW_draw)
1413  * \{ */
1414
/**
 * Apply 'state' to the OpenGL context, changing only the settings that differ
 * from the cached DST.state. GL state used while drawing must go through this
 * function (or the cache goes stale). Each section below handles one group of
 * mutually related flags.
 */
static void drw_state_set(DRWState state)
{
	if (DST.state == state) {
		return;
	}

	/* CHANGED_TO(f): -1 when flag(s) 'f' turned off, 1 when turned on, 0 when unchanged. */
#define CHANGED_TO(f) \
	((DST.state & (f)) ? \
		((state & (f)) ?  0 : -1) : \
		((state & (f)) ?  1 :  0))

	/* CHANGED_ANY(f): true when any flag in 'f' differs between old and new state. */
#define CHANGED_ANY(f) \
	((DST.state & (f)) != (state & (f)))

	/* Like CHANGED_ANY, but also stores the new flag values in 'enabled'. */
#define CHANGED_ANY_STORE_VAR(f, enabled) \
	((DST.state & (f)) != (enabled = (state & (f))))

	/* Depth Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
			if (test == 1) {
				glDepthMask(GL_TRUE);
			}
			else {
				glDepthMask(GL_FALSE);
			}
		}
	}

	/* Color Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
			if (test == 1) {
				glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
			}
			else {
				glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
			}
		}
	}

	/* Cull */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
			test))
		{
			if (test) {
				glEnable(GL_CULL_FACE);

				if ((state & DRW_STATE_CULL_BACK) != 0) {
					glCullFace(GL_BACK);
				}
				else if ((state & DRW_STATE_CULL_FRONT) != 0) {
					glCullFace(GL_FRONT);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_CULL_FACE);
			}
		}
	}

	/* Depth Test */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
			test))
		{
			if (test) {
				glEnable(GL_DEPTH_TEST);

				/* Note: DRW_STATE_DEPTH_LESS intentionally maps to GL_LEQUAL, not GL_LESS. */
				if (state & DRW_STATE_DEPTH_LESS) {
					glDepthFunc(GL_LEQUAL);
				}
				else if (state & DRW_STATE_DEPTH_EQUAL) {
					glDepthFunc(GL_EQUAL);
				}
				else if (state & DRW_STATE_DEPTH_GREATER) {
					glDepthFunc(GL_GREATER);
				}
				else if (state & DRW_STATE_DEPTH_ALWAYS) {
					glDepthFunc(GL_ALWAYS);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_DEPTH_TEST);
			}
		}
	}

	/* Wire Width */
	{
		if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_LARGE)) {
			if ((state & DRW_STATE_WIRE) != 0) {
				glLineWidth(1.0f);
			}
			else if ((state & DRW_STATE_WIRE_LARGE) != 0) {
				glLineWidth(UI_GetThemeValuef(TH_OUTLINE_WIDTH) * 2.0f);
			}
			else {
				/* do nothing */
			}
		}
	}

	/* Points Size */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_POINT))) {
			if (test == 1) {
				GPU_enable_program_point_size();
				glPointSize(5.0f);
			}
			else {
				GPU_disable_program_point_size();
			}
		}
	}

	/* Blending (all buffer) */
	{
		int test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION |
			DRW_STATE_ADDITIVE_FULL,
			test))
		{
			if (test) {
				glEnable(GL_BLEND);

				if ((state & DRW_STATE_BLEND) != 0) {
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
					                    GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
				}
				else if ((state & DRW_STATE_MULTIPLY) != 0) {
					glBlendFunc(GL_DST_COLOR, GL_ZERO);
				}
				else if ((state & DRW_STATE_TRANSMISSION) != 0) {
					glBlendFunc(GL_ONE, GL_SRC_ALPHA);
				}
				else if ((state & DRW_STATE_ADDITIVE) != 0) {
					/* Do not let alpha accumulate but premult the source RGB by it. */
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
					                    GL_ZERO, GL_ONE); /* Alpha */
				}
				else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
					/* Let alpha accumulate. */
					glBlendFunc(GL_ONE, GL_ONE);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_BLEND);
			}
		}
	}

	/* Clip Planes */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
			if (test == 1) {
				/* Only the planes actually in use are enabled... */
				for (int i = 0; i < DST.num_clip_planes; ++i) {
					glEnable(GL_CLIP_DISTANCE0 + i);
				}
			}
			else {
				/* ...but all possible planes are disabled, to be safe. */
				for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
					glDisable(GL_CLIP_DISTANCE0 + i);
				}
			}
		}
	}

	/* Line Stipple (legacy glLineStipple via setlinestyle) */
	{
		int test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
			test))
		{
			if (test) {
				if ((state & DRW_STATE_STIPPLE_2) != 0) {
					setlinestyle(2);
				}
				else if ((state & DRW_STATE_STIPPLE_3) != 0) {
					setlinestyle(3);
				}
				else if ((state & DRW_STATE_STIPPLE_4) != 0) {
					setlinestyle(4);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				setlinestyle(0);
			}
		}
	}

	/* Stencil */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_WRITE_STENCIL |
			DRW_STATE_STENCIL_EQUAL,
			test))
		{
			if (test) {
				glEnable(GL_STENCIL_TEST);

				/* Stencil Write */
				if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
					glStencilMask(0xFF);
					glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
				}
				/* Stencil Test */
				else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
					glStencilMask(0x00); /* disable write */
					/* The stencil func itself is set per shgroup by drw_stencil_set(). */
					DST.stencil_mask = 0;
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				/* disable write & test */
				DST.stencil_mask = 0;
				glStencilMask(0x00);
				glStencilFunc(GL_ALWAYS, 1, 0xFF);
				glDisable(GL_STENCIL_TEST);
			}
		}
	}

#undef CHANGED_TO
#undef CHANGED_ANY
#undef CHANGED_ANY_STORE_VAR

	DST.state = state;
}
1671
1672 static void drw_stencil_set(unsigned int mask)
1673 {
1674         if (DST.stencil_mask != mask) {
1675                 /* Stencil Write */
1676                 if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
1677                         glStencilFunc(GL_ALWAYS, mask, 0xFF);
1678                         DST.stencil_mask = mask;
1679                 }
1680                 /* Stencil Test */
1681                 else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
1682                         glStencilFunc(GL_EQUAL, mask, 0xFF);
1683                         DST.stencil_mask = mask;
1684                 }
1685         }
1686 }
1687
/* Doubly-linked-list node tracking a texture bound for the current draw. */
typedef struct DRWBoundTexture {
	struct DRWBoundTexture *next, *prev;
	GPUTexture *tex;
} DRWBoundTexture;
1692
/* Compute and upload all object-dependent matrices/uniforms for a shading group
 * prior to issuing its draw call.
 *
 * \param obmat      Object model matrix (object -> world).
 * \param texcoloc   Optional texture-space location (for orco coordinates).
 * \param texcosize  Optional texture-space size (for orco coordinates).
 */
static void draw_geometry_prepare(
        DRWShadingGroup *shgroup, const float (*obmat)[4], const float *texcoloc, const float *texcosize)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;
	DRWInterface *interface = &shgroup->interface;

	float mvp[4][4], mv[4][4], mi[4][4], mvi[4][4], pi[4][4], n[3][3], wn[3][3];
	float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
	float eye[3] = { 0.0f, 0.0f, 1.0f }; /* looking into the screen */

	/* Only compute the matrices whose uniform exists in the shader
	 * (a location of -1 means the shader does not use it). */
	bool do_pi = (interface->projectioninverse != -1);
	bool do_mvp = (interface->modelviewprojection != -1);
	bool do_mi = (interface->modelinverse != -1);
	bool do_mv = (interface->modelview != -1);
	bool do_mvi = (interface->modelviewinverse != -1);
	bool do_n = (interface->normal != -1);
	bool do_wn = (interface->worldnormal != -1);
	bool do_eye = (interface->eye != -1);
	bool do_orco = (interface->orcotexfac != -1) && (texcoloc != NULL) && (texcosize != NULL);

	/* Matrix override */
	float (*persmat)[4];
	float (*persinv)[4];
	float (*viewmat)[4];
	float (*viewinv)[4];
	float (*winmat)[4];
	float (*wininv)[4];

	/* Each view matrix may be overridden by the engine (e.g. for shadow or
	 * light-probe rendering); otherwise fall back to the region's matrices. */
	persmat = (viewport_matrix_override.override[DRW_MAT_PERS])
	          ? viewport_matrix_override.mat[DRW_MAT_PERS] : rv3d->persmat;
	persinv = (viewport_matrix_override.override[DRW_MAT_PERSINV])
	          ? viewport_matrix_override.mat[DRW_MAT_PERSINV] : rv3d->persinv;
	viewmat = (viewport_matrix_override.override[DRW_MAT_VIEW])
	          ? viewport_matrix_override.mat[DRW_MAT_VIEW] : rv3d->viewmat;
	viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
	          ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
	winmat = (viewport_matrix_override.override[DRW_MAT_WIN])
	          ? viewport_matrix_override.mat[DRW_MAT_WIN] : rv3d->winmat;
	wininv = viewport_matrix_override.mat[DRW_MAT_WININV];

	if (do_pi) {
		/* No RegionView3D fallback exists for the inverse window matrix;
		 * compute it from winmat when not overridden. */
		if (!viewport_matrix_override.override[DRW_MAT_WININV]) {
			invert_m4_m4(pi, winmat);
			wininv = pi;
		}
	}
	if (do_mi) {
		invert_m4_m4(mi, obmat);
	}
	if (do_mvp) {
		mul_m4_m4m4(mvp, persmat, obmat);
	}
	if (do_mv || do_mvi || do_n || do_eye) {
		/* mv is also an input for the model-view-inverse, normal and eye paths. */
		mul_m4_m4m4(mv, viewmat, obmat);
	}
	if (do_mvi) {
		invert_m4_m4(mvi, mv);
	}
	if (do_n || do_eye) {
		/* Normal matrix: inverse-transpose of the 3x3 model-view. */
		copy_m3_m4(n, mv);
		invert_m3(n);
		transpose_m3(n);
	}
	if (do_wn) {
		/* World-space normal matrix: inverse-transpose of the 3x3 model matrix. */
		copy_m3_m4(wn, obmat);
		invert_m3(wn);
		transpose_m3(wn);
	}
	if (do_eye) {
		/* Used by orthographic wires */
		float tmp[3][3];
		invert_m3_m3(tmp, n);
		/* set eye vector, transformed to object coords */
		mul_m3_v3(tmp, eye);
	}
	if (do_orco) {
		/* Texture-space mapping expressed as scale/offset factors. */
		mul_v3_v3fl(orcofacs[1], texcosize, 2.0f);
		invert_v3(orcofacs[1]);
		sub_v3_v3v3(orcofacs[0], texcoloc, texcosize);
		negate_v3(orcofacs[0]);
		mul_v3_v3(orcofacs[0], orcofacs[1]); /* result in a nice MADD in the shader */
	}

	/* Should be really simple */
	/* step 1 : bind object dependent matrices */
	/* TODO : Some of these are not object dependant.
	 * They should be grouped inside a UBO updated once per redraw.
	 * The rest can also go into a UBO to reduce API calls. */
	GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)obmat);
	GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)mi);
	GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)mvp);
	GPU_shader_uniform_vector(shgroup->shader, interface->viewinverse, 16, 1, (float *)viewinv);
	GPU_shader_uniform_vector(shgroup->shader, interface->viewprojection, 16, 1, (float *)persmat);
	GPU_shader_uniform_vector(shgroup->shader, interface->viewprojectioninverse, 16, 1, (float *)persinv);
	GPU_shader_uniform_vector(shgroup->shader, interface->projection, 16, 1, (float *)winmat);
	GPU_shader_uniform_vector(shgroup->shader, interface->projectioninverse, 16, 1, (float *)wininv);
	GPU_shader_uniform_vector(shgroup->shader, interface->view, 16, 1, (float *)viewmat);
	GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)mv);
	GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)mvi);
	GPU_shader_uniform_vector(shgroup->shader, interface->normal, 9, 1, (float *)n);
	GPU_shader_uniform_vector(shgroup->shader, interface->worldnormal, 9, 1, (float *)wn);
	GPU_shader_uniform_vector(shgroup->shader, interface->camtexfac, 4, 1, (float *)rv3d->viewcamtexcofac);
	GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)orcofacs);
	GPU_shader_uniform_vector(shgroup->shader, interface->eye, 3, 1, (float *)eye);
	GPU_shader_uniform_vector(shgroup->shader, interface->clipplanes, 4, DST.num_clip_planes, (float *)DST.clip_planes_eq);
}
1799
1800 static void draw_geometry_execute_ex(
1801         DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count)
1802 {
1803         DRWInterface *interface = &shgroup->interface;
1804         /* step 2 : bind vertex array & draw */
1805         GWN_batch_program_set(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
1806         if (interface->instance_batch) {
1807                 /* Used for Particles. Cannot do partial drawing. */
1808                 GWN_batch_draw_stupid_instanced_with_batch(geom, interface->instance_batch);
1809         }
1810         else if (interface->instance_vbo) {
1811                 GWN_batch_draw_stupid_instanced(
1812                         geom, interface->instance_vbo, start, count, interface->attribs_count,
1813                         interface->attribs_stride, interface->attribs_size, interface->attribs_loc);
1814         }
1815         else {
1816                 GWN_batch_draw_stupid(geom, start, count);
1817         }
1818         /* XXX this just tells gawain we are done with the shader.
1819          * This does not unbind the shader. */
1820         GWN_batch_program_unset(geom);
1821 }
1822
/* Convenience wrapper: draw the whole geometry (no partial range). */
static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
{
	draw_geometry_execute_ex(shgroup, geom, 0, 0);
}
1827
1828 static void draw_geometry(
1829         DRWShadingGroup *shgroup, Gwn_Batch *geom, const float (*obmat)[4], ID *ob_data,
1830         unsigned int start, unsigned int count)
1831 {
1832         float *texcoloc = NULL;
1833         float *texcosize = NULL;
1834
1835         if (ob_data != NULL) {
1836                 switch (GS(ob_data->name)) {
1837                         case ID_ME:
1838                                 BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
1839                                 break;
1840                         case ID_CU:
1841                         {
1842                                 Curve *cu = (Curve *)ob_data;
1843                                 if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
1844                                         BKE_curve_texspace_calc(cu);
1845                                 }
1846                                 texcoloc = cu->loc;
1847                                 texcosize = cu->size;
1848                                 break;
1849                         }
1850                         case ID_MB:
1851                         {
1852                                 MetaBall *mb = (MetaBall *)ob_data;
1853                                 texcoloc = mb->loc;
1854                                 texcosize = mb->size;
1855                                 break;
1856                         }
1857                         default:
1858                                 break;
1859                 }
1860         }
1861
1862         draw_geometry_prepare(shgroup, obmat, texcoloc, texcosize);
1863
1864         draw_geometry_execute_ex(shgroup, geom, start, count);
1865 }
1866
1867 static void bind_texture(GPUTexture *tex)
1868 {
1869         int bind_num = GPU_texture_bound_number(tex);
1870         if (bind_num == -1) {
1871                 for (int i = 0; i < GPU_max_textures(); ++i) {
1872                         RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures();
1873                         if (RST.bound_tex_slots[RST.bind_tex_inc] == false) {
1874                                 if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
1875                                         GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
1876                                 }
1877                                 GPU_texture_bind(tex, RST.bind_tex_inc);
1878                                 RST.bound_texs[RST.bind_tex_inc] = tex;
1879                                 RST.bound_tex_slots[RST.bind_tex_inc] = true;
1880                                 return;
1881                         }
1882                 }
1883
1884                 printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
1885         }
1886         RST.bound_tex_slots[bind_num] = true;
1887 }
1888
/* Bind `ubo` to the next free UBO binding point. Slots are handed out
 * sequentially and reset per shading group by release_ubo_slots(). */
static void bind_ubo(GPUUniformBuffer *ubo)
{
	if (RST.bind_ubo_inc < GPU_max_ubo_binds()) {
		GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
		RST.bind_ubo_inc++;
	}
	else {
		/* This is not depending on user input.
		 * It is our responsibility to make sure there are enough slots. */
		BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");

		/* printf so user can report bad behaviour */
		printf("Not enough ubo slots! This should not happen!\n");
	}
}
1904
/* Mark every texture slot as free for the next shading group.
 * The textures themselves stay bound so they can be reused without rebinding. */
static void release_texture_slots(void)
{
	memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
}
1909
/* Reset the UBO binding counter so the next shading group starts at slot 0. */
static void release_ubo_slots(void)
{
	RST.bind_ubo_inc = 0;
}
1914
/* Draw one shading group: bind its shader, set GL state/stencil, upload all
 * uniforms/textures/UBOs, then issue every draw call it contains (handling
 * instancing, dynamic batches, generated geometry and GPU selection IDs). */
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
	BLI_assert(shgroup->shader);

	DRWInterface *interface = &shgroup->interface;
	GPUTexture *tex;
	GPUUniformBuffer *ubo;
	int val;
	float fval;

	/* Only rebind when the shader changed since the previous shgroup. */
	if (DST.shader != shgroup->shader) {
		if (DST.shader) GPU_shader_unbind();
		GPU_shader_bind(shgroup->shader);
		DST.shader = shgroup->shader;
	}

	const bool is_normal = ELEM(shgroup->type, DRW_SHG_NORMAL);

	if (!is_normal) {
		/* Batched types (point/line/triangle/instance): aggregate the
		 * individual calls into one drawable batch first. */
		shgroup_dynamic_batch_from_calls(shgroup);
	}

	/* Free all texture/UBO slots for this group's own bindings. */
	release_texture_slots();
	release_ubo_slots();

	/* Per-group state: pass state minus group-disabled bits, plus group extras. */
	drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
	drw_stencil_set(shgroup->stencil_mask);

	/* Binding Uniform */
	/* Don't check anything, Interface should already contain the least uniform as possible */
	for (DRWUniform *uni = interface->uniforms; uni; uni = uni->next) {
		switch (uni->type) {
			case DRW_UNIFORM_SHORT_TO_INT:
				val = (int)*((short *)uni->value);
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
				break;
			case DRW_UNIFORM_SHORT_TO_FLOAT:
				fval = (float)*((short *)uni->value);
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
				break;
			case DRW_UNIFORM_BOOL:
			case DRW_UNIFORM_INT:
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
				break;
			case DRW_UNIFORM_FLOAT:
			case DRW_UNIFORM_MAT3:
			case DRW_UNIFORM_MAT4:
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
				break;
			case DRW_UNIFORM_TEXTURE:
				tex = (GPUTexture *)uni->value;
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BUFFER:
				/* Buffer textures are only valid when drawing into our FBOs. */
				if (!DRW_state_is_fbo()) {
					break;
				}
				tex = *((GPUTexture **)uni->value);
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BLOCK:
				ubo = (GPUUniformBuffer *)uni->value;
				bind_ubo(ubo);
				GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
				break;
		}
	}

/* Helper macros: load GPU-select IDs around the draw calls when picking. */
#ifdef USE_GPU_SELECT
	/* use the first item because of selection we only ever add one */
#  define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
	if ((G.f & G_PICKSEL) && (_call)) { \
		GPU_select_load_id((_call)->head.select_id); \
	} ((void)0)

#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count)  \
	_start = 0;                                                      \
	_count = _shgroup->interface.instance_count;                     \
	int *select_id = NULL;                                           \
	if (G.f & G_PICKSEL) {                                           \
		if (_shgroup->interface.override_selectid == -1) {                        \
			select_id = DRW_instance_data_get(_shgroup->interface.inst_selectid); \
			switch (_shgroup->type) {                                             \
				case DRW_SHG_TRIANGLE_BATCH: _count = 3; break;                   \
				case DRW_SHG_LINE_BATCH: _count = 2; break;                       \
				default: _count = 1; break;                                       \
			}                                                                     \
		}                                                                         \
		else {                                                                    \
			GPU_select_load_id(_shgroup->interface.override_selectid);            \
		}                                                                         \
	}                                                                \
	while (_start < _shgroup->interface.instance_count) {            \
		if (select_id) {                                             \
			GPU_select_load_id(select_id[_start]);                   \
		}

# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
		_start += _count;                                    \
	}

#else
#  define GPU_SELECT_LOAD_IF_PICKSEL(call)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
	_start = 0;                                                     \
	_count = _shgroup->interface.instance_count;

#endif

	/* Rendering Calls */
	if (!is_normal) {
		/* Replacing multiple calls with only one */
		float obmat[4][4];
		unit_m4(obmat);

		if (shgroup->type == DRW_SHG_INSTANCE &&
		    (interface->instance_count > 0 || interface->instance_batch != NULL))
		{
			if (interface->instance_batch != NULL) {
				GPU_SELECT_LOAD_IF_PICKSEL((DRWCall *)shgroup->calls_first);
				draw_geometry(shgroup, shgroup->instance_geom, obmat, shgroup->instance_data, 0, 0);
			}
			else {
				unsigned int count, start;
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry(shgroup, shgroup->instance_geom, obmat, shgroup->instance_data, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
		else {
			/* Some dynamic batch can have no geom (no call to aggregate) */
			if (shgroup->batch_geom) {
				unsigned int count, start;
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry(shgroup, shgroup->batch_geom, obmat, NULL, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
	}
	else {
		/* Normal shgroup: one draw per call, with negative-scale handling. */
		for (DRWCall *call = shgroup->calls_first; call; call = call->head.prev) {
			bool neg_scale = is_negative_m4(call->obmat);

			/* Negative scale objects */
			if (neg_scale) {
				glFrontFace(DST.backface);
			}

			GPU_SELECT_LOAD_IF_PICKSEL(call);

			if (call->head.type == DRW_CALL_SINGLE) {
				draw_geometry(shgroup, call->geometry, call->obmat, call->ob_data, 0, 0);
			}
			else {
				BLI_assert(call->head.type == DRW_CALL_GENERATE);
				DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
				draw_geometry_prepare(shgroup, callgen->obmat, NULL, NULL);
				callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
			}

			/* Reset state */
			if (neg_scale) {
				glFrontFace(DST.frontface);
			}
		}
	}

	/* TODO: remove, (currently causes alpha issue with sculpt, need to investigate) */
	DRW_state_reset();
}
2098
2099 static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
2100 {
2101         /* Start fresh */
2102         DST.shader = NULL;
2103
2104         drw_state_set(pass->state);
2105
2106         DRW_stats_query_start(pass->name);
2107
2108         for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
2109                 draw_shgroup(shgroup, pass->state);
2110                 /* break if upper limit */
2111                 if (shgroup == end_group) {
2112                         break;
2113                 }
2114         }
2115
2116         /* Clear Bound textures */
2117         for (int i = 0; i < GPU_max_textures(); i++) {
2118                 if (RST.bound_texs[i] != NULL) {
2119                         GPU_texture_unbind(RST.bound_texs[i]);
2120                         RST.bound_texs[i] = NULL;
2121                 }
2122         }
2123
2124         if (DST.shader) {
2125                 GPU_shader_unbind();
2126                 DST.shader = NULL;
2127         }
2128
2129         DRW_stats_query_end();
2130 }
2131
/* Draw every shading group of the pass, in order. */
void DRW_draw_pass(DRWPass *pass)
{
	drw_draw_pass_ex(pass, pass->shgroups, pass->shgroups_last);
}
2136
/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes.
 * `end_group` is inclusive; both groups must belong to `pass`. */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
	drw_draw_pass_ex(pass, start_group, end_group);
}
2142
/* Load the region's view/projection matrices so pre-scene callbacks
 * draw with the expected world-space transform. */
void DRW_draw_callbacks_pre_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2150
/* Restore the region's view/projection matrices after scene drawing,
 * so post-scene callbacks draw with the expected transform. */
void DRW_draw_callbacks_post_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2158
/* Reset state to not interfere with other UI drawcall */
void DRW_state_reset_ex(DRWState state)
{
	/* Invert the cached state so drw_state_set() sees every bit as changed
	 * and forcibly re-applies the whole requested state. */
	DST.state = ~state;
	drw_state_set(state);
}
2165
/* Force-reset GL state to the draw manager defaults. */
void DRW_state_reset(void)
{
	/* Reset blending function */
	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

	DRW_state_reset_ex(DRW_STATE_DEFAULT);
}
2173
/* NOTE : Make sure to reset after use! */
void DRW_state_invert_facing(void)
{
	/* Swap front/back face winding; calling this twice restores the original. */
	SWAP(GLenum, DST.backface, DST.frontface);
	glFrontFace(DST.frontface);
}
2180
/**
 * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
 * and if the shaders have support for it (see usage of gl_ClipDistance).
 * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
 **/
void DRW_state_clip_planes_add(float plane_eq[4])
{
	/* NOTE(review): no bounds check here — callers must not add more planes
	 * than clip_planes_eq can hold; confirm the array size at the DST definition. */
	copy_v4_v4(DST.clip_planes_eq[DST.num_clip_planes++], plane_eq);
}
2190
/* Discard all previously added clip planes. */
void DRW_state_clip_planes_reset(void)
{
	DST.num_clip_planes = 0;
}
2195
2196 /** \} */
2197
2198
2199 struct DRWTextStore *DRW_text_cache_ensure(void)
2200 {
2201         BLI_assert(DST.text_store_p);
2202         if (*DST.text_store_p == NULL) {
2203                 *DST.text_store_p = DRW_text_cache_create();
2204         }
2205         return *DST.text_store_p;
2206 }
2207
2208
2209 /* -------------------------------------------------------------------- */
2210
2211 /** \name Settings
2212  * \{ */
2213
2214 bool DRW_object_is_renderable(Object *ob)
2215 {
2216         Scene *scene = DST.draw_ctx.scene;
2217         Object *obedit = scene->obedit;
2218
2219         BLI_assert(BKE_object_is_visible(ob, OB_VISIBILITY_CHECK_UNKNOWN_RENDER_MODE));
2220
2221         if (ob->type == OB_MESH) {
2222                 if (ob == obedit) {
2223                         IDProperty *props = BKE_layer_collection_engine_evaluated_get(ob, COLLECTION_MODE_EDIT, "");
2224                         bool do_show_occlude_wire = BKE_collection_engine_property_value_get_bool(props, "show_occlude_wire");
2225                         if (do_show_occlude_wire) {
2226                                 return false;
2227                         }
2228                         bool do_show_weight = BKE_collection_engine_property_value_get_bool(props, "show_weight");
2229                         if (do_show_weight) {
2230                                 return false;
2231                         }
2232                 }
2233         }
2234
2235         return true;
2236 }
2237
2238 /**
2239  * Return whether this object is visible depending if
2240  * we are rendering or drawing in the viewport.
2241  */
2242 bool DRW_check_object_visible_within_active_context(Object *ob)
2243 {
2244         const eObjectVisibilityCheck mode = DRW_state_is_scene_render() ?
2245                                              OB_VISIBILITY_CHECK_FOR_RENDER :
2246                                              OB_VISIBILITY_CHECK_FOR_VIEWPORT;
2247         return BKE_object_is_visible(ob, mode);
2248 }
2249
2250 bool DRW_object_is_flat_normal(const Object *ob)
2251 {
2252         if (ob->type == OB_MESH) {
2253                 const Mesh *me = ob->data;
2254                 if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
2255                         return false;
2256                 }
2257         }
2258         return true;
2259 }
2260
2261 /**
2262  * Return true if the object has its own draw mode.
2263  * Caller must check this is active */
2264 int DRW_object_is_mode_shade(const Object *ob)
2265 {
2266         BLI_assert(ob == DST.draw_ctx.obact);
2267         if ((ob->mode & OB_MODE_EDIT) == 0) {
2268                 if (ob->mode & (OB_MODE_VERTEX_PAINT | OB_MODE_WEIGHT_PAINT | OB_MODE_TEXTURE_PAINT)) {
2269                         if ((DST.draw_ctx.v3d->flag2 & V3D_SHOW_MODE_SHADE_OVERRIDE) == 0) {
2270                                 return true;
2271                         }
2272                         else {
2273                                 return false;
2274                         }
2275                 }
2276         }
2277         return -1;
2278 }
2279
2280 /** \} */
2281
2282
2283 /* -------------------------------------------------------------------- */
2284
2285 /** \name Framebuffers (DRW_framebuffer)
2286  * \{ */
2287
2288 static GPUTextureFormat convert_tex_format(
2289         int fbo_format,
2290         int *r_channels, bool *r_is_depth)
2291 {
2292         *r_is_depth = ELEM(fbo_format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8);
2293
2294         switch (fbo_format) {
2295                 case DRW_TEX_R_16:     *r_channels = 1; return GPU_R16F;
2296                 case DRW_TEX_R_32:     *r_channels = 1; return GPU_R32F;
2297                 case DRW_TEX_RG_8:     *r_channels = 2; return GPU_RG8;
2298                 case DRW_TEX_RG_16:    *r_channels = 2; return GPU_RG16F;
2299                 case DRW_TEX_RG_16I:   *r_channels = 2; return GPU_RG16I;
2300                 case DRW_TEX_RG_32:    *r_channels = 2; return GPU_RG32F;
2301                 case DRW_TEX_RGBA_8:   *r_channels = 4; return GPU_RGBA8;
2302                 case DRW_TEX_RGBA_16:  *r_channels = 4; return GPU_RGBA16F;
2303                 case DRW_TEX_RGBA_32:  *r_channels = 4; return GPU_RGBA32F;
2304                 case DRW_TEX_DEPTH_16: *r_channels = 1; return GPU_DEPTH_COMPONENT16;
2305                 case DRW_TEX_DEPTH_24: *r_channels = 1; return GPU_DEPTH_COMPONENT24;
2306                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_channels = 1; return GPU_DEPTH24_STENCIL8;
2307                 case DRW_TEX_DEPTH_32: *r_channels = 1; return GPU_DEPTH_COMPONENT32F;
2308                 case DRW_TEX_RGB_11_11_10: *r_channels = 3; return GPU_R11F_G11F_B10F;
2309                 default:
2310                         BLI_assert(false && "Texture format unsupported as render target!");
2311                         *r_channels = 4; return GPU_RGBA8;
2312         }
2313 }
2314
/* Create (or reuse) a framebuffer and its attached textures.
 *
 * \param fb            Pointer to the framebuffer; created when *fb is NULL.
 * \param engine_type   Owner engine, used as key for the viewport texture pool.
 * \param textures      Descriptions of up to MAX_FBO_TEX attachments.
 * \param textures_len  Number of entries actually used in `textures`.
 *
 * Textures flagged DRW_TEX_TEMP come from the viewport pool and are
 * re-queried every frame; they are detached again after validation so the
 * pool can hand them to other engines. */
void DRW_framebuffer_init(
        struct GPUFrameBuffer **fb, void *engine_type, int width, int height,
        DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
{
	BLI_assert(textures_len <= MAX_FBO_TEX);

	bool create_fb = false;
	int color_attachment = -1;

	if (!*fb) {
		*fb = GPU_framebuffer_create();
		create_fb = true;
	}

	for (int i = 0; i < textures_len; ++i) {
		int channels;
		bool is_depth;

		DRWFboTexture fbotex = textures[i];
		bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;

		GPUTextureFormat gpu_format = convert_tex_format(fbotex.format, &channels, &is_depth);

		if (!*fbotex.tex || is_temp) {
			/* Temp textures need to be queried each frame, others not. */
			if (is_temp) {
				*fbotex.tex = GPU_viewport_texture_pool_query(
				        DST.viewport, engine_type, width, height, channels, gpu_format);
			}
			else if (create_fb) {
				*fbotex.tex = GPU_texture_create_2D_custom(
				        width, height, channels, gpu_format, NULL, NULL);
			}
		}

		if (create_fb) {
			/* Depth textures do not consume a color attachment slot. */
			if (!is_depth) {
				++color_attachment;
			}
			drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
			GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
		}
	}

	if (create_fb && (textures_len > 0)) {
		if (!GPU_framebuffer_check_valid(*fb, NULL)) {
			printf("Error invalid framebuffer\n");
		}

		/* Detach temp textures */
		for (int i = 0; i < textures_len; ++i) {
			DRWFboTexture fbotex = textures[i];

			if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
				GPU_framebuffer_texture_detach(*fbotex.tex);
			}
		}

		/* Validation may have bound *fb; restore the default framebuffer. */
		GPU_framebuffer_bind(DST.default_framebuffer);
	}
}
2376
/* Free a framebuffer. Thin wrapper over the GPU module. */
void DRW_framebuffer_free(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_free(fb);
}
2381
/* Bind a framebuffer for drawing. Thin wrapper over the GPU module. */
void DRW_framebuffer_bind(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_bind(fb);
}
2386
2387 void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
2388 {
2389         if (color) {
2390                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
2391                 glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
2392         }
2393         if (depth) {
2394                 glDepthMask(GL_TRUE);
2395                 glClearDepth(clear_depth);
2396         }
2397         if (stencil) {
2398                 glStencilMask(0xFF);
2399         }
2400         glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
2401                 ((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
2402                 ((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
2403 }
2404
2405 void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
2406 {
2407         GLenum type;
2408         switch (channels) {
2409                 case 1: type = GL_RED; break;
2410                 case 2: type = GL_RG; break;
2411                 case 3: type = GL_RGB; break;
2412                 case 4: type = GL_RGBA; break;
2413                 default:
2414                         BLI_assert(false && "wrong number of read channels");
2415                         return;
2416         }
2417         glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
2418         glReadPixels(x, y, w, h, type, GL_FLOAT, data);
2419 }
2420
/* Thin wrapper: attach mip level `mip` of `tex` to attachment `slot` of `fb`. */
void DRW_framebuffer_texture_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
{
	GPU_framebuffer_texture_attach(fb, tex, slot, mip);
}
2425
/* Thin wrapper: attach a single layer of an array/3D texture to `fb`. */
void DRW_framebuffer_texture_layer_attach(struct GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip)
{
	GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
}
2430
/* Thin wrapper: attach one face of a cubemap texture to `fb`. */
void DRW_framebuffer_cubeface_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
{
	GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
}
2435
/* Thin wrapper: detach `tex` from the framebuffer it is attached to. */
void DRW_framebuffer_texture_detach(GPUTexture *tex)
{
	GPU_framebuffer_texture_detach(tex);
}
2440
/* Thin wrapper: blit attachment 0 of `fb_read` into attachment 0 of
 * `fb_write`; `depth`/`stencil` select which extra buffers to copy. */
void DRW_framebuffer_blit(struct GPUFrameBuffer *fb_read, struct GPUFrameBuffer *fb_write, bool depth, bool stencil)
{
	GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
}
2445
/* Thin wrapper: downsample `tex` through `num_iter` mip levels, invoking
 * `callback(userData, level)` at each level so the caller can issue the
 * actual downsample draw. */
void DRW_framebuffer_recursive_downsample(
        struct GPUFrameBuffer *fb, struct GPUTexture *tex, int num_iter,
        void (*callback)(void *userData, int level), void *userData)
{
	GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
}
2452
/* Set the GL viewport rectangle; the framebuffer argument is currently
 * unused (kept for API symmetry). */
void DRW_framebuffer_viewport_size(struct GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
{
	glViewport(x, y, w, h);
}
2457
2458 /* Use color management profile to draw texture to framebuffer */
void DRW_transform_to_display(GPUTexture *tex)
{
	/* Draw `tex` to the bound framebuffer as a single full-screen triangle,
	 * applying the scene's color management via an OCIO GLSL shader when
	 * available, else falling back to a builtin linear->sRGB (or plain
	 * image) shader. */
	drw_state_set(DRW_STATE_WRITE_COLOR);

	Gwn_VertFormat *vert_format = immVertexFormat();
	unsigned int pos = GWN_vertformat_attr_add(vert_format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
	unsigned int texco = GWN_vertformat_attr_add(vert_format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);

	const float dither = 1.0f;

	bool use_ocio = false;

	/* View transform is already applied for offscreen, don't apply again, see: T52046 */
	if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
		Scene *scene = DST.draw_ctx.scene;
		use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
		        &scene->view_settings, &scene->display_settings, NULL, dither, false);
	}

	if (!use_ocio) {
		/* View transform is already applied for offscreen, don't apply again, see: T52046 */
		if (DST.options.is_image_render && !DST.options.is_scene_render) {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
			immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
		}
		else {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
		}
		immUniform1i("image", 0);
	}

	GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */

	float mat[4][4];
	unit_m4(mat);
	immUniformMatrix4fv("ModelViewProjectionMatrix", mat);

	/* Full screen triangle: oversized (-1..3) so one tri covers the whole
	 * viewport; texcoords 0..2 map the visible 0..1 range. */
	immBegin(GWN_PRIM_TRIS, 3);
	immAttrib2f(texco, 0.0f, 0.0f);
	immVertex2f(pos, -1.0f, -1.0f);

	immAttrib2f(texco, 2.0f, 0.0f);
	immVertex2f(pos, 3.0f, -1.0f);

	immAttrib2f(texco, 0.0f, 2.0f);
	immVertex2f(pos, -1.0f, 3.0f);
	immEnd();

	GPU_texture_unbind(tex);

	if (use_ocio) {
		IMB_colormanagement_finish_glsl_draw();
	}
	else {
		immUnbindProgram();
	}
}
2517
2518 /** \} */
2519
2520
2521 /* -------------------------------------------------------------------- */
2522
2523 /** \name Viewport (DRW_viewport)
2524  * \{ */
2525
2526 static void *DRW_viewport_engine_data_ensure(void *engine_type)
2527 {
2528         void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
2529
2530         if (data == NULL) {
2531                 data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
2532         }
2533         return data;
2534 }
2535
2536 void DRW_engine_viewport_data_size_get(
2537         const void *engine_type_v,
2538         int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
2539 {
2540         const DrawEngineType *engine_type = engine_type_v;
2541
2542         if (r_fbl_len) {
2543                 *r_fbl_len = engine_type->vedata_size->fbl_len;
2544         }
2545         if (r_txl_len) {
2546                 *r_txl_len = engine_type->vedata_size->txl_len;
2547         }
2548         if (r_psl_len) {
2549                 *r_psl_len = engine_type->vedata_size->psl_len;
2550         }
2551         if (r_stl_len) {
2552                 *r_stl_len = engine_type->vedata_size->stl_len;
2553         }
2554 }
2555
/* Current viewport size in pixels, as a float[2]. */
const float *DRW_viewport_size_get(void)
{
	return &DST.size[0];
}
2560
/* Normalized screen-space right/up vectors, as two float[3] rows. */
const float *DRW_viewport_screenvecs_get(void)
{
	return &DST.screenvecs[0][0];
}
2565
/* Pixel size of the current region view (copied from rv3d->pixsize). */
const float *DRW_viewport_pixelsize_get(void)
{
	return &DST.pixsize;
}
2570
/* Flush the per-viewport draw cache: release cached memiters first, then
 * clear (without freeing) the mempools their elements lived in, and trim
 * unused instance-data buffers. */
static void drw_viewport_cache_resize(void)
{
	/* Release the memiter before clearing the mempools that references them */
	GPU_viewport_cache_release(DST.viewport);

	if (DST.vmempool != NULL) {
		BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_count(DST.vmempool->calls));
		BLI_mempool_clear_ex(DST.vmempool->calls_generate, BLI_mempool_count(DST.vmempool->calls_generate));
		BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_count(DST.vmempool->shgroups));
		BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_count(DST.vmempool->uniforms));
		BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_count(DST.vmempool->passes));
	}

	DRW_instance_data_list_free_unused(DST.idatalist);
	DRW_instance_data_list_resize(DST.idatalist);
}
2587
2588 /* It also stores viewport variable to an immutable place: DST
2589  * This is because a cache uniform only store reference
2590  * to its value. And we don't want to invalidate the cache
2591  * if this value change per viewport */
static void drw_viewport_var_init(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	/* Refresh DST.size */
	if (DST.viewport) {
		int size[2];
		GPU_viewport_size_get(DST.viewport, size);
		DST.size[0] = size[0];
		DST.size[1] = size[1];

		DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
		DST.default_framebuffer = fbl->default_fb;

		DST.vmempool = GPU_viewport_mempool_get(DST.viewport);

		/* Lazily create the per-viewport mempools on first use; they persist
		 * across redraws and are only cleared by drw_viewport_cache_resize. */
		if (DST.vmempool->calls == NULL) {
			DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
		}
		if (DST.vmempool->calls_generate == NULL) {
			DST.vmempool->calls_generate = BLI_mempool_create(sizeof(DRWCallGenerate), 0, 512, 0);
		}
		if (DST.vmempool->shgroups == NULL) {
			DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
		}
		if (DST.vmempool->uniforms == NULL) {
			DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
		}
		if (DST.vmempool->passes == NULL) {
			DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
		}

		DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
		DRW_instance_data_list_reset(DST.idatalist);
	}
	else {
		/* No viewport (e.g. non-viewport draw): zero everything out. */
		DST.size[0] = 0;
		DST.size[1] = 0;

		DST.default_framebuffer = NULL;
		DST.vmempool = NULL;
	}
	/* Refresh DST.screenvecs */
	copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
	copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
	normalize_v3(DST.screenvecs[0]);
	normalize_v3(DST.screenvecs[1]);

	/* Refresh DST.pixelsize */
	DST.pixsize = rv3d->pixsize;

	/* Reset facing */
	DST.frontface = GL_CCW;
	DST.backface = GL_CW;
	glFrontFace(DST.frontface);

	/* Edit-mode drawing needs the edit object's matrices set up in rv3d. */
	if (DST.draw_ctx.scene->obedit) {
		ED_view3d_init_mats_rv3d(DST.draw_ctx.scene->obedit, rv3d);
	}

	/* Alloc array of texture reference. */
	if (RST.bound_texs == NULL) {
		RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
	}
	if (RST.bound_tex_slots == NULL) {
		RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
	}

	/* Clear all matrix overrides so each redraw starts from rv3d matrices. */
	memset(viewport_matrix_override.override, 0x0, sizeof(viewport_matrix_override.override));
}
2662
2663 void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
2664 {
2665         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2666         BLI_assert(type >= DRW_MAT_PERS && type <= DRW_MAT_WININV);
2667
2668         if (viewport_matrix_override.override[type]) {
2669                 copy_m4_m4(mat, viewport_matrix_override.mat[type]);
2670         }
2671         else {
2672                 switch (type) {
2673                         case DRW_MAT_PERS:
2674                                 copy_m4_m4(mat, rv3d->persmat);
2675                                 break;
2676                         case DRW_MAT_PERSINV:
2677                                 copy_m4_m4(mat, rv3d->persinv);
2678                                 break;
2679                         case DRW_MAT_VIEW:
2680                                 copy_m4_m4(mat, rv3d->viewmat);
2681                                 break;
2682                         case DRW_MAT_VIEWINV:
2683                                 copy_m4_m4(mat, rv3d->viewinv);
2684                                 break;
2685                         case DRW_MAT_WIN:
2686                                 copy_m4_m4(mat, rv3d->winmat);
2687                                 break;
2688                         case DRW_MAT_WININV:
2689                                 invert_m4_m4(mat, rv3d->winmat);
2690                                 break;
2691                         default:
2692                                 BLI_assert(!"Matrix type invalid");
2693                                 break;
2694                 }
2695         }
2696 }
2697
/* Install an override for one matrix type; it stays active until unset
 * or until the next drw_viewport_var_init() clears all overrides. */
void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type)
{
	copy_m4_m4(viewport_matrix_override.mat[type], mat);
	viewport_matrix_override.override[type] = true;
}
2703
/* Drop the override for one matrix type (falls back to rv3d matrices). */
void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
{
	viewport_matrix_override.override[type] = false;
}
2708
/* True when the current region view uses a perspective projection. */
bool DRW_viewport_is_persp_get(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;
	return rv3d->is_persp;
}
2714
/* Default framebuffers owned by the active viewport. */
DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
{
	return GPU_viewport_framebuffer_list_get(DST.viewport);
}
2719
/* Default textures owned by the active viewport. */
DefaultTextureList *DRW_viewport_texture_list_get(void)
{
	return GPU_viewport_texture_list_get(DST.viewport);
}
2724
/* Tag the active viewport so it gets redrawn on the next update cycle. */
void DRW_viewport_request_redraw(void)
{
	GPU_viewport_tag_update(DST.viewport);
}
2729
2730 /** \} */
2731
2732
2733 /* -------------------------------------------------------------------- */
2734 /** \name ViewLayers (DRW_scenelayer)
2735  * \{ */
2736
2737 void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
2738 {
2739         for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2740                 if (sled->engine_type == engine_type) {
2741                         return sled->storage;
2742                 }
2743         }
2744         return NULL;
2745 }
2746
2747 void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
2748 {
2749         ViewLayerEngineData *sled;
2750
2751         for (sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2752                 if (sled->engine_type == engine_type) {
2753                         return &sled->storage;
2754                 }
2755         }
2756
2757         sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
2758         sled->engine_type = engine_type;
2759         sled->free = callback;
2760         BLI_addtail(&DST.draw_ctx.view_layer->drawdata, sled);
2761
2762         return &sled->storage;
2763 }
2764
2765 /** \} */
2766
2767
2768 /* -------------------------------------------------------------------- */
2769
2770 /** \name Objects (DRW_object)
2771  * \{ */
2772
2773 void *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type)
2774 {
2775         for (ObjectEngineData *oed = ob->drawdata.first; oed; oed = oed->next) {
2776                 if (oed->engine_type == engine_type) {
2777                         return oed->storage;
2778                 }
2779         }
2780         return NULL;
2781 }
2782
2783 void **DRW_object_engine_data_ensure(
2784         Object *ob, DrawEngineType *engine_type, void (*callback)(void *storage))
2785 {
2786         ObjectEngineData *oed;
2787
2788         for (oed = ob->drawdata.first; oed; oed = oed->next) {
2789                 if (oed->engine_type == engine_type) {
2790                         return &oed->storage;
2791                 }
2792         }
2793
2794         oed = MEM_callocN(sizeof(ObjectEngineData), "ObjectEngineData");
2795         oed->engine_type = engine_type;
2796         oed->free = callback;
2797         BLI_addtail(&ob->drawdata, oed);
2798
2799         return &oed->storage;
2800 }
2801
2802 /* XXX There is definitly some overlap between this and DRW_object_engine_data_ensure.
2803  * We should get rid of one of the two. */
/* Per-lamp engine data, fetched from the GPU lamp level (not yet stored
 * per view layer, see TODOs). `ob` must be a lamp object. */
LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, RenderEngineType *engine_type)
{
	BLI_assert(ob->type == OB_LAMP);

	Scene *scene = DST.draw_ctx.scene;

	/* TODO Dupliobjects */
	/* TODO Should be per scenelayer */
	return GPU_lamp_engine_data_get(scene, ob, NULL, engine_type);
}
2814
/* Thin wrapper: free engine data attached to a GPU lamp. */
void DRW_lamp_engine_data_free(LampEngineData *led)
{
	GPU_lamp_engine_data_free(led);
}
2819
2820 /** \} */
2821
2822
2823 /* -------------------------------------------------------------------- */
2824
2825 /** \name Rendering (DRW_engines)
2826  * \{ */
2827
/* Run each enabled engine's engine_init callback, timing it into the
 * engine's per-viewport profile data. */
static void drw_engines_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (engine->engine_init) {
			engine->engine_init(data);
		}

		PROFILE_END_UPDATE(data->init_time, stime);
	}
}
2842
/* Reset per-engine text caches and run each enabled engine's cache_init
 * callback. DST.text_store_p ends up pointing at the text cache slot of
 * the first enabled engine. */
static void drw_engines_cache_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (data->text_draw_cache) {
			DRW_text_cache_destroy(data->text_draw_cache);
			data->text_draw_cache = NULL;
		}
		if (DST.text_store_p == NULL) {
			DST.text_store_p = &data->text_draw_cache;
		}

		if (engine->cache_init) {
			engine->cache_init(data);
		}
	}
}
2862
/* Let every enabled engine add draw calls for one object to its cache. */
static void drw_engines_cache_populate(Object *ob)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->cache_populate) {
			engine->cache_populate(data, ob);
		}
	}
}
2874
/* Run each enabled engine's cache_finish callback after all objects have
 * been fed through cache_populate. */
static void drw_engines_cache_finish(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->cache_finish) {
			engine->cache_finish(data);
		}
	}
}
2886
/* Draw the viewport background. The FIRST enabled engine implementing
 * draw_background handles it (timed) and later engines are skipped; if
 * none does, fall back to the default background. */
static void drw_engines_draw_background(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->draw_background) {
			PROFILE_START(stime);

			DRW_stats_group_start(engine->idname);
			engine->draw_background(data);
			DRW_stats_group_end();

			PROFILE_END_UPDATE(data->background_time, stime);
			return;
		}
	}

	/* No draw_background found, doing default background */
	if (DRW_state_draw_background()) {
		DRW_draw_background();
	}
}
2910
/* Run each enabled engine's draw_scene callback inside a named stats
 * group, timing it into the engine's render_time. */
static void drw_engines_draw_scene(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (engine->draw_scene) {
			DRW_stats_group_start(engine->idname);
			engine->draw_scene(data);
			DRW_stats_group_end();
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
2927
/* Draw each engine's cached text overlay.
 * NOTE(review): the time is folded into data->render_time, the same slot
 * draw_scene updates — presumably intentional (text counts as render
 * time); confirm. */
static void drw_engines_draw_text(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (data->text_draw_cache) {
			DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.v3d, DST.draw_ctx.ar, false);
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
2942
2943 #define MAX_INFO_LINES 10
2944
2945 /**
2946  * Returns the offset required for the drawing of engines info.
2947  */
int DRW_draw_region_engine_info_offset(void)
{
	int lines = 0;
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		/* Count the number of lines. */
		if (data->info[0] != '\0') {
			lines++;
			char *c = data->info;
			/* NOTE(review): the post-increment means '\n' is checked starting
			 * at the second character, so a newline at index 0 would not be
			 * counted — presumably info strings never start with '\n';
			 * confirm against the engines that fill data->info. */
			while (*c++ != '\0') {
				if (*c == '\n') {
					lines++;
				}
			}
		}
	}
	/* Clamp to the number of lines the drawing side can show. */
	return MIN2(MAX_INFO_LINES, lines) * UI_UNIT_Y;
}
2968
2969 /**
2970  * Actual drawing;
2971  */
2972 void DRW_draw_region_engine_info(void)
2973 {
2974         const char *info_array_final[MAX_INFO_LINES + 1];
2975         /* This should be maxium number of engines running at the same time. */
2976         char info_array[MAX_INFO_LINES][GPU_INFO_SIZE];
2977         int i = 0;
2978
2979         const DRWContextState *draw_ctx = DRW_context_state_get();
2980         ARegion *ar = draw_ctx->ar;
2981         float fill_color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
2982
2983         UI_GetThemeColor3fv(TH_HIGH_GRAD, fill_color);
2984         mul_v3_fl(fill_color, fill_color[3]);
2985
2986         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2987                 DrawEngineType *engine = link->data;
2988                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2989
2990                 if (data->info[0] != '\0') {
2991                         char *chr_current = data->info;
2992                         char *chr_start = chr_current;
2993                         int line_len = 0;
2994
2995                         while (*chr_current++ != '\0') {
2996                                 line_len++;
2997                                 if (*chr_current == '\n') {
2998                                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
2999                                         /* Re-start counting. */
3000                                         chr_start = chr_current + 1;
3001                                         line_len = -1;
3002                                 }
3003                         }
3004
3005                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
3006
3007                         if (i >= MAX_INFO_LINES) {
3008                                 break;
3009                         }
3010                 }
3011         }
3012
3013         for (int j = 0; j < i; j++) {
3014                 info_array_final[j] = info_array[j];
3015         }
3016         info_array_final[i] = NULL;
3017
3018         if (info_array[0] != NULL) {
3019                 ED_region_info_draw_multiline(ar, info_array_final, fill_color, true);
3020         }
3021 }
3022
3023 #undef MAX_INFO_LINES
3024
/* Append `engine` to DST.enabled_engines; list order is draw order. */
static void use_drw_engine(DrawEngineType *engine)
{
	LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
	ld->data = engine;
	BLI_addtail(&DST.enabled_engines, ld);
}
3031
3032 /* TODO revisit this when proper layering is implemented */
3033 /* Gather all draw engines needed and store them in DST.enabled_engines
3034  * That also define the rendering order of engines */
/* TODO revisit this when proper layering is implemented */
/* Gather all draw engines needed and store them in DST.enabled_engines
 * That also define the rendering order of engines */
static void drw_engines_enable_from_engine(RenderEngineType *engine_type)
{
	/* TODO layers */
	if (engine_type->draw_engine != NULL) {
		use_drw_engine(engine_type->draw_engine);
	}

	/* Non-internal (external) render engines draw through the generic
	 * external draw engine. */
	if ((engine_type->flag & RE_INTERNAL) == 0) {
		drw_engines_enable_external();
	}
}
3046
/* Always-on object overlay engine (relationship lines, outlines, etc.). */
static void drw_engines_enable_from_object_mode(void)
{
	use_drw_engine(&draw_engine_object_type);
}
3051
/* Enable the overlay engine(s) matching the current context mode
 * (edit/pose/sculpt/paint/particle). Object mode needs none. */
static void drw_engines_enable_from_mode(int mode)
{
	switch (mode) {
		case CTX_MODE_EDIT_MESH:
			use_drw_engine(&draw_engine_edit_mesh_type);
			break;
		case CTX_MODE_EDIT_CURVE:
			use_drw_engine(&draw_engine_edit_curve_type);
			break;
		case CTX_MODE_EDIT_SURFACE:
			use_drw_engine(&draw_engine_edit_surface_type);
			break;
		case CTX_MODE_EDIT_TEXT:
			use_drw_engine(&draw_engine_edit_text_type);
			break;
		case CTX_MODE_EDIT_ARMATURE:
			use_drw_engine(&draw_engine_edit_armature_type);
			break;
		case CTX_MODE_EDIT_METABALL:
			use_drw_engine(&draw_engine_edit_metaball_type);
			break;
		case CTX_MODE_EDIT_LATTICE:
			use_drw_engine(&draw_engine_edit_lattice_type);
			break;
		case CTX_MODE_POSE:
			use_drw_engine(&draw_engine_pose_type);
			break;
		case CTX_MODE_SCULPT:
			use_drw_engine(&draw_engine_sculpt_type);
			break;
		case CTX_MODE_PAINT_WEIGHT:
			/* Weight paint shows pose bones on top of the weight overlay. */
			use_drw_engine(&draw_engine_pose_type);
			use_drw_engine(&draw_engine_paint_weight_type);
			break;
		case CTX_MODE_PAINT_VERTEX:
			use_drw_engine(&draw_engine_paint_vertex_type);
			break;
		case CTX_MODE_PAINT_TEXTURE:
			use_drw_engine(&draw_engine_paint_texture_type);
			break;
		case CTX_MODE_PARTICLE:
			use_drw_engine(&draw_engine_particle_type);
			break;
		case CTX_MODE_OBJECT:
			break;
		default:
			BLI_assert(!"Draw mode invalid");
			break;
	}
}
3102
3103 /**
3104  * Use for select and depth-drawing.
3105  */
/**
 * Use for select and depth-drawing.
 */
static void drw_engines_enable_basic(void)
{
	use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
}
3110
3111 /**
3112  * Use for external render engines.
3113  */
static void drw_engines_enable_external(void)
{
	/* Generic draw engine used by non-internal render engines. */
	use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
}
3118
/* Build DST.enabled_engines for this redraw: the render engine's own
 * draw engine first, then (when support drawing is on) the object
 * overlay engine and the current mode's overlay engines. */
static void drw_engines_enable(const Scene *scene, ViewLayer *view_layer, RenderEngineType *engine_type)
{
	Object *obact = OBACT(view_layer);
	const int mode = CTX_data_mode_enum_ex(scene->obedit, obact);

	drw_engines_enable_from_engine(engine_type);

	if (DRW_state_draw_support()) {
		drw_engines_enable_from_object_mode();
		drw_engines_enable_from_mode(mode);
	}
}
3131
/* Clear the enabled-engines list (frees only the link nodes). */
static void drw_engines_disable(void)
{
	BLI_freelistN(&DST.enabled_engines);
}
3136
3137 static unsigned int DRW_engines_get_hash(void)
3138 {
3139         unsigned int hash = 0;
3140         /* The cache depends on enabled engines */
3141         /* FIXME : if collision occurs ... segfault */
3142         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3143                 DrawEngineType *engine = link->data;
3144                 hash += BLI_ghashutil_strhash_p(engine->idname);
3145         }
3146
3147         return hash;
3148 }
3149
/* Print one stats cell: `u` is the column (each 5 widget-units wide) and
 * `v` the row, anchored near the top of the visible region rect. */
static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
{
	BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
	                       rect->ymax - (3 + v) * U.widget_unit, 0.0f,
	                       txt, size);
}
3156
3157 /* CPU stats */
3158 static void drw_debug_cpu_stats(void)
3159 {
3160         int u, v;
3161         double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
3162         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3163         rcti rect;
3164         struct ARegion *ar = DST.draw_ctx.ar;
3165         ED_region_visible_rect(ar, &rect);
3166
3167         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3168
3169         /* row by row */
3170         v = 0; u = 0;
3171         /* Label row */
3172         char col_label[32];
3173         sprintf(col_label, "Engine");
3174         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3175         sprintf(col_label, "Init");
3176         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3177         sprintf(col_label, "Background");
3178         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3179         sprintf(col_label, "Render");
3180         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3181         sprintf(col_label, "Total (w/o cache)");
3182         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3183         v++;
3184
3185         /* Engines rows */
3186         char time_to_txt[16];
3187         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3188                 u = 0;
3189                 DrawEngineType *engine = link->data;
3190                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3191
3192                 draw_stat(&rect, u++, v, engine->idname, sizeof(engine->idname));
3193
3194                 init_tot_time += data->init_time;
3195                 sprintf(time_to_txt, "%.2fms", data->init_time);
3196                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3197
3198                 background_tot_time += data->background_time;
3199                 sprintf(time_to_txt, "%.2fms", data->background_time);
3200                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3201
3202                 render_tot_time += data->render_time;
3203                 sprintf(time_to_txt, "%.2fms", data->render_time);
3204                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3205
3206                 tot_time += data->init_time + data->background_time + data->render_time;
3207                 sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
3208                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3209                 v++;
3210         }
3211
3212         /* Totals row */
3213         u = 0;
3214         sprintf(col_label, "Sub Total");
3215         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3216         sprintf(time_to_txt, "%.2fms", init_tot_time);
3217         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3218         sprintf(time_to_txt, "%.2fms", background_tot_time);
3219         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3220         sprintf(time_to_txt, "%.2fms", render_tot_time);
3221         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3222         sprintf(time_to_txt, "%.2fms", tot_time);
3223         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3224         v += 2;
3225
3226         u = 0;
3227         sprintf(col_label, "Cache Time");
3228         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3229         sprintf(time_to_txt, "%.2fms", DST.cache_time);
3230         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3231 }
3232
3233 /* Display GPU time for each passes */
3234 static void drw_debug_gpu_stats(void)
3235 {
3236         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3237         rcti rect;
3238         struct ARegion *ar = DST.draw_ctx.ar;
3239         ED_region_visible_rect(ar, &rect);
3240
3241         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3242
3243         int v = BLI_listbase_count(&DST.enabled_engines) + 5;
3244
3245         char stat_string[32];
3246
3247         /* Memory Stats */
3248         unsigned int tex_mem = GPU_texture_memory_usage_get();
3249         unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
3250
3251         sprintf(stat_string, "GPU Memory");
3252         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3253         sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
3254         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3255         sprintf(stat_string, "   |--> Textures");
3256         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3257         sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
3258         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3259         sprintf(stat_string, "   |--> Meshes");
3260         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3261         sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
3262         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3263
3264         /* Pre offset for stats_draw */
3265         rect.ymax -= (3 + ++v) * U.widget_unit;
3266
3267         /* Rendering Stats */
3268         DRW_stats_draw(&rect);
3269 }
3270
3271 /* -------------------------------------------------------------------- */
3272
3273 /** \name View Update
3274  * \{ */
3275
3276 void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
3277 {
3278         RenderEngineType *engine_type = update_ctx->engine_type;
3279         ARegion *ar = update_ctx->ar;
3280         View3D *v3d = update_ctx->v3d;
3281         RegionView3D *rv3d = ar->regiondata;
3282         Depsgraph *depsgraph = update_ctx->depsgraph;
3283         Scene *scene = update_ctx->scene;
3284         ViewLayer *view_layer = update_ctx->view_layer;
3285
3286         if (rv3d->viewport == NULL) {
3287                 return;
3288         }
3289
3290
3291         /* Reset before using it. */
3292         memset(&DST, 0x0, sizeof(DST));
3293
3294         DST.viewport = rv3d->viewport;
3295         DST.draw_ctx = (DRWContextState){
3296                 ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, depsgraph,
3297                 NULL,
3298         };
3299
3300         drw_engines_enable(scene, view_layer, engine_type);
3301
3302         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3303                 DrawEngineType *draw_engine = link->data;
3304                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);
3305
3306                 if (draw_engine->view_update) {
3307                         draw_engine->view_update(data);
3308                 }
3309         }
3310
3311         DST.viewport = NULL;
3312
3313         drw_engines_disable();
3314 }
3315
3316 /** \} */
3317
3318 /** \name ID Update
3319  * \{ */
3320
/* TODO(sergey): This code is run for each changed ID (including the ones which
 * are changed indirectly via update flush). Need to find a way to make this
 * run really fast, hopefully without any memory allocations on the heap.
 * Idea here could be to run every known engine's id_update() and make them
 * do nothing if there is no engine-specific data yet.
 */
3327 void DRW_notify_id_update(const DRWUpdateContext *update_ctx, ID *id)
3328 {
3329         RenderEngineType *engine_type = update_ctx->engine_type;
3330         ARegion *ar = update_ctx->ar;
3331         View3D *v3d = update_ctx->v3d;
3332         RegionView3D *rv3d = ar->regiondata;
3333         Depsgraph *depsgraph = update_ctx->depsgraph;
3334         Scene *scene = update_ctx->scene;
3335         ViewLayer *view_layer = update_ctx->view_layer;
3336         if (rv3d->viewport == NULL) {
3337                 return;
3338         }
3339         /* Reset before using it. */
3340         memset(&DST, 0x0, sizeof(DST));
3341         DST.viewport = rv3d->viewport;
3342         DST.draw_ctx = (DRWContextState){
3343                 ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, depsgraph, NULL,
3344         };
3345         drw_engines_enable(scene, view_layer, engine_type);
3346         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3347                 DrawEngineType *draw_engine = link->data;
3348                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);
3349                 if (draw_engine->id_update) {
3350                         draw_engine->id_update(data, id);
3351                 }
3352         }
3353         DST.viewport = NULL;
3354         drw_engines_disable();
3355 }
3356
3357 /** \} */
3358
3359 /* -------------------------------------------------------------------- */
3360
3361 /** \name Main Draw Loops (DRW_draw)
3362  * \{ */
3363
3364 /* Everything starts here.
3365  * This function takes care of calling all cache and rendering functions
3366  * for each relevant engine / mode engine. */
3367 void DRW_draw_view(const bContext *C)
3368 {
3369         struct Depsgraph *depsgraph = CTX_data_depsgraph(C);
3370         RenderEngineType *engine_type = CTX_data_engine_type(C);
3371         ARegion *ar = CTX_wm_region(C);
3372         View3D *v3d = CTX_wm_view3d(C);
3373
3374         /* Reset before using it. */
3375         memset(&DST, 0x0, sizeof(DST));
3376         DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, C);
3377 }
3378
3379 /**
3380  * Used for both regular and off-screen drawing.
3381  * Need to reset DST before calling this function
3382  */
3383 void DRW_draw_render_loop_ex(
3384         struct Depsgraph *depsgraph,
3385         RenderEngineType *engine_type,
3386         ARegion *ar, View3D *v3d,
3387         const bContext *evil_C)
3388 {