Eevee: Stop depsgraph update callback from allocating unneeded memory
[blender.git] / source / blender / draw / intern / draw_manager.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager.c
23  *  \ingroup draw
24  */
25
26 #include <stdio.h>
27
28 #include "BLI_dynstr.h"
29 #include "BLI_listbase.h"
30 #include "BLI_mempool.h"
31 #include "BLI_rect.h"
32 #include "BLI_string.h"
33
34 #include "BIF_glutil.h"
35
36 #include "BKE_global.h"
37 #include "BKE_mesh.h"
38 #include "BKE_object.h"
39 #include "BKE_pbvh.h"
40 #include "BKE_paint.h"
41 #include "BKE_workspace.h"
42
43 #include "BLT_translation.h"
44 #include "BLF_api.h"
45
46 #include "DRW_engine.h"
47 #include "DRW_render.h"
48
49 #include "DNA_camera_types.h"
50 #include "DNA_view3d_types.h"
51 #include "DNA_screen_types.h"
52 #include "DNA_mesh_types.h"
53 #include "DNA_meshdata_types.h"
54
55 #include "ED_space_api.h"
56 #include "ED_screen.h"
57
58 #include "intern/gpu_codegen.h"
59 #include "GPU_batch.h"
60 #include "GPU_draw.h"
61 #include "GPU_extensions.h"
62 #include "GPU_framebuffer.h"
63 #include "GPU_immediate.h"
64 #include "GPU_lamp.h"
65 #include "GPU_material.h"
66 #include "GPU_shader.h"
67 #include "GPU_texture.h"
68 #include "GPU_uniformbuffer.h"
69 #include "GPU_viewport.h"
70 #include "GPU_matrix.h"
71
72 #include "IMB_colormanagement.h"
73
74 #include "RE_engine.h"
75
76 #include "UI_interface.h"
77 #include "UI_resources.h"
78
79 #include "WM_api.h"
80 #include "WM_types.h"
81
82 #include "draw_manager_text.h"
83 #include "draw_manager_profiling.h"
84
85 /* only for callbacks */
86 #include "draw_cache_impl.h"
87
88 #include "draw_mode_engines.h"
89 #include "engines/clay/clay_engine.h"
90 #include "engines/eevee/eevee_engine.h"
91 #include "engines/basic/basic_engine.h"
92 #include "engines/external/external_engine.h"
93
94 #include "DEG_depsgraph.h"
95 #include "DEG_depsgraph_query.h"
96
97 /* -------------------------------------------------------------------- */
98 /** \name Local Features
99  * \{ */
100
101 #define USE_PROFILE
102
103 #ifdef USE_PROFILE
104 #  include "PIL_time.h"
105
106 #  define PROFILE_TIMER_FALLOFF 0.1
107
108 #  define PROFILE_START(time_start) \
109         double time_start = PIL_check_seconds_timer();
110
111 #  define PROFILE_END_ACCUM(time_accum, time_start) { \
112         time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
113 } ((void)0)
114
115 /* exponential moving average */
116 #  define PROFILE_END_UPDATE(time_update, time_start) { \
117         double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
118         time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
119                       (_time_delta * PROFILE_TIMER_FALLOFF); \
120 } ((void)0)
121
122 #else  /* USE_PROFILE */
123
124 #  define PROFILE_START(time_start) ((void)0)
125 #  define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
126 #  define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
127
128 #endif  /* USE_PROFILE */
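
/* Illustrative usage sketch of the profiling macros above (not part of the
 * original file; `stime` and `some_engine_time` are hypothetical names):
 *
 *   PROFILE_START(stime);
 *   ... do the work to be measured ...
 *   PROFILE_END_ACCUM(DST.cache_time, stime);     // accumulate elapsed milliseconds
 *   PROFILE_END_UPDATE(some_engine_time, stime);  // exponential moving average of elapsed time
 */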
129
130
131 /* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
132 #define USE_GPU_SELECT
133
134 #ifdef USE_GPU_SELECT
135 #  include "ED_view3d.h"
136 #  include "ED_armature.h"
137 #  include "GPU_select.h"
138 #endif
139
140 /** \} */
141
142
143 #define MAX_ATTRIB_NAME 32
144 #define MAX_ATTRIB_COUNT 6 /* Can be adjusted for more */
145 #define MAX_PASS_NAME 32
146 #define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
147
148 extern char datatoc_gpu_shader_2D_vert_glsl[];
149 extern char datatoc_gpu_shader_3D_vert_glsl[];
150 extern char datatoc_gpu_shader_fullscreen_vert_glsl[];
151
152 /* Prototypes. */
153 static void DRW_engines_enable_external(void);
154
155 /* Structures */
156 typedef enum {
157         DRW_UNIFORM_BOOL,
158         DRW_UNIFORM_SHORT_TO_INT,
159         DRW_UNIFORM_SHORT_TO_FLOAT,
160         DRW_UNIFORM_INT,
161         DRW_UNIFORM_FLOAT,
162         DRW_UNIFORM_TEXTURE,
163         DRW_UNIFORM_BUFFER,
164         DRW_UNIFORM_MAT3,
165         DRW_UNIFORM_MAT4,
166         DRW_UNIFORM_BLOCK
167 } DRWUniformType;
168
169 typedef enum {
170         DRW_ATTRIB_INT,
171         DRW_ATTRIB_FLOAT,
172 } DRWAttribType;
173
174 struct DRWUniform {
175         struct DRWUniform *next;
176         DRWUniformType type;
177         int location;
178         int length;
179         int arraysize;
180         const void *value;
181 };
182
183 typedef struct DRWAttrib {
184         struct DRWAttrib *prev;
185         char name[MAX_ATTRIB_NAME];
186         int location;
187         int format_id;
188         int size; /* number of components */
189         int type;
190 } DRWAttrib;
191
192 struct DRWInterface {
193         DRWUniform *uniforms;   /* DRWUniform, singly-linked list */
194         DRWAttrib *attribs;     /* DRWAttrib, singly-linked list */
195         DRWAttrib *attribs_first; /* First added attrib to traverse in the right order */
196         int attribs_count;
197         int attribs_stride;
198         int attribs_size[16];
199         int attribs_loc[16];
200         /* matrices locations */
201         int model;
202         int modelinverse;
203         int modelview;
204         int modelviewinverse;
205         int projection;
206         int projectioninverse;
207         int view;
208         int viewinverse;
209         int modelviewprojection;
210         int viewprojection;
211         int viewprojectioninverse;
212         int normal;
213         int worldnormal;
214         int camtexfac;
215         int orcotexfac;
216         int eye;
217         int clipplanes;
218         /* Dynamic batch */
219         Gwn_Batch *instance_batch; /* contains instance attributes */
220         GLuint instance_vbo; /* same as instance_batch but generated from DRWCalls */
221         int instance_count;
222         Gwn_VertFormat vbo_format;
223 };
224
225 struct DRWPass {
226         /* Single linked list with last member to append */
227         DRWShadingGroup *shgroups;
228         DRWShadingGroup *shgroups_last;
229
230         DRWState state;
231         char name[MAX_PASS_NAME];
232 };
233
234 typedef struct DRWCallHeader {
235         void *prev;
236
237 #ifdef USE_GPU_SELECT
238         int select_id;
239 #endif
240         uchar type;
241 } DRWCallHeader;
242
243 typedef struct DRWCall {
244         DRWCallHeader head;
245
246         float obmat[4][4];
247         Gwn_Batch *geometry;
248
249         Object *ob; /* Optional */
250         ID *ob_data; /* Optional. */
251 } DRWCall;
252
253 typedef struct DRWCallGenerate {
254         DRWCallHeader head;
255
256         float obmat[4][4];
257
258         DRWCallGenerateFn *geometry_fn;
259         void *user_data;
260 } DRWCallGenerate;
261
262 typedef struct DRWCallDynamic {
263         DRWCallHeader head;
264
265         const void *data[MAX_ATTRIB_COUNT];
266 } DRWCallDynamic;
267
268 struct DRWShadingGroup {
269         struct DRWShadingGroup *next;
270
271         GPUShader *shader;               /* Shader to bind */
272         DRWInterface interface;          /* Uniforms pointers */
273
274         /* DRWCall or DRWCallDynamic depending on type */
275         void *calls;
276         void *calls_first; /* To be able to traverse the list in the order of addition */
277
278         DRWState state_extra;            /* State changes for this batch only (or'd with the pass's state) */
279         DRWState state_extra_disable;    /* State changes for this batch only (and'd with the pass's state) */
280         unsigned int stencil_mask;       /* Stencil mask to use for stencil test / write operations */
281         int type;
282
283         ID *instance_data;         /* Object->data to instance */
284         Gwn_Batch *instance_geom;  /* Geometry to instance */
285         Gwn_Batch *batch_geom;     /* Result of call batching */
286
287 #ifdef USE_GPU_SELECT
288         /* backlink to pass we're in */
289         DRWPass *pass_parent;
290 #endif
291 };
292
293 /* Used by DRWShadingGroup.type */
294 enum {
295         DRW_SHG_NORMAL,
296         DRW_SHG_POINT_BATCH,
297         DRW_SHG_LINE_BATCH,
298         DRW_SHG_TRIANGLE_BATCH,
299         DRW_SHG_INSTANCE,
300 };
301
302 /* Used by DRWCall.type */
303 enum {
304         /* A single batch */
305         DRW_CALL_SINGLE,
306         /* Uses a callback to draw with any number of batches. */
307         DRW_CALL_GENERATE,
308         /* Carries an arbitrary number of per-instance attribute values. */
309         DRW_CALL_DYNAMIC,
310 };
311
312 /* only 16 bits long */
313 enum {
314         STENCIL_SELECT          = (1 << 0),
315         STENCIL_ACTIVE          = (1 << 1),
316 };
317
318 /** Render State: No persistent data between draw calls. */
319 static struct DRWGlobalState {
320         /* Cache generation */
321         ViewportMemoryPool *vmempool;
322         DRWUniform *last_uniform;
323         DRWAttrib *last_attrib;
324         DRWCall *last_call;
325         DRWCallGenerate *last_callgenerate;
326         DRWCallDynamic *last_calldynamic;
327         DRWShadingGroup *last_shgroup;
328
329         /* Rendering state */
330         GPUShader *shader;
331
332         /* Managed by `DRW_state_set`, `DRW_state_reset` */
333         DRWState state;
334         unsigned int stencil_mask;
335
336         /* Per viewport */
337         GPUViewport *viewport;
338         struct GPUFrameBuffer *default_framebuffer;
339         float size[2];
340         float screenvecs[2][3];
341         float pixsize;
342
343         GLenum backface, frontface;
344
345         /* Clip planes */
346         int num_clip_planes;
347         float clip_planes_eq[MAX_CLIP_PLANES][4];
348
349         struct {
350                 unsigned int is_select : 1;
351                 unsigned int is_depth : 1;
352                 unsigned int is_image_render : 1;
353                 unsigned int is_scene_render : 1;
354         } options;
355
356         /* Current rendering context */
357         DRWContextState draw_ctx;
358
359         /* Convenience pointer to text_store owned by the viewport */
360         struct DRWTextStore **text_store_p;
361
362         ListBase enabled_engines; /* RenderEngineType */
363
364         /* Profiling */
365         double cache_time;
366 } DST = {NULL};
367
368 /** GPU Resource State: Memory storage between drawing. */
369 static struct DRWResourceState {
370         GPUTexture **bound_texs;
371
372         bool *bound_tex_slots;
373
374         int bind_tex_inc;
375         int bind_ubo_inc;
376 } RST = {NULL};
377
378 static struct DRWMatrixOverride {
379         float mat[6][4][4];
380         bool override[6];
381 } viewport_matrix_override = {{{{0}}}};
382
383 ListBase DRW_engines = {NULL, NULL};
384
385 #ifdef USE_GPU_SELECT
386 static unsigned int g_DRW_select_id = (unsigned int)-1;
387
388 void DRW_select_load_id(unsigned int id)
389 {
390         BLI_assert(G.f & G_PICKSEL);
391         g_DRW_select_id = id;
392 }
393 #endif
394
395
396 /* -------------------------------------------------------------------- */
397
398 /** \name Textures (DRW_texture)
399  * \{ */
400
401 static void drw_texture_get_format(
402         DRWTextureFormat format,
403         GPUTextureFormat *r_data_type, int *r_channels)
404 {
405         switch (format) {
406                 case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
407                 case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
408                 case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
409                 case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
410                 case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
411                 case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
412                 case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
413                 case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
414                 case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
415                 case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
416 #if 0
417                 case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
418                 case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
419                 case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
420 #endif
421                 case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
422                 case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
423                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
424                 case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
425                 default:
426                         /* Texture format not supported; uncomment it above to enable it. */
427                         BLI_assert(false);
428                         break;
429         }
430
431         switch (format) {
432                 case DRW_TEX_RGBA_8:
433                 case DRW_TEX_RGBA_16:
434                 case DRW_TEX_RGBA_32:
435                         *r_channels = 4;
436                         break;
437                 case DRW_TEX_RGB_8:
438                 case DRW_TEX_RGB_16:
439                 case DRW_TEX_RGB_32:
440                 case DRW_TEX_RGB_11_11_10:
441                         *r_channels = 3;
442                         break;
443                 case DRW_TEX_RG_8:
444                 case DRW_TEX_RG_16:
445                 case DRW_TEX_RG_32:
446                         *r_channels = 2;
447                         break;
448                 default:
449                         *r_channels = 1;
450                         break;
451         }
452 }
453
454 static void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
455 {
456         GPU_texture_bind(tex, 0);
457         if (flags & DRW_TEX_MIPMAP) {
458                 GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
459                 DRW_texture_generate_mipmaps(tex);
460         }
461         else {
462                 GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
463         }
464         GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
465         GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
466         GPU_texture_unbind(tex);
467 }
468
469 GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
470 {
471         GPUTexture *tex;
472         GPUTextureFormat data_type;
473         int channels;
474
475         drw_texture_get_format(format, &data_type, &channels);
476         tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
477         drw_texture_set_parameters(tex, flags);
478
479         return tex;
480 }
481
482 GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
483 {
484         GPUTexture *tex;
485         GPUTextureFormat data_type;
486         int channels;
487
488         drw_texture_get_format(format, &data_type, &channels);
489         tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
490         drw_texture_set_parameters(tex, flags);
491
492         return tex;
493 }
494
495 GPUTexture *DRW_texture_create_2D_array(
496         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
497 {
498         GPUTexture *tex;
499         GPUTextureFormat data_type;
500         int channels;
501
502         drw_texture_get_format(format, &data_type, &channels);
503         tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
504         drw_texture_set_parameters(tex, flags);
505
506         return tex;
507 }
508
509 GPUTexture *DRW_texture_create_3D(
510         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
511 {
512         GPUTexture *tex;
513         GPUTextureFormat data_type;
514         int channels;
515
516         drw_texture_get_format(format, &data_type, &channels);
517         tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
518         drw_texture_set_parameters(tex, flags);
519
520         return tex;
521 }
522
523 GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
524 {
525         GPUTexture *tex;
526         GPUTextureFormat data_type;
527         int channels;
528
529         drw_texture_get_format(format, &data_type, &channels);
530         tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
531         drw_texture_set_parameters(tex, flags);
532
533         return tex;
534 }
535
536 void DRW_texture_generate_mipmaps(GPUTexture *tex)
537 {
538         GPU_texture_bind(tex, 0);
539         GPU_texture_generate_mipmap(tex);
540         GPU_texture_unbind(tex);
541 }
542
543 void DRW_texture_update(GPUTexture *tex, const float *pixels)
544 {
545         GPU_texture_update(tex, pixels);
546 }
547
548 void DRW_texture_free(GPUTexture *tex)
549 {
550         GPU_texture_free(tex);
551 }
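
/* Illustrative usage sketch (not part of the original file; `tex`, `pixels`
 * and the dimensions are hypothetical):
 *
 *   GPUTexture *tex = DRW_texture_create_2D(
 *           256, 256, DRW_TEX_RGBA_8, DRW_TEX_FILTER | DRW_TEX_MIPMAP | DRW_TEX_WRAP, NULL);
 *   ...
 *   DRW_texture_update(tex, pixels);  // optional CPU-side re-upload
 *   DRW_texture_free(tex);
 */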
552
553 /** \} */
554
555
556 /* -------------------------------------------------------------------- */
557
558 /** \name Uniform Buffer Object (DRW_uniformbuffer)
559  * \{ */
560
561 GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
562 {
563         return GPU_uniformbuffer_create(size, data, NULL);
564 }
565
566 void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
567 {
568         GPU_uniformbuffer_update(ubo, data);
569 }
570
571 void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
572 {
573         GPU_uniformbuffer_free(ubo);
574 }
575
576 /** \} */
577
578
579 /* -------------------------------------------------------------------- */
580
581 /** \name Shaders (DRW_shader)
582  * \{ */
583
584 GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
585 {
586         return GPU_shader_create(vert, frag, geom, NULL, defines);
587 }
588
589 GPUShader *DRW_shader_create_with_lib(
590         const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
591 {
592         GPUShader *sh;
593         char *vert_with_lib = NULL;
594         char *frag_with_lib = NULL;
595         char *geom_with_lib = NULL;
596
597         DynStr *ds_vert = BLI_dynstr_new();
598         BLI_dynstr_append(ds_vert, lib);
599         BLI_dynstr_append(ds_vert, vert);
600         vert_with_lib = BLI_dynstr_get_cstring(ds_vert);
601         BLI_dynstr_free(ds_vert);
602
603         DynStr *ds_frag = BLI_dynstr_new();
604         BLI_dynstr_append(ds_frag, lib);
605         BLI_dynstr_append(ds_frag, frag);
606         frag_with_lib = BLI_dynstr_get_cstring(ds_frag);
607         BLI_dynstr_free(ds_frag);
608
609         if (geom) {
610                 DynStr *ds_geom = BLI_dynstr_new();
611                 BLI_dynstr_append(ds_geom, lib);
612                 BLI_dynstr_append(ds_geom, geom);
613                 geom_with_lib = BLI_dynstr_get_cstring(ds_geom);
614                 BLI_dynstr_free(ds_geom);
615         }
616
617         sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
618
619         MEM_freeN(vert_with_lib);
620         MEM_freeN(frag_with_lib);
621         if (geom) {
622                 MEM_freeN(geom_with_lib);
623         }
624
625         return sh;
626 }
627
628 GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
629 {
630         return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
631 }
632
633 GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
634 {
635         return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
636 }
637
638 GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
639 {
640         return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines);
641 }
642
643 GPUShader *DRW_shader_create_3D_depth_only(void)
644 {
645         return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
646 }
647
648 void DRW_shader_free(GPUShader *shader)
649 {
650         GPU_shader_free(shader);
651 }
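
/* Illustrative usage sketch (not part of the original file; the datatoc_my_*
 * strings and the define are hypothetical placeholders for engine shader sources):
 *
 *   GPUShader *sh = DRW_shader_create_with_lib(
 *           datatoc_my_vert_glsl, NULL, datatoc_my_frag_glsl,
 *           datatoc_my_lib_glsl, "#define MY_OPTION\n");
 *   ...
 *   DRW_shader_free(sh);
 */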
652
653 /** \} */
654
655
656 /* -------------------------------------------------------------------- */
657
658 /** \name Interface (DRW_interface)
659  * \{ */
660
661 static void DRW_interface_create(DRWInterface *interface, GPUShader *shader)
662 {
663         interface->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
664         interface->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
665         interface->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
666         interface->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
667         interface->projection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION);
668         interface->projectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION_INV);
669         interface->view = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW);
670         interface->viewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW_INV);
671         interface->viewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION);
672         interface->viewprojectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION_INV);
673         interface->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
674         interface->normal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
675         interface->worldnormal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
676         interface->camtexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CAMERATEXCO);
677         interface->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
678         interface->clipplanes = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CLIPPLANES);
679         interface->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
680         interface->instance_count = 0;
681         interface->attribs_count = 0;
682         interface->attribs_stride = 0;
683         interface->instance_vbo = 0;
684         interface->instance_batch = NULL;
685
686         memset(&interface->vbo_format, 0, sizeof(Gwn_VertFormat));
687
688         interface->uniforms = NULL;
689         interface->attribs = NULL;
690         interface->attribs_first = NULL;
691 }
692
693
694 static void DRW_interface_uniform(DRWShadingGroup *shgroup, const char *name,
695                                   DRWUniformType type, const void *value, int length, int arraysize)
696 {
697         int location;
698         if (type == DRW_UNIFORM_BLOCK) {
699                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
700         }
701         else {
702                 location = GPU_shader_get_uniform(shgroup->shader, name);
703         }
704
705         if (location == -1) {
706                 if (G.debug & G_DEBUG)
707                         fprintf(stderr, "Uniform '%s' not found!\n", name);
708                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
709                 // BLI_assert(0);
710                 return;
711         }
712
713         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
714
715         BLI_assert(arraysize > 0);
716
717         uni->location = location;
718         uni->type = type;
719         uni->value = value;
720         uni->length = length;
721         uni->arraysize = arraysize;
722
723         /* Prepend */
724         uni->next = shgroup->interface.uniforms;
725         shgroup->interface.uniforms = uni;
726 }
727
728 static void DRW_interface_attrib(DRWShadingGroup *shgroup, const char *name, DRWAttribType type, int size, bool dummy)
729 {
730         DRWAttrib *attrib = BLI_mempool_alloc(DST.vmempool->attribs);
731         GLuint program = GPU_shader_get_program(shgroup->shader);
732
733         attrib->location = glGetAttribLocation(program, name);
734         attrib->type = type;
735         attrib->size = size;
736
737 /* Add the attribute even if it was not found, to keep memory alignment for now.
738  * Ideally the vertex format should eventually be taken from the batch automatically. */
739 #if 0
740         if (attrib->location == -1 && !dummy) {
741                 if (G.debug & G_DEBUG)
742                         fprintf(stderr, "Attribute '%s' not found!\n", name);
743                 BLI_assert(0);
744                 MEM_freeN(attrib);
745                 return;
746         }
747 #else
748         UNUSED_VARS(dummy);
749 #endif
750
751         BLI_assert(BLI_strnlen(name, 32) < 32);
752         BLI_strncpy(attrib->name, name, 32);
753
754         shgroup->interface.attribs_count += 1;
755         BLI_assert(shgroup->interface.attribs_count < MAX_ATTRIB_COUNT);
756
757         /* Prepend */
758         if (shgroup->interface.attribs == NULL) {
759                 shgroup->interface.attribs = attrib;
760                 shgroup->interface.attribs_first = attrib;
761         }
762         else {
763                 shgroup->interface.attribs->prev = attrib;
764                 shgroup->interface.attribs = attrib;
765         }
766         attrib->prev = NULL;
767 }
768
769 /** \} */
770
771
772 /* -------------------------------------------------------------------- */
773
774 /** \name Shading Group (DRW_shgroup)
775  * \{ */
776
777 DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
778 {
779         DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
780
781         /* Append */
782         if (pass->shgroups != NULL) {
783                 pass->shgroups_last->next = shgroup;
784         }
785         else {
786                 pass->shgroups = shgroup;
787         }
788         pass->shgroups_last = shgroup;
789         shgroup->next = NULL;
790
791         DRW_interface_create(&shgroup->interface, shader);
792
793         shgroup->type = DRW_SHG_NORMAL;
794         shgroup->shader = shader;
795         shgroup->state_extra = 0;
796         shgroup->state_extra_disable = ~0x0;
797         shgroup->stencil_mask = 0;
798         shgroup->batch_geom = NULL;
799         shgroup->instance_geom = NULL;
800         shgroup->instance_data = NULL;
801
802         shgroup->calls = NULL;
803         shgroup->calls_first = NULL;
804
805 #ifdef USE_GPU_SELECT
806         shgroup->pass_parent = pass;
807 #endif
808
809         return shgroup;
810 }
811
812 DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
813 {
814         double time = 0.0; /* TODO make time variable */
815
816         /* TODO: Ideally we should not convert. But since the whole codegen
817          * relies on GPUPass, we keep it as is for now. */
818         GPUPass *gpupass = GPU_material_get_pass(material);
819
820         if (!gpupass) {
821                 /* Shader compilation error */
822                 return NULL;
823         }
824
825         struct GPUShader *shader = GPU_pass_shader(gpupass);
826
827         DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
828
829         /* Converting dynamic GPUInput to DRWUniform */
830         ListBase *inputs = &gpupass->inputs;
831
832         for (GPUInput *input = inputs->first; input; input = input->next) {
833                 /* Textures */
834                 if (input->ima) {
835                         GPUTexture *tex = GPU_texture_from_blender(
836                                 input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);
837
838                         if (input->bindtex) {
839                                 DRW_shgroup_uniform_texture(grp, input->shadername, tex);
840                         }
841                 }
842                 /* Color Ramps */
843                 else if (input->tex) {
844                         DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
845                 }
846                 /* Floats */
847                 else {
848                         switch (input->type) {
849                                 case GPU_FLOAT:
850                                         DRW_shgroup_uniform_float(grp, input->shadername, (float *)input->dynamicvec, 1);
851                                         break;
852                                 case GPU_VEC2:
853                                         DRW_shgroup_uniform_vec2(grp, input->shadername, (float *)input->dynamicvec, 1);
854                                         break;
855                                 case GPU_VEC3:
856                                         DRW_shgroup_uniform_vec3(grp, input->shadername, (float *)input->dynamicvec, 1);
857                                         break;
858                                 case GPU_VEC4:
859                                         DRW_shgroup_uniform_vec4(grp, input->shadername, (float *)input->dynamicvec, 1);
860                                         break;
861                                 case GPU_MAT3:
862                                         DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
863                                         break;
864                                 case GPU_MAT4:
865                                         DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
866                                         break;
867                                 default:
868                                         break;
869                         }
870                 }
871         }
872
873         GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
874         if (ubo != NULL) {
875                 DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
876         }
877
878         return grp;
879 }
880
881 DRWShadingGroup *DRW_shgroup_material_instance_create(
882         struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob)
883 {
884         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
885
886         if (shgroup) {
887                 shgroup->type = DRW_SHG_INSTANCE;
888                 shgroup->instance_geom = geom;
889                 shgroup->instance_data = ob->data;
890         }
891
892         return shgroup;
893 }
894
895 DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
896         struct GPUMaterial *material, DRWPass *pass, int size)
897 {
898         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
899
900         if (shgroup) {
901                 shgroup->type = DRW_SHG_TRIANGLE_BATCH;
902                 shgroup->interface.instance_count = size * 3;
903                 DRW_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
904         }
905
906         return shgroup;
907 }
908
909 DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom)
910 {
911         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
912
913         shgroup->type = DRW_SHG_INSTANCE;
914         shgroup->instance_geom = geom;
915
916         return shgroup;
917 }
918
919 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
920 {
921         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
922
923         shgroup->type = DRW_SHG_POINT_BATCH;
924         DRW_shgroup_attrib_float(shgroup, "pos", 3);
925
926         return shgroup;
927 }
928
929 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
930 {
931         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
932
933         shgroup->type = DRW_SHG_LINE_BATCH;
934         DRW_shgroup_attrib_float(shgroup, "pos", 3);
935
936         return shgroup;
937 }
938
939 /* Very special batch. Use this if you position
940  * your vertices with the vertex shader
941  * and don't need any VBO attributes. */
942 DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int size)
943 {
944         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
945
946         shgroup->type = DRW_SHG_TRIANGLE_BATCH;
947         shgroup->interface.instance_count = size * 3;
948         DRW_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
949
950         return shgroup;
951 }
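
/* Illustrative usage sketch (not part of the original file; `shader`, `pass`
 * and `tri_count` are hypothetical): draw `tri_count` triangles whose
 * positions are computed entirely in the vertex shader (e.g. from
 * gl_VertexID), so no VBO attributes are needed:
 *
 *   DRWShadingGroup *grp = DRW_shgroup_empty_tri_batch_create(shader, pass, tri_count);
 */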
952
953 void DRW_shgroup_free(struct DRWShadingGroup *shgroup)
954 {
955         if (shgroup->interface.instance_vbo &&
956             (shgroup->interface.instance_batch == 0))
957         {
958                 glDeleteBuffers(1, &shgroup->interface.instance_vbo);
959         }
960
961         GWN_BATCH_DISCARD_SAFE(shgroup->batch_geom);
962 }
963
964 void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *instances)
965 {
966         BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
967         BLI_assert(shgroup->interface.instance_batch == NULL);
968
969         shgroup->interface.instance_batch = instances;
970 }
971
972 #define CALL_PREPEND(shgroup, call) { \
973         if (shgroup->calls == NULL) { \
974                 shgroup->calls = call; \
975                 shgroup->calls_first = call; \
976         } \
977         else { \
978                 ((DRWCall *)(shgroup->calls))->head.prev = call; \
979                 shgroup->calls = call; \
980         } \
981         call->head.prev = NULL; \
982 } ((void)0)
983
984
985 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
986 {
987         BLI_assert(geom != NULL);
988
989         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
990
991         CALL_PREPEND(shgroup, call);
992
993         call->head.type = DRW_CALL_SINGLE;
994 #ifdef USE_GPU_SELECT
995         call->head.select_id = g_DRW_select_id;
996 #endif
997
998         if (obmat != NULL) {
999                 copy_m4_m4(call->obmat, obmat);
1000         }
1001
1002         call->geometry = geom;
1003         call->ob_data = NULL;
1004 }
1005
1006 void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
1007 {
1008         BLI_assert(geom != NULL);
1009
1010         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
1011
1012         CALL_PREPEND(shgroup, call);
1013
1014         call->head.type = DRW_CALL_SINGLE;
1015 #ifdef USE_GPU_SELECT
1016         call->head.select_id = g_DRW_select_id;
1017 #endif
1018
1019         copy_m4_m4(call->obmat, ob->obmat);
1020         call->geometry = geom;
1021         call->ob_data = ob->data;
1022 }
1023
1024 void DRW_shgroup_call_generate_add(
1025         DRWShadingGroup *shgroup,
1026         DRWCallGenerateFn *geometry_fn, void *user_data,
1027         float (*obmat)[4])
1028 {
1029         BLI_assert(geometry_fn != NULL);
1030
1031         DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);
1032
1033         CALL_PREPEND(shgroup, call);
1034
1035         call->head.type = DRW_CALL_GENERATE;
1036 #ifdef USE_GPU_SELECT
1037         call->head.select_id = g_DRW_select_id;
1038 #endif
1039
1040         if (obmat != NULL) {
1041                 copy_m4_m4(call->obmat, obmat);
1042         }
1043
1044         call->geometry_fn = geometry_fn;
1045         call->user_data = user_data;
1046 }
1047
1048 static void sculpt_draw_cb(
1049         DRWShadingGroup *shgroup,
1050         void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
1051         void *user_data)
1052 {
1053         Object *ob = user_data;
1054         PBVH *pbvh = ob->sculpt->pbvh;
1055
1056         if (pbvh) {
1057                 BKE_pbvh_draw_cb(
1058                         pbvh, NULL, NULL, false,
1059                         (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
1060         }
1061 }
1062
1063 void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
1064 {
1065         DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
1066 }
1067
1068 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
1069 {
1070         DRWInterface *interface = &shgroup->interface;
1071
1072 #ifdef USE_GPU_SELECT
1073         if ((G.f & G_PICKSEL) && (interface->instance_count > 0)) {
1074                 DRWShadingGroup *original_shgroup = shgroup;
1075                 shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
1076                 memcpy(shgroup, original_shgroup, sizeof(DRWShadingGroup));
1077
1078                 shgroup->calls = NULL;
1079                 shgroup->calls_first = NULL;
1080
1081                 interface = &shgroup->interface;
1082                 interface->instance_count = 0;
1083
1084                 /* Append */
1085                 if (shgroup->pass_parent->shgroups != NULL) {
1086                         shgroup->pass_parent->shgroups_last->next = shgroup;
1087                 }
1088                 else {
1089                         shgroup->pass_parent->shgroups = shgroup;
1090                 }
1091                 shgroup->pass_parent->shgroups_last = shgroup;
1092                 shgroup->next = NULL;
1093         }
1094 #endif
1095
1096         DRWCallDynamic *call = BLI_mempool_alloc(DST.vmempool->calls_dynamic);
1097
1098         CALL_PREPEND(shgroup, call);
1099
1100         BLI_assert(attr_len == interface->attribs_count);
1101         UNUSED_VARS_NDEBUG(attr_len);
1102
1103         call->head.type = DRW_CALL_DYNAMIC;
1104 #ifdef USE_GPU_SELECT
1105         call->head.select_id = g_DRW_select_id;
1106 #endif
1107
1108         if (interface->attribs_count != 0) {
1109                 memcpy((void *)call->data, attr, sizeof(void *) * interface->attribs_count);
1110         }
1111
1112         interface->instance_count += 1;
1113 }
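
/* Illustrative usage sketch (not part of the original file; `shader`, `pass`
 * and `point_co` are hypothetical). Attribute pointers must be passed in the
 * same order the attributes were declared on the shading group:
 *
 *   DRWShadingGroup *grp = DRW_shgroup_point_batch_create(shader, pass);
 *   // "pos" (3 floats) was declared by DRW_shgroup_point_batch_create().
 *   const void *attr[1] = {point_co};
 *   DRW_shgroup_call_dynamic_add_array(grp, attr, 1);
 */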
1114
1115 /* Used for instancing with no attributes */
1116 void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, int count)
1117 {
1118         DRWInterface *interface = &shgroup->interface;
1119
1120         BLI_assert(interface->attribs_count == 0);
1121
1122         interface->instance_count = count;
1123 }
1124
1125 /**
1126  * State is added to #Pass.state while drawing.
1127  * Use to temporarily enable draw options.
1128  */
1129 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
1130 {
1131         shgroup->state_extra |= state;
1132 }
1133
1134 void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
1135 {
1136         shgroup->state_extra_disable &= ~state;
1137 }
1138
1139 void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
1140 {
1141         shgroup->stencil_mask = mask;
1142 }
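
/* Illustrative usage sketch (not part of the original file; `grp` is
 * hypothetical): temporarily add blending on top of the pass state, drop the
 * depth test and set a stencil mask:
 *
 *   DRW_shgroup_state_enable(grp, DRW_STATE_BLEND);
 *   DRW_shgroup_state_disable(grp, DRW_STATE_DEPTH_LESS);
 *   DRW_shgroup_stencil_mask(grp, 0xFF);
 */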
1143
1144 void DRW_shgroup_attrib_float(DRWShadingGroup *shgroup, const char *name, int size)
1145 {
1146         DRW_interface_attrib(shgroup, name, DRW_ATTRIB_FLOAT, size, false);
1147 }
1148
1149 void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
1150 {
1151         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
1152 }
1153
1154 void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
1155 {
1156         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
1157 }
1158
1159 void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
1160 {
1161         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
1162 }
1163
1164 void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const bool *value, int arraysize)
1165 {
1166         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
1167 }
1168
1169 void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
1170 {
1171         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
1172 }
1173
1174 void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
1175 {
1176         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
1177 }
1178
1179 void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
1180 {
1181         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
1182 }
1183
1184 void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
1185 {
1186         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
1187 }
1188
1189 void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
1190 {
1191         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
1192 }
1193
1194 void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
1195 {
1196         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
1197 }
1198
1199 void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
1200 {
1201         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
1202 }
1203
1204 void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
1205 {
1206         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
1207 }
1208
1209 void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
1210 {
1211         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
1212 }
1213
1214 void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
1215 {
1216         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_MAT3, value, 9, 1);
1217 }
1218
1219 void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
1220 {
1221         DRW_interface_uniform(shgroup, name, DRW_UNIFORM_MAT4, value, 16, 1);
1222 }
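
/* Illustrative usage sketch (not part of the original file; `grp`, `tex` and
 * `color` are hypothetical). Uniform values are stored by pointer, not copied,
 * so the referenced memory must remain valid until the pass is drawn:
 *
 *   static float color[4] = {1.0f, 0.5f, 0.0f, 1.0f};
 *   DRW_shgroup_uniform_vec4(grp, "color", color, 1);
 *   DRW_shgroup_uniform_texture(grp, "image", tex);
 */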
1223
1224 /* Creates a VBO containing OpenGL primitives for all DRWCallDynamic calls. */
1225 static void shgroup_dynamic_batch(DRWShadingGroup *shgroup)
1226 {
1227         DRWInterface *interface = &shgroup->interface;
1228         int nbr = interface->instance_count;
1229
1230         Gwn_PrimType type = (shgroup->type == DRW_SHG_POINT_BATCH) ? GWN_PRIM_POINTS :
1231                              (shgroup->type == DRW_SHG_TRIANGLE_BATCH) ? GWN_PRIM_TRIS : GWN_PRIM_LINES;
1232
1233         if (nbr == 0)
1234                 return;
1235
1236         /* Upload Data */
1237         if (interface->vbo_format.attrib_ct == 0) {
1238                 for (DRWAttrib *attrib = interface->attribs_first; attrib; attrib = attrib->prev) {
1239                         BLI_assert(attrib->size <= 4); /* matrices have no place here for now */
1240                         if (attrib->type == DRW_ATTRIB_FLOAT) {
1241                                 attrib->format_id = GWN_vertformat_attr_add(
1242                                         &interface->vbo_format, attrib->name, GWN_COMP_F32, attrib->size, GWN_FETCH_FLOAT);
1243                         }
1244                         else if (attrib->type == DRW_ATTRIB_INT) {
1245                                 attrib->format_id = GWN_vertformat_attr_add(
1246                                         &interface->vbo_format, attrib->name, GWN_COMP_I8, attrib->size, GWN_FETCH_INT);
1247                         }
1248                         else {
1249                                 BLI_assert(false);
1250                         }
1251                 }
1252         }
1253
1254         Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&interface->vbo_format);
1255         GWN_vertbuf_data_alloc(vbo, nbr);
1256
1257         int j = 0;
1258         for (DRWCallDynamic *call = shgroup->calls_first; call; call = call->head.prev, j++) {
1259                 int i = 0;
1260                 for (DRWAttrib *attrib = interface->attribs_first; attrib; attrib = attrib->prev, i++) {
1261                         GWN_vertbuf_attr_set(vbo, attrib->format_id, j, call->data[i]);
1262                 }
1263         }
1264
1265         /* TODO make the batch dynamic instead of freeing it every time */
1266         if (shgroup->batch_geom)
1267                 GWN_batch_discard(shgroup->batch_geom);
1268
1269         shgroup->batch_geom = GWN_batch_create_ex(type, vbo, NULL, GWN_BATCH_OWNS_VBO);
1270 }
1271
1272 static void shgroup_dynamic_instance(DRWShadingGroup *shgroup)
1273 {
1274         int i = 0;
1275         int offset = 0;
1276         DRWInterface *interface = &shgroup->interface;
1277         int buffer_size = 0;
1278
1279         if (interface->instance_batch != NULL) {
1280                 return;
1281         }
1282
1283         /* TODO We still need this because gawain does not support Matrix attribs. */
1284         if (interface->instance_count == 0) {
1285                 if (interface->instance_vbo) {
1286                         glDeleteBuffers(1, &interface->instance_vbo);
1287                         interface->instance_vbo = 0;
1288                 }
1289                 return;
1290         }
1291
1292         /* only once */
1293         if (interface->attribs_stride == 0) {
1294                 for (DRWAttrib *attrib = interface->attribs_first; attrib; attrib = attrib->prev, i++) {
1295                         BLI_assert(attrib->type == DRW_ATTRIB_FLOAT); /* Only float for now */
1296                         interface->attribs_stride += attrib->size;
1297                         interface->attribs_size[i] = attrib->size;
1298                         interface->attribs_loc[i] = attrib->location;
1299                 }
1300         }
1301
1302         /* Gather Data */
1303         buffer_size = sizeof(float) * interface->attribs_stride * interface->instance_count;
1304         float *data = MEM_mallocN(buffer_size, "Instance VBO data");
1305
1306         for (DRWCallDynamic *call = shgroup->calls_first; call; call = call->head.prev) {
1307                 for (int j = 0; j < interface->attribs_count; ++j) {
1308                         memcpy(data + offset, call->data[j], sizeof(float) * interface->attribs_size[j]);
1309                         offset += interface->attribs_size[j];
1310                 }
1311         }
1312
1313         /* TODO poke mike to add this to gawain */
1314         if (interface->instance_vbo) {
1315                 glDeleteBuffers(1, &interface->instance_vbo);
1316                 interface->instance_vbo = 0;
1317         }
1318
1319         glGenBuffers(1, &interface->instance_vbo);
1320         glBindBuffer(GL_ARRAY_BUFFER, interface->instance_vbo);
1321         glBufferData(GL_ARRAY_BUFFER, buffer_size, data, GL_STATIC_DRAW);
1322
1323         MEM_freeN(data);
1324 }
1325
1326 static void shgroup_dynamic_batch_from_calls(DRWShadingGroup *shgroup)
1327 {
1328         if ((shgroup->interface.instance_vbo || shgroup->batch_geom) &&
1329             (G.debug_value == 667))
1330         {
1331                 return;
1332         }
1333
1334         if (shgroup->type == DRW_SHG_INSTANCE) {
1335                 shgroup_dynamic_instance(shgroup);
1336         }
1337         else {
1338                 shgroup_dynamic_batch(shgroup);
1339         }
1340 }
1341
1342 /** \} */
1343
1344
1345 /* -------------------------------------------------------------------- */
1346
1347 /** \name Passes (DRW_pass)
1348  * \{ */
1349
1350 DRWPass *DRW_pass_create(const char *name, DRWState state)
1351 {
1352         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
1353         pass->state = state;
1354         BLI_strncpy(pass->name, name, MAX_PASS_NAME);
1355
1356         pass->shgroups = NULL;
1357         pass->shgroups_last = NULL;
1358
1359         return pass;
1360 }
1361
1362 void DRW_pass_free(DRWPass *pass)
1363 {
1364         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1365                 DRW_shgroup_free(shgroup);
1366         }
1367
1368         pass->shgroups = NULL;
1369         pass->shgroups_last = NULL;
1370 }
1371
1372 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
1373 {
1374         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1375                 callback(userData, shgroup);
1376         }
1377 }
1378
1379 typedef struct ZSortData {
1380         float *axis;
1381         float *origin;
1382 } ZSortData;
1383
1384 static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
1385 {
1386         const ZSortData *zsortdata = (ZSortData *)thunk;
1387         const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
1388         const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
1389
1390         const DRWCall *call_a;
1391         const DRWCall *call_b;
1392
1393         call_a = shgrp_a->calls_first;
1394         call_b = shgrp_b->calls_first;
1395
1396         if (call_a == NULL) return -1;
1397         if (call_b == NULL) return -1;
1398
1399         float tmp[3];
1400         sub_v3_v3v3(tmp, zsortdata->origin, call_a->obmat[3]);
1401         const float a_sq = dot_v3v3(zsortdata->axis, tmp);
1402         sub_v3_v3v3(tmp, zsortdata->origin, call_b->obmat[3]);
1403         const float b_sq = dot_v3v3(zsortdata->axis, tmp);
1404
1405         if      (a_sq < b_sq) return  1;
1406         else if (a_sq > b_sq) return -1;
1407         else {
1408                 /* If there is a depth prepass put it before */
1409                 if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1410                         return -1;
1411                 }
1412                 else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
1413                         return  1;
1414                 }
1415                 else return  0;
1416         }
1417 }
1418
1419 /* ------------------ Shading group sorting --------------------- */
1420
1421 #define SORT_IMPL_LINKTYPE DRWShadingGroup
1422
1423 #define SORT_IMPL_USE_THUNK
1424 #define SORT_IMPL_FUNC shgroup_sort_fn_r
1425 #include "../../blenlib/intern/list_sort_impl.h"
1426 #undef SORT_IMPL_FUNC
1427 #undef SORT_IMPL_USE_THUNK
1428
1429 #undef SORT_IMPL_LINKTYPE
1430
1431 /**
1432  * Sort Shading groups by decreasing Z of their first draw call.
1433  * This is useful for order-dependent effects such as transparency.
1434  **/
1435 void DRW_pass_sort_shgroup_z(DRWPass *pass)
1436 {
1437         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1438
1439         float (*viewinv)[4];
1440         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1441                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1442
1443         ZSortData zsortdata = {viewinv[2], viewinv[3]};
1444
1445         if (pass->shgroups && pass->shgroups->next) {
1446                 pass->shgroups = shgroup_sort_fn_r(pass->shgroups, pass_shgroup_dist_sort, &zsortdata);
1447
1448                 /* Find the new last shgroup. */
1449                 DRWShadingGroup *last = pass->shgroups;
1450                 while ((last = last->next)) {
1451                         /* Do nothing */
1452                 };
1453                 pass->shgroups_last = last;
1454         }
1455 }
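
/* Illustrative usage sketch (not part of the original file; `psl` is a
 * hypothetical pass list): sort a transparent pass back to front before
 * drawing it:
 *
 *   DRW_pass_sort_shgroup_z(psl->transparent_pass);
 *   DRW_draw_pass(psl->transparent_pass);
 */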
1456
1457 /** \} */
1458
1459
1460 /* -------------------------------------------------------------------- */
1461
1462 /** \name Draw (DRW_draw)
1463  * \{ */
1464
1465 static void DRW_state_set(DRWState state)
1466 {
1467         if (DST.state == state) {
1468                 return;
1469         }
1470
1471
1472 #define CHANGED_TO(f) \
1473         ((DST.state & (f)) ? \
1474                 ((state & (f)) ?  0 : -1) : \
1475                 ((state & (f)) ?  1 :  0))
1476
1477 #define CHANGED_ANY(f) \
1478         ((DST.state & (f)) != (state & (f)))
1479
1480 #define CHANGED_ANY_STORE_VAR(f, enabled) \
1481         ((DST.state & (f)) != (enabled = (state & (f))))
1482
1483         /* Depth Write */
1484         {
1485                 int test;
1486                 if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
1487                         if (test == 1) {
1488                                 glDepthMask(GL_TRUE);
1489                         }
1490                         else {
1491                                 glDepthMask(GL_FALSE);
1492                         }
1493                 }
1494         }
1495
1496         /* Color Write */
1497         {
1498                 int test;
1499                 if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
1500                         if (test == 1) {
1501                                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
1502                         }
1503                         else {
1504                                 glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
1505                         }
1506                 }
1507         }
1508
1509         /* Cull */
1510         {
1511                 DRWState test;
1512                 if (CHANGED_ANY_STORE_VAR(
1513                         DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
1514                         test))
1515                 {
1516                         if (test) {
1517                                 glEnable(GL_CULL_FACE);
1518
1519                                 if ((state & DRW_STATE_CULL_BACK) != 0) {
1520                                         glCullFace(GL_BACK);
1521                                 }
1522                                 else if ((state & DRW_STATE_CULL_FRONT) != 0) {
1523                                         glCullFace(GL_FRONT);
1524                                 }
1525                                 else {
1526                                         BLI_assert(0);
1527                                 }
1528                         }
1529                         else {
1530                                 glDisable(GL_CULL_FACE);
1531                         }
1532                 }
1533         }
1534
1535         /* Depth Test */
1536         {
1537                 DRWState test;
1538                 if (CHANGED_ANY_STORE_VAR(
1539                         DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
1540                         test))
1541                 {
1542                         if (test) {
1543                                 glEnable(GL_DEPTH_TEST);
1544
1545                                 if (state & DRW_STATE_DEPTH_LESS) {
1546                                         glDepthFunc(GL_LEQUAL);
1547                                 }
1548                                 else if (state & DRW_STATE_DEPTH_EQUAL) {
1549                                         glDepthFunc(GL_EQUAL);
1550                                 }
1551                                 else if (state & DRW_STATE_DEPTH_GREATER) {
1552                                         glDepthFunc(GL_GREATER);
1553                                 }
1554                                 else if (state & DRW_STATE_DEPTH_ALWAYS) {
1555                                         glDepthFunc(GL_ALWAYS);
1556                                 }
1557                                 else {
1558                                         BLI_assert(0);
1559                                 }
1560                         }
1561                         else {
1562                                 glDisable(GL_DEPTH_TEST);
1563                         }
1564                 }
1565         }
1566
1567         /* Wire Width */
1568         {
1569                 if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_LARGE)) {
1570                         if ((state & DRW_STATE_WIRE) != 0) {
1571                                 glLineWidth(1.0f);
1572                         }
1573                         else if ((state & DRW_STATE_WIRE_LARGE) != 0) {
1574                                 glLineWidth(UI_GetThemeValuef(TH_OUTLINE_WIDTH) * 2.0f);
1575                         }
1576                         else {
1577                                 /* do nothing */
1578                         }
1579                 }
1580         }
1581
1582         /* Point Size */
1583         {
1584                 int test;
1585                 if ((test = CHANGED_TO(DRW_STATE_POINT))) {
1586                         if (test == 1) {
1587                                 GPU_enable_program_point_size();
1588                                 glPointSize(5.0f);
1589                         }
1590                         else {
1591                                 GPU_disable_program_point_size();
1592                         }
1593                 }
1594         }
1595
1596         /* Blending (all buffers) */
1597         {
1598                 int test;
1599                 if (CHANGED_ANY_STORE_VAR(
1600                         DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION,
1601                         test))
1602                 {
1603                         if (test) {
1604                                 glEnable(GL_BLEND);
1605
1606                                 if ((state & DRW_STATE_BLEND) != 0) {
1607                                         glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
1608                                 }
1609                                 else if ((state & DRW_STATE_MULTIPLY) != 0) {
1610                                         glBlendFunc(GL_DST_COLOR, GL_ZERO);
1611                                 }
1612                                 else if ((state & DRW_STATE_TRANSMISSION) != 0) {
1613                                         glBlendFunc(GL_ONE, GL_SRC_ALPHA);
1614                                 }
1615                                 else if ((state & DRW_STATE_ADDITIVE) != 0) {
1616                                         glBlendFunc(GL_SRC_ALPHA, GL_ONE);
1617                                 }
1618                                 else {
1619                                         BLI_assert(0);
1620                                 }
1621                         }
1622                         else {
1623                                 glDisable(GL_BLEND);
1624                         }
1625                 }
1626         }
1627
1628         /* Clip Planes */
1629         {
1630                 int test;
1631                 if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
1632                         if (test == 1) {
1633                                 for (int i = 0; i < DST.num_clip_planes; ++i) {
1634                                         glEnable(GL_CLIP_DISTANCE0 + i);
1635                                 }
1636                         }
1637                         else {
1638                                 for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
1639                                         glDisable(GL_CLIP_DISTANCE0 + i);
1640                                 }
1641                         }
1642                 }
1643         }
1644
1645         /* Line Stipple */
1646         {
1647                 int test;
1648                 if (CHANGED_ANY_STORE_VAR(
1649                         DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
1650                         test))
1651                 {
1652                         if (test) {
1653                                 if ((state & DRW_STATE_STIPPLE_2) != 0) {
1654                                         setlinestyle(2);
1655                                 }
1656                                 else if ((state & DRW_STATE_STIPPLE_3) != 0) {
1657                                         setlinestyle(3);
1658                                 }
1659                                 else if ((state & DRW_STATE_STIPPLE_4) != 0) {
1660                                         setlinestyle(4);
1661                                 }
1662                                 else {
1663                                         BLI_assert(0);
1664                                 }
1665                         }
1666                         else {
1667                                 setlinestyle(0);
1668                         }
1669                 }
1670         }
1671
1672         /* Stencil */
1673         {
1674                 DRWState test;
1675                 if (CHANGED_ANY_STORE_VAR(
1676                         DRW_STATE_WRITE_STENCIL |
1677                         DRW_STATE_STENCIL_EQUAL,
1678                         test))
1679                 {
1680                         if (test) {
1681                                 glEnable(GL_STENCIL_TEST);
1682
1683                                 /* Stencil Write */
1684                                 if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
1685                                         glStencilMask(0xFF);
1686                                         glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
1687                                 }
1688                                 /* Stencil Test */
1689                                 else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
1690                                         glStencilMask(0x00); /* disable write */
1691                                         DST.stencil_mask = 0;
1692                                 }
1693                                 else {
1694                                         BLI_assert(0);
1695                                 }
1696                         }
1697                         else {
1698                                 /* disable write & test */
1699                                 DST.stencil_mask = 0;
1700                                 glStencilMask(0x00);
1701                                 glStencilFunc(GL_ALWAYS, 1, 0xFF);
1702                                 glDisable(GL_STENCIL_TEST);
1703                         }
1704                 }
1705         }
1706
1707 #undef CHANGED_TO
1708 #undef CHANGED_ANY
1709 #undef CHANGED_ANY_STORE_VAR
1710
1711         DST.state = state;
1712 }
1713
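/* Set the stencil reference value for the following draw calls.
 * glStencilFunc() is only updated when the mask actually changes, and only
 * while the current state writes or tests the stencil buffer. */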
1714 static void DRW_stencil_set(unsigned int mask)
1715 {
1716         if (DST.stencil_mask != mask) {
1717                 /* Stencil Write */
1718                 if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
1719                         glStencilFunc(GL_ALWAYS, mask, 0xFF);
1720                         DST.stencil_mask = mask;
1721                 }
1722                 /* Stencil Test */
1723                 else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
1724                         glStencilFunc(GL_EQUAL, mask, 0xFF);
1725                         DST.stencil_mask = mask;
1726                 }
1727         }
1728 }
1729
1730 typedef struct DRWBoundTexture {
1731         struct DRWBoundTexture *next, *prev;
1732         GPUTexture *tex;
1733 } DRWBoundTexture;
1734
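/* Compute and upload the per-object matrices (model, model-view, normal, ...)
 * and the orco texture factors. Only the matrices actually requested by the
 * shader interface (location != -1) are computed, and the viewport matrix
 * overrides are honored when set. */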
1735 static void draw_geometry_prepare(
1736         DRWShadingGroup *shgroup, const float (*obmat)[4], const float *texcoloc, const float *texcosize)
1737 {
1738         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1739         DRWInterface *interface = &shgroup->interface;
1740
1741         float mvp[4][4], mv[4][4], mi[4][4], mvi[4][4], pi[4][4], n[3][3], wn[3][3];
1742         float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
1743         float eye[3] = { 0.0f, 0.0f, 1.0f }; /* looking into the screen */
1744
1745         bool do_pi = (interface->projectioninverse != -1);
1746         bool do_mvp = (interface->modelviewprojection != -1);
1747         bool do_mi = (interface->modelinverse != -1);
1748         bool do_mv = (interface->modelview != -1);
1749         bool do_mvi = (interface->modelviewinverse != -1);
1750         bool do_n = (interface->normal != -1);
1751         bool do_wn = (interface->worldnormal != -1);
1752         bool do_eye = (interface->eye != -1);
1753         bool do_orco = (interface->orcotexfac != -1) && (texcoloc != NULL) && (texcosize != NULL);
1754
1755         /* Matrix override */
1756         float (*persmat)[4];
1757         float (*persinv)[4];
1758         float (*viewmat)[4];
1759         float (*viewinv)[4];
1760         float (*winmat)[4];
1761         float (*wininv)[4];
1762
1763         persmat = (viewport_matrix_override.override[DRW_MAT_PERS])
1764                   ? viewport_matrix_override.mat[DRW_MAT_PERS] : rv3d->persmat;
1765         persinv = (viewport_matrix_override.override[DRW_MAT_PERSINV])
1766                   ? viewport_matrix_override.mat[DRW_MAT_PERSINV] : rv3d->persinv;
1767         viewmat = (viewport_matrix_override.override[DRW_MAT_VIEW])
1768                   ? viewport_matrix_override.mat[DRW_MAT_VIEW] : rv3d->viewmat;
1769         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1770                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1771         winmat = (viewport_matrix_override.override[DRW_MAT_WIN])
1772                   ? viewport_matrix_override.mat[DRW_MAT_WIN] : rv3d->winmat;
1773         wininv = viewport_matrix_override.mat[DRW_MAT_WININV];
1774
1775         if (do_pi) {
1776                 if (!viewport_matrix_override.override[DRW_MAT_WININV]) {
1777                         invert_m4_m4(pi, winmat);
1778                         wininv = pi;
1779                 }
1780         }
1781         if (do_mi) {
1782                 invert_m4_m4(mi, obmat);
1783         }
1784         if (do_mvp) {
1785                 mul_m4_m4m4(mvp, persmat, obmat);
1786         }
1787         if (do_mv || do_mvi || do_n || do_eye) {
1788                 mul_m4_m4m4(mv, viewmat, obmat);
1789         }
1790         if (do_mvi) {
1791                 invert_m4_m4(mvi, mv);
1792         }
1793         if (do_n || do_eye) {
1794                 copy_m3_m4(n, mv);
1795                 invert_m3(n);
1796                 transpose_m3(n);
1797         }
1798         if (do_wn) {
1799                 copy_m3_m4(wn, obmat);
1800                 invert_m3(wn);
1801                 transpose_m3(wn);
1802         }
1803         if (do_eye) {
1804                 /* Used by orthographic wires */
1805                 float tmp[3][3];
1806                 invert_m3_m3(tmp, n);
1807                 /* set eye vector, transformed to object coords */
1808                 mul_m3_v3(tmp, eye);
1809         }
1810         if (do_orco) {
1811                 mul_v3_v3fl(orcofacs[1], texcosize, 2.0f);
1812                 invert_v3(orcofacs[1]);
1813                 sub_v3_v3v3(orcofacs[0], texcoloc, texcosize);
1814                 negate_v3(orcofacs[0]);
1815                 mul_v3_v3(orcofacs[0], orcofacs[1]); /* results in a nice MADD in the shader */
1816         }
1817
1818         /* Should be really simple */
1819         /* step 1 : bind object dependent matrices */
1820         /* TODO : Some of these are not object dependent.
1821          * They should be grouped inside a UBO updated once per redraw.
1822          * The rest can also go into a UBO to reduce API calls. */
1823         GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)obmat);
1824         GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)mi);
1825         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)mvp);
1826         GPU_shader_uniform_vector(shgroup->shader, interface->viewinverse, 16, 1, (float *)viewinv);
1827         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojection, 16, 1, (float *)persmat);
1828         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojectioninverse, 16, 1, (float *)persinv);
1829         GPU_shader_uniform_vector(shgroup->shader, interface->projection, 16, 1, (float *)winmat);
1830         GPU_shader_uniform_vector(shgroup->shader, interface->projectioninverse, 16, 1, (float *)wininv);
1831         GPU_shader_uniform_vector(shgroup->shader, interface->view, 16, 1, (float *)viewmat);
1832         GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)mv);
1833         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)mvi);
1834         GPU_shader_uniform_vector(shgroup->shader, interface->normal, 9, 1, (float *)n);
1835         GPU_shader_uniform_vector(shgroup->shader, interface->worldnormal, 9, 1, (float *)wn);
1836         GPU_shader_uniform_vector(shgroup->shader, interface->camtexfac, 4, 1, (float *)rv3d->viewcamtexcofac);
1837         GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)orcofacs);
1838         GPU_shader_uniform_vector(shgroup->shader, interface->eye, 3, 1, (float *)eye);
1839         GPU_shader_uniform_vector(shgroup->shader, interface->clipplanes, 4, DST.num_clip_planes, (float *)DST.clip_planes_eq);
1840 }
1841
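/* Bind the batch to the shading group's shader program and issue the draw
 * call, taking the instanced code paths when instance data is attached. */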
1842 static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
1843 {
1844         DRWInterface *interface = &shgroup->interface;
1845         /* step 2 : bind vertex array & draw */
1846         GWN_batch_program_set(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
1847         if (interface->instance_batch) {
1848                 GWN_batch_draw_stupid_instanced_with_batch(geom, interface->instance_batch);
1849         }
1850         else if (interface->instance_vbo) {
1851                 GWN_batch_draw_stupid_instanced(
1852                         geom, interface->instance_vbo, interface->instance_count, interface->attribs_count,
1853                         interface->attribs_stride, interface->attribs_size, interface->attribs_loc);
1854         }
1855         else {
1856                 GWN_batch_draw_stupid(geom);
1857         }
1858         /* XXX this just tells Gawain we are done with the shader.
1859          * This does not unbind the shader. */
1860         GWN_batch_program_unset(geom);
1861 }
1862
1863 static void draw_geometry(DRWShadingGroup *shgroup, Gwn_Batch *geom, const float (*obmat)[4], ID *ob_data)
1864 {
1865         float *texcoloc = NULL;
1866         float *texcosize = NULL;
1867
1868         if (ob_data != NULL) {
1869                 switch (GS(ob_data->name)) {
1870                         case ID_ME:
1871                                 BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
1872                                 break;
1873                         default:
1874                                 /* TODO, curve, metaball? */
1875                                 break;
1876                 }
1877         }
1878
1879         draw_geometry_prepare(shgroup, obmat, texcoloc, texcosize);
1880
1881         draw_geometry_execute(shgroup, geom);
1882 }
1883
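/* Bind a texture to a free slot, or just reuse its current slot if it is
 * already bound. Slots are assigned round-robin (RST.bind_tex_inc); slots
 * already used by the current shading group are skipped and stale bindings
 * are evicted. */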
1884 static void bind_texture(GPUTexture *tex)
1885 {
1886         int bind_num = GPU_texture_bound_number(tex);
1887         if (bind_num == -1) {
1888                 for (int i = 0; i < GPU_max_textures(); ++i) {
1889                         RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures();
1890                         if (RST.bound_tex_slots[RST.bind_tex_inc] == false) {
1891                                 if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
1892                                         GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
1893                                 }
1894                                 GPU_texture_bind(tex, RST.bind_tex_inc);
1895                                 RST.bound_texs[RST.bind_tex_inc] = tex;
1896                                 RST.bound_tex_slots[RST.bind_tex_inc] = true;
1897                                 return;
1898                         }
1899                 }
1900
1901                 printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
1902         }
1903         RST.bound_tex_slots[bind_num] = true;
1904 }
1905
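/* Bind a uniform buffer to the next free binding point. Unlike textures,
 * binding points are assigned linearly and reset for every shading group. */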
1906 static void bind_ubo(GPUUniformBuffer *ubo)
1907 {
1908         if (RST.bind_ubo_inc < GPU_max_ubo_binds()) {
1909                 GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
1910                 RST.bind_ubo_inc++;
1911         }
1912         else {
1913                 /* This does not depend on user input.
1914                  * It is our responsibility to make sure there are enough slots. */
1915                 BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
1916
1917                 /* printf so user can report bad behaviour */
1918                 printf("Not enough ubo slots! This should not happen!\n");
1919         }
1920 }
1921
1922 static void release_texture_slots(void)
1923 {
1924         memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
1925 }
1926
1927 static void release_ubo_slots(void)
1928 {
1929         RST.bind_ubo_inc = 0;
1930 }
1931
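/* Draw a single shading group: bind its shader (if it changed), apply the
 * combined pass/group state and stencil mask, upload every uniform declared
 * in the interface, then issue the draw calls (batched, instanced, or one per
 * DRWCall, flipping the front face for negatively scaled objects). */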
1932 static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
1933 {
1934         BLI_assert(shgroup->shader);
1935
1936         DRWInterface *interface = &shgroup->interface;
1937         GPUTexture *tex;
1938         GPUUniformBuffer *ubo;
1939         int val;
1940         float fval;
1941
1942         if (DST.shader != shgroup->shader) {
1943                 if (DST.shader) GPU_shader_unbind();
1944                 GPU_shader_bind(shgroup->shader);
1945                 DST.shader = shgroup->shader;
1946         }
1947
1948         const bool is_normal = ELEM(shgroup->type, DRW_SHG_NORMAL);
1949
1950         if (!is_normal) {
1951                 shgroup_dynamic_batch_from_calls(shgroup);
1952         }
1953
1954         release_texture_slots();
1955         release_ubo_slots();
1956
1957         DRW_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
1958         DRW_stencil_set(shgroup->stencil_mask);
1959
1960         /* Bind Uniforms */
1961         /* Don't check anything; the interface should already contain as few uniforms as possible. */
1962         for (DRWUniform *uni = interface->uniforms; uni; uni = uni->next) {
1963                 switch (uni->type) {
1964                         case DRW_UNIFORM_SHORT_TO_INT:
1965                                 val = (int)*((short *)uni->value);
1966                                 GPU_shader_uniform_vector_int(
1967                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
1968                                 break;
1969                         case DRW_UNIFORM_SHORT_TO_FLOAT:
1970                                 fval = (float)*((short *)uni->value);
1971                                 GPU_shader_uniform_vector(
1972                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
1973                                 break;
1974                         case DRW_UNIFORM_BOOL:
1975                         case DRW_UNIFORM_INT:
1976                                 GPU_shader_uniform_vector_int(
1977                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
1978                                 break;
1979                         case DRW_UNIFORM_FLOAT:
1980                         case DRW_UNIFORM_MAT3:
1981                         case DRW_UNIFORM_MAT4:
1982                                 GPU_shader_uniform_vector(
1983                                         shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
1984                                 break;
1985                         case DRW_UNIFORM_TEXTURE:
1986                                 tex = (GPUTexture *)uni->value;
1987                                 BLI_assert(tex);
1988                                 bind_texture(tex);
1989                                 GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
1990                                 break;
1991                         case DRW_UNIFORM_BUFFER:
1992                                 if (!DRW_state_is_fbo()) {
1993                                         break;
1994                                 }
1995                                 tex = *((GPUTexture **)uni->value);
1996                                 BLI_assert(tex);
1997                                 bind_texture(tex);
1998                                 GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
1999                                 break;
2000                         case DRW_UNIFORM_BLOCK:
2001                                 ubo = (GPUUniformBuffer *)uni->value;
2002                                 bind_ubo(ubo);
2003                                 GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
2004                                 break;
2005                 }
2006         }
2007
2008 #ifdef USE_GPU_SELECT
2009         /* use the first item because, for selection, we only ever add one call */
2010 #  define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
2011         if ((G.f & G_PICKSEL) && (_call)) { \
2012                 GPU_select_load_id((_call)->head.select_id); \
2013         } ((void)0)
2014
2015 #  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_call_last, _call_first) \
2016         if ((G.f & G_PICKSEL) && _call_first) { \
2017                 BLI_assert(_call_first && (_call_first == _call_last)); \
2018                 GPU_select_load_id(((DRWCall *)_call_first)->head.select_id); \
2019         } ((void)0)
2020
2021 #else
2022 #  define GPU_SELECT_LOAD_IF_PICKSEL(call)
2023 #  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(call, _call_first)
2024 #endif
2025
2026         /* Rendering Calls */
2027         if (!is_normal) {
2028                 /* Replacing multiple calls with only one */
2029                 float obmat[4][4];
2030                 unit_m4(obmat);
2031
2032                 if (shgroup->type == DRW_SHG_INSTANCE &&
2033                     (interface->instance_count > 0 || interface->instance_batch != NULL))
2034                 {
2035                         GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup->calls, shgroup->calls_first);
2036                         draw_geometry(shgroup, shgroup->instance_geom, obmat, shgroup->instance_data);
2037                 }
2038                 else {
2039                         /* Some dynamic batches can have no geometry (no calls to aggregate) */
2040                         if (shgroup->batch_geom) {
2041                                 GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup->calls, shgroup->calls_first);
2042                                 draw_geometry(shgroup, shgroup->batch_geom, obmat, NULL);
2043                         }
2044                 }
2045         }
2046         else {
2047                 for (DRWCall *call = shgroup->calls_first; call; call = call->head.prev)
2048                 {
2049                         bool neg_scale = is_negative_m4(call->obmat);
2050
2051                         /* Negative scale objects */
2052                         if (neg_scale) {
2053                                 glFrontFace(DST.backface);
2054                         }
2055
2056                         GPU_SELECT_LOAD_IF_PICKSEL(call);
2057
2058                         if (call->head.type == DRW_CALL_SINGLE) {
2059                                 draw_geometry(shgroup, call->geometry, call->obmat, call->ob_data);
2060                         }
2061                         else {
2062                                 BLI_assert(call->head.type == DRW_CALL_GENERATE);
2063                                 DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
2064                                 draw_geometry_prepare(shgroup, callgen->obmat, NULL, NULL);
2065                                 callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
2066                         }
2067
2068                         /* Reset state */
2069                         if (neg_scale) {
2070                                 glFrontFace(DST.frontface);
2071                         }
2072                 }
2073         }
2074
2075         /* TODO: remove (currently causes an alpha issue with sculpt, need to investigate) */
2076         DRW_state_reset();
2077 }
2078
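/* Draw the shading groups of a pass, from start_group to end_group inclusive.
 * Bound textures and the last bound shader are released at the end so the
 * pass leaves no dangling GL bindings. */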
2079 static void DRW_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
2080 {
2081         /* Start fresh */
2082         DST.shader = NULL;
2083
2084         DRW_state_set(pass->state);
2085
2086         DRW_stats_query_start(pass->name);
2087
2088         for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
2089                 draw_shgroup(shgroup, pass->state);
2090                 /* break if upper limit */
2091                 if (shgroup == end_group) {
2092                         break;
2093                 }
2094         }
2095
2096         /* Clear Bound textures */
2097         for (int i = 0; i < GPU_max_textures(); i++) {
2098                 if (RST.bound_texs[i] != NULL) {
2099                         GPU_texture_unbind(RST.bound_texs[i]);
2100                         RST.bound_texs[i] = NULL;
2101                 }
2102         }
2103
2104         if (DST.shader) {
2105                 GPU_shader_unbind();
2106                 DST.shader = NULL;
2107         }
2108
2109         DRW_stats_query_end();
2110 }
2111
2112 void DRW_draw_pass(DRWPass *pass)
2113 {
2114         DRW_draw_pass_ex(pass, pass->shgroups, pass->shgroups_last);
2115 }
2116
2117 /* Draw only a subset of shgroups. Used in special situations such as grease pencil strokes. */
2118 void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
2119 {
2120         DRW_draw_pass_ex(pass, start_group, end_group);
2121 }
2122
2123 void DRW_draw_callbacks_pre_scene(void)
2124 {
2125         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2126
2127         gpuLoadProjectionMatrix(rv3d->winmat);
2128         gpuLoadMatrix(rv3d->viewmat);
2129 }
2130
2131 void DRW_draw_callbacks_post_scene(void)
2132 {
2133         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2134
2135         gpuLoadProjectionMatrix(rv3d->winmat);
2136         gpuLoadMatrix(rv3d->viewmat);
2137 }
2138
2139 /* Reset state to avoid interfering with other UI drawcalls */
2140 void DRW_state_reset_ex(DRWState state)
2141 {
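        /* Invert the cached state so that DRW_state_set() sees every bit as
         * changed and re-applies the full GL state. */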
2142         DST.state = ~state;
2143         DRW_state_set(state);
2144 }
2145
2146 void DRW_state_reset(void)
2147 {
2148         /* Reset blending function */
2149         glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
2150
2151         DRW_state_reset_ex(DRW_STATE_DEFAULT);
2152 }
2153
2154 /* NOTE : Make sure to reset after use! */
2155 void DRW_state_invert_facing(void)
2156 {
2157         SWAP(GLenum, DST.backface, DST.frontface);
2158         glFrontFace(DST.frontface);
2159 }
2160
2161 /**
2162  * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
2163  * and if the shaders have support for it (see usage of gl_ClipDistance).
2164  * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
2165  **/
2166 void DRW_state_clip_planes_add(float plane_eq[4])
2167 {
2168         copy_v4_v4(DST.clip_planes_eq[DST.num_clip_planes++], plane_eq);
2169 }
2170
2171 void DRW_state_clip_planes_reset(void)
2172 {
2173         DST.num_clip_planes = 0;
2174 }
2175
2176 /** \} */
2177
2178
2179 struct DRWTextStore *DRW_text_cache_ensure(void)
2180 {
2181         BLI_assert(DST.text_store_p);
2182         if (*DST.text_store_p == NULL) {
2183                 *DST.text_store_p = DRW_text_cache_create();
2184         }
2185         return *DST.text_store_p;
2186 }
2187
2188
2189 /* -------------------------------------------------------------------- */
2190
2191 /** \name Settings
2192  * \{ */
2193
2194 bool DRW_object_is_renderable(Object *ob)
2195 {
2196         Scene *scene = DST.draw_ctx.scene;
2197         Object *obedit = scene->obedit;
2198
2199         if (!BKE_object_is_visible(ob)) {
2200                 return false;
2201         }
2202
2203         if (ob->type == OB_MESH) {
2204                 if (ob == obedit) {
2205                         IDProperty *props = BKE_layer_collection_engine_evaluated_get(ob, COLLECTION_MODE_EDIT, "");
2206                         bool do_show_occlude_wire = BKE_collection_engine_property_value_get_bool(props, "show_occlude_wire");
2207                         if (do_show_occlude_wire) {
2208                                 return false;
2209                         }
2210                         bool do_show_weight = BKE_collection_engine_property_value_get_bool(props, "show_weight");
2211                         if (do_show_weight) {
2212                                 return false;
2213                         }
2214                 }
2215         }
2216
2217         return true;
2218 }
2219
2220 bool DRW_object_is_flat_normal(const Object *ob)
2221 {
2222         if (ob->type == OB_MESH) {
2223                 const Mesh *me = ob->data;
2224                 if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
2225                         return false;
2226                 }
2227         }
2228         return true;
2229 }
2230
2231
2232 /**
2233  * Return true if the object has its own draw mode.
2234  * Caller must check this is active */
2235 int DRW_object_is_mode_shade(const Object *ob)
2236 {
2237         BLI_assert(ob == DST.draw_ctx.obact);
2238         if ((ob->mode & OB_MODE_EDIT) == 0) {
2239                 if (ob->mode & (OB_MODE_VERTEX_PAINT | OB_MODE_WEIGHT_PAINT | OB_MODE_TEXTURE_PAINT)) {
2240                         if ((DST.draw_ctx.v3d->flag2 & V3D_SHOW_MODE_SHADE_OVERRIDE) == 0) {
2241                                 return true;
2242                         }
2243                         else {
2244                                 return false;
2245                         }
2246                 }
2247         }
2248         return -1;
2249 }
2250
2251 /** \} */
2252
2253
2254 /* -------------------------------------------------------------------- */
2255
2256 /** \name Framebuffers (DRW_framebuffer)
2257  * \{ */
2258
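/* Map a DRWTextureFormat to the matching GPUTextureFormat, and report the
 * channel count and whether it is a depth (or depth+stencil) format. */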
2259 static GPUTextureFormat convert_tex_format(
2260         int fbo_format,
2261         int *r_channels, bool *r_is_depth)
2262 {
2263         *r_is_depth = ELEM(fbo_format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8);
2264
2265         switch (fbo_format) {
2266                 case DRW_TEX_R_16:     *r_channels = 1; return GPU_R16F;
2267                 case DRW_TEX_R_32:     *r_channels = 1; return GPU_R32F;
2268                 case DRW_TEX_RG_8:     *r_channels = 2; return GPU_RG8;
2269                 case DRW_TEX_RG_16:    *r_channels = 2; return GPU_RG16F;
2270                 case DRW_TEX_RG_32:    *r_channels = 2; return GPU_RG32F;
2271                 case DRW_TEX_RGBA_8:   *r_channels = 4; return GPU_RGBA8;
2272                 case DRW_TEX_RGBA_16:  *r_channels = 4; return GPU_RGBA16F;
2273                 case DRW_TEX_RGBA_32:  *r_channels = 4; return GPU_RGBA32F;
2274                 case DRW_TEX_DEPTH_16: *r_channels = 1; return GPU_DEPTH_COMPONENT16;
2275                 case DRW_TEX_DEPTH_24: *r_channels = 1; return GPU_DEPTH_COMPONENT24;
2276                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_channels = 1; return GPU_DEPTH24_STENCIL8;
2277                 case DRW_TEX_DEPTH_32: *r_channels = 1; return GPU_DEPTH_COMPONENT32F;
2278                 case DRW_TEX_RGB_11_11_10: *r_channels = 3; return GPU_R11F_G11F_B10F;
2279                 default:
2280                         BLI_assert(false && "Texture format unsupported as render target!");
2281                         *r_channels = 4; return GPU_RGBA8;
2282         }
2283 }
2284
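/* Lazily create a framebuffer and its attachments.
 * Typical engine-side usage (an illustrative sketch only; the names below are
 * not from this file):
 *
 *   DRWFboTexture tex[2] = {
 *           {&txl->depth, DRW_TEX_DEPTH_24, 0},
 *           {&txl->color, DRW_TEX_RGBA_16, DRW_TEX_FILTER},
 *   };
 *   DRW_framebuffer_init(&fbl->main, &draw_engine_eevee_type,
 *                        (int)size[0], (int)size[1], tex, 2);
 *
 * Textures flagged DRW_TEX_TEMP are taken from the viewport texture pool and
 * must be re-queried every frame. */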
2285 void DRW_framebuffer_init(
2286         struct GPUFrameBuffer **fb, void *engine_type, int width, int height,
2287         DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
2288 {
2289         BLI_assert(textures_len <= MAX_FBO_TEX);
2290
2291         bool create_fb = false;
2292         int color_attachment = -1;
2293
2294         if (!*fb) {
2295                 *fb = GPU_framebuffer_create();
2296                 create_fb = true;
2297         }
2298
2299         for (int i = 0; i < textures_len; ++i) {
2300                 int channels;
2301                 bool is_depth;
2302
2303                 DRWFboTexture fbotex = textures[i];
2304                 bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;
2305
2306                 GPUTextureFormat gpu_format = convert_tex_format(fbotex.format, &channels, &is_depth);
2307
2308                 if (!*fbotex.tex || is_temp) {
2309                         /* Temp textures need to be queried each frame; others do not. */
2310                         if (is_temp) {
2311                                 *fbotex.tex = GPU_viewport_texture_pool_query(
2312                                         DST.viewport, engine_type, width, height, channels, gpu_format);
2313                         }
2314                         else if (create_fb) {
2315                                 *fbotex.tex = GPU_texture_create_2D_custom(
2316                                         width, height, channels, gpu_format, NULL, NULL);
2317                         }
2318                 }
2319
2320                 if (create_fb) {
2321                         if (!is_depth) {
2322                                 ++color_attachment;
2323                         }
2324                         drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
2325                         GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
2326                 }
2327         }
2328
2329         if (create_fb && (textures_len > 0)) {
2330                 if (!GPU_framebuffer_check_valid(*fb, NULL)) {
2331                         printf("Error: invalid framebuffer\n");
2332                 }
2333
2334                 /* Detach temp textures */
2335                 for (int i = 0; i < textures_len; ++i) {
2336                         DRWFboTexture fbotex = textures[i];
2337
2338                         if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
2339                                 GPU_framebuffer_texture_detach(*fbotex.tex);
2340                         }
2341                 }
2342
2343                 GPU_framebuffer_bind(DST.default_framebuffer);
2344         }
2345 }
2346
2347 void DRW_framebuffer_free(struct GPUFrameBuffer *fb)
2348 {
2349         GPU_framebuffer_free(fb);
2350 }
2351
2352 void DRW_framebuffer_bind(struct GPUFrameBuffer *fb)
2353 {
2354         GPU_framebuffer_bind(fb);
2355 }
2356
2357 void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
2358 {
2359         if (color) {
2360                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
2361                 glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
2362         }
2363         if (depth) {
2364                 glDepthMask(GL_TRUE);
2365                 glClearDepth(clear_depth);
2366         }
2367         if (stencil) {
2368                 glStencilMask(0xFF);
2369         }
2370         glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
2371                 ((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
2372                 ((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
2373 }
2374
2375 void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
2376 {
2377         GLenum type;
2378         switch (channels) {
2379                 case 1: type = GL_RED; break;
2380                 case 2: type = GL_RG; break;
2381                 case 3: type = GL_RGB; break;
2382                 case 4: type = GL_RGBA; break;
2383                 default:
2384                         BLI_assert(false && "wrong number of read channels");
2385                         return;
2386         }
2387         glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
2388         glReadPixels(x, y, w, h, type, GL_FLOAT, data);
2389 }
2390
2391 void DRW_framebuffer_texture_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
2392 {
2393         GPU_framebuffer_texture_attach(fb, tex, slot, mip);
2394 }
2395
2396 void DRW_framebuffer_texture_layer_attach(struct GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip)
2397 {
2398         GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
2399 }
2400
2401 void DRW_framebuffer_cubeface_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
2402 {
2403         GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
2404 }
2405
2406 void DRW_framebuffer_texture_detach(GPUTexture *tex)
2407 {
2408         GPU_framebuffer_texture_detach(tex);
2409 }
2410
2411 void DRW_framebuffer_blit(struct GPUFrameBuffer *fb_read, struct GPUFrameBuffer *fb_write, bool depth, bool stencil)
2412 {
2413         GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
2414 }
2415
2416 void DRW_framebuffer_recursive_downsample(
2417         struct GPUFrameBuffer *fb, struct GPUTexture *tex, int num_iter,
2418         void (*callback)(void *userData, int level), void *userData)
2419 {
2420         GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
2421 }
2422
2423 void DRW_framebuffer_viewport_size(struct GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
2424 {
2425         glViewport(x, y, w, h);
2426 }
2427
2428 /* Use color management profile to draw texture to framebuffer */
2429 void DRW_transform_to_display(GPUTexture *tex)
2430 {
2431         DRW_state_set(DRW_STATE_WRITE_COLOR);
2432
2433         Gwn_VertFormat *vert_format = immVertexFormat();
2434         unsigned int pos = GWN_vertformat_attr_add(vert_format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
2435         unsigned int texco = GWN_vertformat_attr_add(vert_format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
2436
2437         const float dither = 1.0f;
2438
2439         bool use_ocio = false;
2440
2441         {
2442                 Scene *scene = DST.draw_ctx.scene;
2443                 /* View transform is already applied for offscreen, don't apply again, see: T52046 */
2444                 ColorManagedViewSettings *view_settings =
2445                         (DST.options.is_image_render && !DST.options.is_scene_render) ?
2446                         NULL : &scene->view_settings;
2447                 use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
2448                         view_settings, &scene->display_settings, NULL, dither, false);
2449         }
2450
2451         if (!use_ocio) {
2452                 immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
2453                 immUniform1i("image", 0);
2454         }
2455
2456         GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */
2457
2458         float mat[4][4];
2459         unit_m4(mat);
2460         immUniformMatrix4fv("ModelViewProjectionMatrix", mat);
2461
2462         /* Full screen triangle */
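        /* A single oversized triangle (positions from -1 to 3 in clip space)
         * covers the whole [-1, 1] viewport, so no two-triangle quad is needed. */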
2463         immBegin(GWN_PRIM_TRIS, 3);
2464         immAttrib2f(texco, 0.0f, 0.0f);
2465         immVertex2f(pos, -1.0f, -1.0f);
2466
2467         immAttrib2f(texco, 2.0f, 0.0f);
2468         immVertex2f(pos, 3.0f, -1.0f);
2469
2470         immAttrib2f(texco, 0.0f, 2.0f);
2471         immVertex2f(pos, -1.0f, 3.0f);
2472         immEnd();
2473
2474         GPU_texture_unbind(tex);
2475
2476         if (use_ocio) {
2477                 IMB_colormanagement_finish_glsl_draw();
2478         }
2479         else {
2480                 immUnbindProgram();
2481         }
2482 }
2483
2484 /** \} */
2485
2486
2487 /* -------------------------------------------------------------------- */
2488
2489 /** \name Viewport (DRW_viewport)
2490  * \{ */
2491
2492 static void *DRW_viewport_engine_data_ensure(void *engine_type)
2493 {
2494         void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
2495
2496         if (data == NULL) {
2497                 data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
2498         }
2499         return data;
2500 }
2501
2502 void DRW_engine_viewport_data_size_get(
2503         const void *engine_type_v,
2504         int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
2505 {
2506         const DrawEngineType *engine_type = engine_type_v;
2507
2508         if (r_fbl_len) {
2509                 *r_fbl_len = engine_type->vedata_size->fbl_len;
2510         }
2511         if (r_txl_len) {
2512                 *r_txl_len = engine_type->vedata_size->txl_len;
2513         }
2514         if (r_psl_len) {
2515                 *r_psl_len = engine_type->vedata_size->psl_len;
2516         }
2517         if (r_stl_len) {
2518                 *r_stl_len = engine_type->vedata_size->stl_len;
2519         }
2520 }
2521
2522 const float *DRW_viewport_size_get(void)
2523 {
2524         return &DST.size[0];
2525 }
2526
2527 const float *DRW_viewport_screenvecs_get(void)
2528 {
2529         return &DST.screenvecs[0][0];
2530 }
2531
2532 const float *DRW_viewport_pixelsize_get(void)
2533 {
2534         return &DST.pixsize;
2535 }
2536
2537 static void DRW_viewport_cache_resize(void)
2538 {
2539         /* Release the memiter before clearing the mempools that reference them. */
2540         GPU_viewport_cache_release(DST.viewport);
2541
2542         if (DST.vmempool != NULL) {
2543                 BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_count(DST.vmempool->calls));
2544                 BLI_mempool_clear_ex(DST.vmempool->calls_generate, BLI_mempool_count(DST.vmempool->calls_generate));
2545                 BLI_mempool_clear_ex(DST.vmempool->calls_dynamic, BLI_mempool_count(DST.vmempool->calls_dynamic));
2546                 BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_count(DST.vmempool->shgroups));
2547                 BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_count(DST.vmempool->uniforms));
2548                 BLI_mempool_clear_ex(DST.vmempool->attribs, BLI_mempool_count(DST.vmempool->attribs));
2549                 BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_count(DST.vmempool->passes));
2550         }
2551 }
2552
2553 /* It also stores viewport variables in an immutable place: DST.
2554  * This is because a cached uniform only stores a reference
2555  * to its value, and we don't want to invalidate the cache
2556  * if this value changes per viewport. */
2557 static void DRW_viewport_var_init(void)
2558 {
2559         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2560
2561         /* Refresh DST.size */
2562         if (DST.viewport) {
2563                 int size[2];
2564                 GPU_viewport_size_get(DST.viewport, size);
2565                 DST.size[0] = size[0];
2566                 DST.size[1] = size[1];
2567
2568                 DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
2569                 DST.default_framebuffer = fbl->default_fb;
2570
2571                 DST.vmempool = GPU_viewport_mempool_get(DST.viewport);
2572
2573                 if (DST.vmempool->calls == NULL) {
2574                         DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
2575                 }
2576                 if (DST.vmempool->calls_generate == NULL) {
2577                         DST.vmempool->calls_generate = BLI_mempool_create(sizeof(DRWCallGenerate), 0, 512, 0);
2578                 }
2579                 if (DST.vmempool->calls_dynamic == NULL) {
2580                         DST.vmempool->calls_dynamic = BLI_mempool_create(sizeof(DRWCallDynamic), 0, 512, 0);
2581                 }
2582                 if (DST.vmempool->shgroups == NULL) {
2583                         DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
2584                 }
2585                 if (DST.vmempool->uniforms == NULL) {
2586                         DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
2587                 }
2588                 if (DST.vmempool->attribs == NULL) {
2589                         DST.vmempool->attribs = BLI_mempool_create(sizeof(DRWAttrib), 0, 256, 0);
2590                 }
2591                 if (DST.vmempool->passes == NULL) {
2592                         DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
2593                 }
2594         }
2595         else {
2596                 DST.size[0] = 0;
2597                 DST.size[1] = 0;
2598
2599                 DST.default_framebuffer = NULL;
2600                 DST.vmempool = NULL;
2601         }
2602         /* Refresh DST.screenvecs */
2603         copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
2604         copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
2605         normalize_v3(DST.screenvecs[0]);
2606         normalize_v3(DST.screenvecs[1]);
2607
2608         /* Refresh DST.pixelsize */
2609         DST.pixsize = rv3d->pixsize;
2610
2611         /* Reset facing */
2612         DST.frontface = GL_CCW;
2613         DST.backface = GL_CW;
2614         glFrontFace(DST.frontface);
2615
2616         if (DST.draw_ctx.scene->obedit) {
2617                 ED_view3d_init_mats_rv3d(DST.draw_ctx.scene->obedit, rv3d);
2618         }
2619
2620         /* Alloc array of texture references. */
2621         if (RST.bound_texs == NULL) {
2622                 RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
2623         }
2624         if (RST.bound_tex_slots == NULL) {
2625                 RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
2626         }
2627
2628         memset(viewport_matrix_override.override, 0x0, sizeof(viewport_matrix_override.override));
2629 }
2630
2631 void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
2632 {
2633         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2634         BLI_assert(type >= DRW_MAT_PERS && type <= DRW_MAT_WININV);
2635
2636         if (viewport_matrix_override.override[type]) {
2637                 copy_m4_m4(mat, viewport_matrix_override.mat[type]);
2638         }
2639         else {
2640                 switch (type) {
2641                         case DRW_MAT_PERS:
2642                                 copy_m4_m4(mat, rv3d->persmat);
2643                                 break;
2644                         case DRW_MAT_PERSINV:
2645                                 copy_m4_m4(mat, rv3d->persinv);
2646                                 break;
2647                         case DRW_MAT_VIEW:
2648                                 copy_m4_m4(mat, rv3d->viewmat);
2649                                 break;
2650                         case DRW_MAT_VIEWINV:
2651                                 copy_m4_m4(mat, rv3d->viewinv);
2652                                 break;
2653                         case DRW_MAT_WIN:
2654                                 copy_m4_m4(mat, rv3d->winmat);
2655                                 break;
2656                         case DRW_MAT_WININV:
2657                                 invert_m4_m4(mat, rv3d->winmat);
2658                                 break;
2659                         default:
2660                                 BLI_assert(!"Matrix type invalid");
2661                                 break;
2662                 }
2663         }
2664 }
2665
2666 void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type)
2667 {
2668         copy_m4_m4(viewport_matrix_override.mat[type], mat);
2669         viewport_matrix_override.override[type] = true;
2670 }
2671
2672 void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
2673 {
2674         viewport_matrix_override.override[type] = false;
2675 }
2676
2677 bool DRW_viewport_is_persp_get(void)
2678 {
2679         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2680         return rv3d->is_persp;
2681 }
2682
2683 DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
2684 {
2685         return GPU_viewport_framebuffer_list_get(DST.viewport);
2686 }
2687
2688 DefaultTextureList *DRW_viewport_texture_list_get(void)
2689 {
2690         return GPU_viewport_texture_list_get(DST.viewport);
2691 }
2692
2693 void DRW_viewport_request_redraw(void)
2694 {
2695         GPU_viewport_tag_update(DST.viewport);
2696 }
2697
2698 /** \} */
2699
2700
2701 /* -------------------------------------------------------------------- */
2702 /** \name ViewLayers (DRW_scenelayer)
2703  * \{ */
2704
2705 void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
2706 {
2707         for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2708                 if (sled->engine_type == engine_type) {
2709                         return sled->storage;
2710                 }
2711         }
2712         return NULL;
2713 }
2714
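/* Return a pointer to the engine's per view-layer storage, creating the
 * ViewLayerEngineData entry (with its free callback) on first use. */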
2715 void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
2716 {
2717         ViewLayerEngineData *sled;
2718
2719         for (sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2720                 if (sled->engine_type == engine_type) {
2721                         return &sled->storage;
2722                 }
2723         }
2724
2725         sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
2726         sled->engine_type = engine_type;
2727         sled->free = callback;
2728         BLI_addtail(&DST.draw_ctx.view_layer->drawdata, sled);
2729
2730         return &sled->storage;
2731 }
2732
2733 /** \} */
2734
2735
2736 /* -------------------------------------------------------------------- */
2737
2738 /** \name Objects (DRW_object)
2739  * \{ */
2740
2741 void *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type)
2742 {
2743         for (ObjectEngineData *oed = ob->drawdata.first; oed; oed = oed->next) {
2744                 if (oed->engine_type == engine_type) {
2745                         return oed->storage;
2746                 }
2747         }
2748         return NULL;
2749 }
2750
2751 void **DRW_object_engine_data_ensure(
2752         Object *ob, DrawEngineType *engine_type, void (*callback)(void *storage))
2753 {
2754         ObjectEngineData *oed;
2755
2756         for (oed = ob->drawdata.first; oed; oed = oed->next) {
2757                 if (oed->engine_type == engine_type) {
2758                         return &oed->storage;
2759                 }
2760         }
2761
2762         oed = MEM_callocN(sizeof(ObjectEngineData), "ObjectEngineData");
2763         oed->engine_type = engine_type;
2764         oed->free = callback;
2765         BLI_addtail(&ob->drawdata, oed);
2766
2767         return &oed->storage;
2768 }
2769
2770 /* XXX There is definitely some overlap between this and DRW_object_engine_data_ensure.
2771  * We should get rid of one of the two. */
2772 LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, RenderEngineType *engine_type)
2773 {
2774         BLI_assert(ob->type == OB_LAMP);
2775
2776         Scene *scene = DST.draw_ctx.scene;
2777
2778         /* TODO Dupliobjects */
2779         /* TODO Should be per scenelayer */
2780         return GPU_lamp_engine_data_get(scene, ob, NULL, engine_type);
2781 }
2782
2783 void DRW_lamp_engine_data_free(LampEngineData *led)
2784 {
2785         GPU_lamp_engine_data_free(led);
2786 }
2787
2788 /** \} */
2789
2790
2791 /* -------------------------------------------------------------------- */
2792
2793 /** \name Rendering (DRW_engines)
2794  * \{ */
2795
2796 static void DRW_engines_init(void)
2797 {
2798         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2799                 DrawEngineType *engine = link->data;
2800                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2801                 PROFILE_START(stime);
2802
2803                 if (engine->engine_init) {
2804                         engine->engine_init(data);
2805                 }
2806
2807                 PROFILE_END_UPDATE(data->init_time, stime);
2808         }
2809 }
2810
2811 static void DRW_engines_cache_init(void)
2812 {
2813         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2814                 DrawEngineType *engine = link->data;
2815                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2816
2817                 if (data->text_draw_cache) {
2818                         DRW_text_cache_destroy(data->text_draw_cache);
2819                         data->text_draw_cache = NULL;
2820                 }
2821                 if (DST.text_store_p == NULL) {
2822                         DST.text_store_p = &data->text_draw_cache;
2823                 }
2824
2825                 if (engine->cache_init) {
2826                         engine->cache_init(data);
2827                 }
2828         }
2829 }
2830
2831 static void DRW_engines_cache_populate(Object *ob)
2832 {
2833         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2834                 DrawEngineType *engine = link->data;
2835                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2836
2837                 if (engine->cache_populate) {
2838                         engine->cache_populate(data, ob);
2839                 }
2840         }
2841 }
2842
2843 static void DRW_engines_cache_finish(void)
2844 {
2845         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2846                 DrawEngineType *engine = link->data;
2847                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2848
2849                 if (engine->cache_finish) {
2850                         engine->cache_finish(data);
2851                 }
2852         }
2853 }
2854
2855 static void DRW_engines_draw_background(void)
2856 {
2857         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2858                 DrawEngineType *engine = link->data;
2859                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2860
2861                 if (engine->draw_background) {
2862                         PROFILE_START(stime);
2863
2864                         DRW_stats_group_start(engine->idname);
2865                         engine->draw_background(data);
2866                         DRW_stats_group_end();
2867
2868                         PROFILE_END_UPDATE(data->background_time, stime);
2869                         return;
2870                 }
2871         }
2872
2873         /* No draw_background found, fall back to the default background. */
2874         DRW_draw_background();
2875 }
2876
2877 static void DRW_engines_draw_scene(void)
2878 {
2879         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2880                 DrawEngineType *engine = link->data;
2881                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2882                 PROFILE_START(stime);
2883
2884                 if (engine->draw_scene) {
2885                         DRW_stats_group_start(engine->idname);
2886                         engine->draw_scene(data);
2887                         DRW_stats_group_end();
2888                 }
2889
2890                 PROFILE_END_UPDATE(data->render_time, stime);
2891         }
2892 }
2893
2894 static void DRW_engines_draw_text(void)
2895 {
2896         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2897                 DrawEngineType *engine = link->data;
2898                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2899                 PROFILE_START(stime);
2900
2901                 if (data->text_draw_cache) {
2902                         DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.v3d, DST.draw_ctx.ar, false);
2903                 }
2904
2905                 PROFILE_END_UPDATE(data->render_time, stime);
2906         }
2907 }
2908
2909 #define MAX_INFO_LINES 10
2910
2911 /**
2912  * Returns the offset required for the drawing of engines info.
2913  */
2914 int DRW_draw_region_engine_info_offset(void)
2915 {
2916         int lines = 0;
2917         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2918                 DrawEngineType *engine = link->data;
2919                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2920
2921                 /* Count the number of lines. */
2922                 if (data->info[0] != '\0') {
2923                         lines++;
2924                         char *c = data->info;
2925                         while (*c++ != '\0') {
2926                                 if (*c == '\n') {
2927                                         lines++;
2928                                 }
2929                         }
2930                 }
2931         }
2932         return MIN2(MAX_INFO_LINES, lines) * UI_UNIT_Y;
2933 }
2934
2935 /**
2936  * Actual drawing of the engines' info text.
2937  */
2938 void DRW_draw_region_engine_info(void)
2939 {
2940         const char *info_array_final[MAX_INFO_LINES + 1];
2941         /* This should be the maximum number of engines running at the same time. */
2942         char info_array[MAX_INFO_LINES][GPU_INFO_SIZE];
2943         int i = 0;
2944
2945         const DRWContextState *draw_ctx = DRW_context_state_get();
2946         ARegion *ar = draw_ctx->ar;
2947         float fill_color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
2948
2949         UI_GetThemeColor3fv(TH_HIGH_GRAD, fill_color);
2950         mul_v3_fl(fill_color, fill_color[3]);
2951
2952         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2953                 DrawEngineType *engine = link->data;
2954                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2955
2956                 if (data->info[0] != '\0') {
2957                         char *chr_current = data->info;
2958                         char *chr_start = chr_current;
2959                         int line_len = 0;
2960
2961                         while (*chr_current++ != '\0') {
2962                                 line_len++;
2963                                 if (*chr_current == '\n') {
2964                                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
2965                                         /* Re-start counting. */
2966                                         chr_start = chr_current + 1;
2967                                         line_len = -1;
2968                                 }
2969                         }
2970
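                        /* Copy the last (or only) line, which has no trailing '\n'.
                         * Note: the MAX_INFO_LINES bound is only checked once per engine,
                         * after its whole info string has been split. */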
2971                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
2972
2973                         if (i >= MAX_INFO_LINES) {
2974                                 break;
2975                         }
2976                 }
2977         }
2978
2979         for (int j = 0; j < i; j++) {
2980                 info_array_final[j] = info_array[j];
2981         }
2982         info_array_final[i] = NULL;
2983
2984         if (info_array_final[0] != NULL) {
2985                 ED_region_info_draw_multiline(ar, info_array_final, fill_color, true);
2986         }
2987 }
2988
2989 #undef MAX_INFO_LINES
2990
2991 static void use_drw_engine(DrawEngineType *engine)
2992 {
2993         LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
2994         ld->data = engine;
2995         BLI_addtail(&DST.enabled_engines, ld);
2996 }
2997
2998 /* TODO revisit this when proper layering is implemented */
2999 /* Gather all draw engines needed and store them in DST.enabled_engines.
3000  * This also defines the rendering order of the engines. */
3001 static void DRW_engines_enable_from_engine(RenderEngineType *engine_type)
3002 {
3003         /* TODO layers */
3004         if (engine_type->draw_engine != NULL) {
3005                 use_drw_engine(engine_type->draw_engine);
3006         }
3007
3008         if ((engine_type->flag & RE_INTERNAL) == 0) {
3009                 DRW_engines_enable_external();
3010         }
3011 }
3012
3013 static void DRW_engines_enable_from_object_mode(void)
3014 {
3015         use_drw_engine(&draw_engine_object_type);
3016 }
3017
3018 static void DRW_engines_enable_from_mode(int mode)
3019 {
3020         switch (mode) {
3021                 case CTX_MODE_EDIT_MESH:
3022                         use_drw_engine(&draw_engine_edit_mesh_type);
3023                         break;
3024                 case CTX_MODE_EDIT_CURVE:
3025                         use_drw_engine(&draw_engine_edit_curve_type);
3026                         break;
3027                 case CTX_MODE_EDIT_SURFACE:
3028                         use_drw_engine(&draw_engine_edit_surface_type);
3029                         break;
3030                 case CTX_MODE_EDIT_TEXT:
3031                         use_drw_engine(&draw_engine_edit_text_type);
3032                         break;
3033                 case CTX_MODE_EDIT_ARMATURE:
3034                         use_drw_engine(&draw_engine_edit_armature_type);
3035                         break;
3036                 case CTX_MODE_EDIT_METABALL:
3037                         use_drw_engine(&draw_engine_edit_metaball_type);
3038                         break;
3039                 case CTX_MODE_EDIT_LATTICE:
3040                         use_drw_engine(&draw_engine_edit_lattice_type);
3041                         break;
3042                 case CTX_MODE_POSE:
3043                         use_drw_engine(&draw_engine_pose_type);
3044                         break;
3045                 case CTX_MODE_SCULPT:
3046                         use_drw_engine(&draw_engine_sculpt_type);
3047                         break;
3048                 case CTX_MODE_PAINT_WEIGHT:
3049                         use_drw_engine(&draw_engine_pose_type);
3050                         use_drw_engine(&draw_engine_paint_weight_type);
3051                         break;
3052                 case CTX_MODE_PAINT_VERTEX:
3053                         use_drw_engine(&draw_engine_paint_vertex_type);
3054                         break;
3055                 case CTX_MODE_PAINT_TEXTURE:
3056                         use_drw_engine(&draw_engine_paint_texture_type);
3057                         break;
3058                 case CTX_MODE_PARTICLE:
3059                         use_drw_engine(&draw_engine_particle_type);
3060                         break;
3061                 case CTX_MODE_OBJECT:
3062                         break;
3063                 default:
3064                         BLI_assert(!"Draw mode invalid");
3065                         break;
3066         }
3067 }
3068
3069 /**
3070  * Use for select and depth-drawing.
3071  */
3072 static void DRW_engines_enable_basic(void)
3073 {
3074         use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
3075 }
3076
3077 /**
3078  * Use for external render engines.
3079  */
3080 static void DRW_engines_enable_external(void)
3081 {
3082         use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
3083 }
3084
3085 static void DRW_engines_enable(const Scene *scene, ViewLayer *view_layer, RenderEngineType *engine_type)
3086 {
3087         Object *obact = OBACT(view_layer);
3088         const int mode = CTX_data_mode_enum_ex(scene->obedit, obact);
3089
3090         DRW_engines_enable_from_engine(engine_type);
3091
3092         if (DRW_state_draw_support()) {
3093                 DRW_engines_enable_from_object_mode();
3094                 DRW_engines_enable_from_mode(mode);
3095         }
3096 }
3097
3098 static void DRW_engines_disable(void)
3099 {
3100         BLI_freelistN(&DST.enabled_engines);
3101 }
3102
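/* Hash of the enabled engine names, passed to GPU_viewport_engines_data_validate()
 * so the viewport storage can be checked against the current set of engines. */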
3103 static unsigned int DRW_engines_get_hash(void)
3104 {
3105         unsigned int hash = 0;
3106         /* The cache depends on enabled engines */
3107         /* FIXME: a hash collision between different engine sets would cause a segfault. */
3108         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3109                 DrawEngineType *engine = link->data;
3110                 hash += BLI_ghashutil_strhash_p(engine->idname);
3111         }
3112
3113         return hash;
3114 }
3115
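/* Draw a short stats string at column 'u' / row 'v' of the debug overlay,
 * both expressed in widget units relative to the region's visible rect. */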
3116 static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
3117 {
3118         BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
3119                                rect->ymax - (3 + v) * U.widget_unit, 0.0f,
3120                                txt, size);
3121 }
3122
3123 /* CPU stats */
3124 static void DRW_debug_cpu_stats(void)
3125 {
3126         int u, v;
3127         double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
3128         /* Local coordinate visible rect inside region, to accommodate overlapping UI. */
3129         rcti rect;
3130         struct ARegion *ar = DST.draw_ctx.ar;
3131         ED_region_visible_rect(ar, &rect);
3132
3133         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3134
3135         /* row by row */
3136         v = 0; u = 0;
3137         /* Label row */
3138         char col_label[32];
3139         sprintf(col_label, "Engine");
3140         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3141         sprintf(col_label, "Init");
3142         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3143         sprintf(col_label, "Background");
3144         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3145         sprintf(col_label, "Render");
3146         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3147         sprintf(col_label, "Total (w/o cache)");
3148         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3149         v++;
3150
3151         /* Engines rows */
3152         char time_to_txt[16];
3153         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3154                 u = 0;
3155                 DrawEngineType *engine = link->data;
3156                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3157
3158                 draw_stat(&rect, u++, v, engine->idname, sizeof(engine->idname));
3159
3160                 init_tot_time += data->init_time;
3161                 sprintf(time_to_txt, "%.2fms", data->init_time);
3162                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3163
3164                 background_tot_time += data->background_time;
3165                 sprintf(time_to_txt, "%.2fms", data->background_time);
3166                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3167
3168                 render_tot_time += data->render_time;
3169                 sprintf(time_to_txt, "%.2fms", data->render_time);
3170                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3171
3172                 tot_time += data->init_time + data->background_time + data->render_time;
3173                 sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
3174                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3175                 v++;
3176         }
3177
3178         /* Totals row */
3179         u = 0;
3180         sprintf(col_label, "Sub Total");
3181         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3182         sprintf(time_to_txt, "%.2fms", init_tot_time);
3183         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3184         sprintf(time_to_txt, "%.2fms", background_tot_time);
3185         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3186         sprintf(time_to_txt, "%.2fms", render_tot_time);
3187         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3188         sprintf(time_to_txt, "%.2fms", tot_time);
3189         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3190         v += 2;
3191
3192         u = 0;
3193         sprintf(col_label, "Cache Time");
3194         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3195         sprintf(time_to_txt, "%.2fms", DST.cache_time);
3196         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3197 }
3198
3199 /* Display GPU time for each pass. */
3200 static void DRW_debug_gpu_stats(void)
3201 {
3202         /* Local coordinate visible rect inside region, to accommodate overlapping UI. */
3203         rcti rect;
3204         struct ARegion *ar = DST.draw_ctx.ar;
3205         ED_region_visible_rect(ar, &rect);
3206
3207         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3208
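        /* Start below the rows drawn by DRW_debug_cpu_stats()
         * (one row per enabled engine plus header, totals and cache-time rows). */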
3209         int v = BLI_listbase_count(&DST.enabled_engines) + 5;
3210
3211         char stat_string[32];
3212
3213         /* Memory Stats */
3214         unsigned int tex_mem = GPU_texture_memory_usage_get();
3215         unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
3216
3217         sprintf(stat_string, "GPU Memory");
3218         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3219         sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
3220         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3221         sprintf(stat_string, "   |--> Textures");
3222         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3223         sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
3224         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3225         sprintf(stat_string, "   |--> Meshes");
3226         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3227         sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
3228         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3229
3230         /* Offset the rect so DRW_stats_draw() starts below the rows drawn above. */
3231         rect.ymax -= (3 + ++v) * U.widget_unit;
3232
3233         /* Rendering Stats */
3234         DRW_stats_draw(&rect);
3235 }
3236
3237 /* -------------------------------------------------------------------- */
3238
3239 /** \name View Update
3240  * \{ */
3241
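/* Forward a view update notification to every enabled engine's view_update()
 * callback, using a temporary DST setup outside of the main draw loop. */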
3242 void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
3243 {
3244         RenderEngineType *engine_type = update_ctx->engine_type;
3245         ARegion *ar = update_ctx->ar;
3246         View3D *v3d = update_ctx->v3d;
3247         RegionView3D *rv3d = ar->regiondata;
3248         Scene *scene = update_ctx->scene;
3249         ViewLayer *view_layer = update_ctx->view_layer;
3250
3251         if (rv3d->viewport == NULL) {
3252                 return;
3253         }
3254
3255
3256         /* Reset before using it. */
3257         memset(&DST, 0x0, sizeof(DST));
3258
3259         DST.viewport = rv3d->viewport;
3260         DST.draw_ctx = (DRWContextState){
3261                 ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, NULL,
3262         };
3263
3264         DRW_engines_enable(scene, view_layer, engine_type);
3265
3266         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3267                 DrawEngineType *draw_engine = link->data;
3268                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);
3269
3270                 if (draw_engine->view_update) {
3271                         draw_engine->view_update(data);
3272                 }
3273         }
3274
3275         DST.viewport = NULL;
3276
3277         DRW_engines_disable();
3278 }
3279
3280 /** \} */
3281
3282 /** \name ID Update
3283  * \{ */
3284
3285 /* TODO(sergey): This code runs for each changed ID (including the ones which
3286  * are changed indirectly via update flush). Need to find a way to make this
3287  * run really fast, ideally without any heap allocations.
3288  * One idea is to run every known engine's id_update() and have it
3289  * do nothing if there is no engine-specific data yet.
3290  */
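/* A sketch of that idea (hypothetical: assumes an optional has_data() query on the
 * engine's viewport storage, which does not exist yet):
 *
 *     if (!draw_engine->has_data || !draw_engine->has_data(data)) {
 *         continue;  // nothing cached for this engine yet, skip id_update()
 *     }
 */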
3291 void DRW_notify_id_update(const DRWUpdateContext *update_ctx, ID *id)
3292 {
3293         RenderEngineType *engine_type = update_ctx->engine_type;
3294         ARegion *ar = update_ctx->ar;
3295         View3D *v3d = update_ctx->v3d;
3296         RegionView3D *rv3d = ar->regiondata;
3297         Scene *scene = update_ctx->scene;
3298         ViewLayer *view_layer = update_ctx->view_layer;
3299         if (rv3d->viewport == NULL) {
3300                 return;
3301         }
3302         /* Reset before using it. */
3303         memset(&DST, 0x0, sizeof(DST));
3304         DST.viewport = rv3d->viewport;
3305         DST.draw_ctx = (DRWContextState){
3306                 ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, NULL,
3307         };
3308         DRW_engines_enable(scene, view_layer, engine_type);
3309         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3310                 DrawEngineType *draw_engine = link->data;
3311                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);
3312                 if (draw_engine->id_update) {
3313                         draw_engine->id_update(data, id);
3314                 }
3315         }
3316         DST.viewport = NULL;
3317         DRW_engines_disable();
3318 }
3319
3320 /** \} */
3321
3322 /* -------------------------------------------------------------------- */
3323
3324 /** \name Main Draw Loops (DRW_draw)
3325  * \{ */
3326
3327 /* Everything starts here.
3328  * This function takes care of calling all cache and rendering functions
3329  * for each relevant engine / mode engine. */
3330 void DRW_draw_view(const bContext *C)
3331 {
3332         struct Depsgraph *graph = CTX_data_depsgraph(C);
3333         RenderEngineType *engine_type = CTX_data_engine_type(C);
3334         ARegion *ar = CTX_wm_region(C);
3335         View3D *v3d = CTX_wm_view3d(C);
3336
3337         /* Reset before using it. */
3338         memset(&DST, 0x0, sizeof(DST));
3339         DRW_draw_render_loop_ex(graph, engine_type, ar, v3d, C);
3340 }
3341
3342 /**
3343  * Used for both regular and off-screen drawing.
3344  * DST must be reset before calling this function.
3345  */
3346 void DRW_draw_render_loop_ex(
3347         struct Depsgraph *graph,
3348         RenderEngineType *engine_type,
3349         ARegion *ar, View3D *v3d,
3350         const bContext *evil_C)
3351 {
3352         Scene *scene = DEG_get_evaluated_scene(graph);
3353         ViewLayer *view_layer = DEG_get_evaluated_view_layer(graph);
3354         RegionView3D *rv3d = ar->regiondata;
3355
3356         DST.draw_ctx.evil_C = evil_C;
3357
3358         DST.viewport = rv3d->viewport;
3359         v3d->zbuf = true;
3360
3361         /* Setup viewport */
3362         GPU_viewport_engines_data_validate(DST.viewport, DRW_engines_get_hash());
3363
3364         DST.draw_ctx = (DRWContextState){
3365             ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type,
3366
3367             /* reuse if caller sets */
3368             DST.draw_ctx.evil_C,
3369         };
3370
3371         DRW_viewport_var_init();
3372
3373         /* Get list of enabled engines */
3374         DRW_engines_enable(scene, view_layer, engine_type);
3375
3376         /* Update ubos */
3377         DRW_globals_update();
3378
3379         /* Init engines */
3380         DRW_engines_init();
3381
3382         /* TODO: tag for refresh from the depsgraph. */
3383         /* Ideally only refresh when objects are added/removed, */
3384         /* or when render properties / materials change. */
3385         {
3386                 PROFILE_START(stime);
3387                 DRW_engines_cache_init();
3388
3389                 DEG_OBJECT_ITER(graph, ob, DEG_OBJECT_ITER_FLAG_ALL);
3390                 {
3391                         DRW_engines_cache_populate(ob);
3392                 }
3393                 DEG_OBJECT_ITER_END
3394
3395                 DRW_engines_cache_finish();
3396                 PROFILE_END_ACCUM(DST.cache_time, stime);
3397         }
3398
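        /* Begin per-pass GPU timing; results are drawn by DRW_debug_gpu_stats(). */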
3399         DRW_stats_begin();
3400
3401         /* Start Drawing */
3402         DRW_state_reset();
3403         DRW_engines_draw_background();
3404
3405         /* WIP, single image drawn over the camera view (replace) */
3406         bool do_bg_image = false;
3407         if (rv3d->persp == RV3D_CAMOB) {
3408                 Object *cam_ob = v3d->camera;
3409                 if (cam_ob && cam_ob->type == OB_CAMERA) {
3410                         Camera *cam = cam_ob->data;
3411                         if (!BLI_listbase_is_empty(&cam->bg_images)) {
3412                                 do_bg_image = true;
3413                         }
3414                 }
3415         }
3416
3417         extern void view3d_draw_bgpic_test(Scene *scene, ARegion *ar, View3D *v3d,
3418                                                                            const bool do_foreground, cons