Fix border rendering for eevee + stop passing render result around
[blender.git] / source / blender / draw / intern / draw_manager.c
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager.c
23  *  \ingroup draw
24  */
25
26 #include <stdio.h>
27
28 #include "BLI_listbase.h"
29 #include "BLI_mempool.h"
30 #include "BLI_rect.h"
31 #include "BLI_string.h"
32 #include "BLI_string_utils.h"
33
34 #include "BIF_glutil.h"
35
36 #include "BKE_curve.h"
37 #include "BKE_global.h"
38 #include "BKE_mesh.h"
39 #include "BKE_object.h"
40 #include "BKE_pbvh.h"
41 #include "BKE_paint.h"
42 #include "BKE_workspace.h"
43
44 #include "BLT_translation.h"
45 #include "BLF_api.h"
46
47 #include "DRW_engine.h"
48 #include "DRW_render.h"
49
50 #include "DNA_camera_types.h"
51 #include "DNA_curve_types.h"
52 #include "DNA_view3d_types.h"
53 #include "DNA_screen_types.h"
54 #include "DNA_mesh_types.h"
55 #include "DNA_meshdata_types.h"
56 #include "DNA_meta_types.h"
57
58 #include "ED_space_api.h"
59 #include "ED_screen.h"
60
61 #include "intern/gpu_codegen.h"
62 #include "GPU_batch.h"
63 #include "GPU_draw.h"
64 #include "GPU_extensions.h"
65 #include "GPU_framebuffer.h"
66 #include "GPU_immediate.h"
67 #include "GPU_lamp.h"
68 #include "GPU_material.h"
69 #include "GPU_shader.h"
70 #include "GPU_texture.h"
71 #include "GPU_uniformbuffer.h"
72 #include "GPU_viewport.h"
73 #include "GPU_matrix.h"
74
75 #include "IMB_colormanagement.h"
76
77 #include "RE_engine.h"
78 #include "RE_pipeline.h"
79
80 #include "UI_interface.h"
81 #include "UI_resources.h"
82
83 #include "WM_api.h"
84 #include "WM_types.h"
85
86 #include "draw_manager_text.h"
87 #include "draw_manager_profiling.h"
88
89 /* only for callbacks */
90 #include "draw_cache_impl.h"
91
92 #include "draw_instance_data.h"
93
94 #include "draw_mode_engines.h"
95 #include "engines/clay/clay_engine.h"
96 #include "engines/eevee/eevee_engine.h"
97 #include "engines/basic/basic_engine.h"
98 #include "engines/external/external_engine.h"
99
100 #include "DEG_depsgraph.h"
101 #include "DEG_depsgraph_query.h"
102
103 /* -------------------------------------------------------------------- */
104 /** \name Local Features
105  * \{ */
106
/* Enable wall-clock profiling of draw submission (times in milliseconds). */
#define USE_PROFILE

#ifdef USE_PROFILE
#  include "PIL_time.h"

/* Exponential-moving-average falloff used by PROFILE_END_UPDATE. */
#  define PROFILE_TIMER_FALLOFF 0.1

/* Declares and starts a local timer; pair with one of the END macros below. */
#  define PROFILE_START(time_start) \
        double time_start = PIL_check_seconds_timer();

/* Adds the elapsed time (ms) since `time_start` onto `time_accum`. */
#  define PROFILE_END_ACCUM(time_accum, time_start) { \
        time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
} ((void)0)

/* exp average */
#  define PROFILE_END_UPDATE(time_update, time_start) { \
        double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
        time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
                      (_time_delta * PROFILE_TIMER_FALLOFF); \
} ((void)0)

#else  /* USE_PROFILE */

#  define PROFILE_START(time_start) ((void)0)
#  define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
#  define PROFILE_END_UPDATE(time_update, time_start) ((void)0)

#endif  /* USE_PROFILE */


/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
#define USE_GPU_SELECT

#ifdef USE_GPU_SELECT
#  include "ED_view3d.h"
#  include "ED_armature.h"
#  include "GPU_select.h"
#endif

/** \} */


/* Size limits for names/attributes used by the structures below. */
#define MAX_ATTRIB_NAME 32
#define MAX_ATTRIB_COUNT 6 /* Can be adjusted for more */
#define MAX_PASS_NAME 32
#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
153
154 extern char datatoc_gpu_shader_2D_vert_glsl[];
155 extern char datatoc_gpu_shader_3D_vert_glsl[];
156 extern char datatoc_gpu_shader_fullscreen_vert_glsl[];
157
158 /* Prototypes. */
159 static void drw_engines_enable_external(void);
160
161 /* Structures */
/* Kind of value a DRWUniform carries; decides how it is uploaded at draw time. */
typedef enum {
        DRW_UNIFORM_BOOL,
        DRW_UNIFORM_SHORT_TO_INT,
        DRW_UNIFORM_SHORT_TO_FLOAT,
        DRW_UNIFORM_INT,
        DRW_UNIFORM_FLOAT,
        DRW_UNIFORM_TEXTURE,
        DRW_UNIFORM_BUFFER,
        DRW_UNIFORM_MAT3,
        DRW_UNIFORM_MAT4,
        DRW_UNIFORM_BLOCK
} DRWUniformType;

/* Upper bound asserted in drw_interface_uniform (arraysize * length). */
#define MAX_UNIFORM_DATA_SIZE 16

/* One uniform binding, stored in a single-linked list per shading group.
 * `value` is not owned; callers must keep it alive for the frame. */
struct DRWUniform {
        struct DRWUniform *next;
        const void *value;
        int location;
        char type; /* DRWUniformType */
        char length; /* cannot be more than 16 */
        char arraysize; /* cannot be more than 16 too */
};
185
/* Per-shading-group shader interface: uniform list, instancing buffers and
 * cached locations of the builtin matrix uniforms (resolved in
 * drw_interface_init, -1 when the shader does not use them). */
struct DRWInterface {
        DRWUniform *uniforms;   /* DRWUniform, single-linked list */
        /* Dynamic batch */
#ifdef USE_GPU_SELECT
        struct DRWInstanceData *inst_selectid;
        /* Override for single object instances. */
        int override_selectid;
#endif
        Gwn_VertBuf *instance_vbo;
        unsigned int instance_count;
#ifndef NDEBUG
        char attribs_count;
#endif
        /* matrices locations */
        int model;
        int modelinverse;
        int modelview;
        int modelviewinverse;
        int projection;
        int projectioninverse;
        int view;
        int viewinverse;
        int modelviewprojection;
        int viewprojection;
        int viewprojectioninverse;
        int normal;
        int worldnormal;
        int camtexfac;
        int orcotexfac;
        int eye;
        int clipplanes;
};

/* A render pass: an ordered list of shading groups sharing a GPU state. */
struct DRWPass {
        /* Single linked list with last member to append */
        DRWShadingGroup *shgroups;
        DRWShadingGroup *shgroups_last;

        DRWState state;
        char name[MAX_PASS_NAME];
};
227
/* Common prefix of every call struct; `prev` links calls in reverse
 * addition order, `type` is one of the DRW_CALL_* values below. */
typedef struct DRWCallHeader {
        void *prev;

#ifdef USE_GPU_SELECT
        int select_id;
#endif
        uchar type;
} DRWCallHeader;

/* A single draw call: one batch with its object matrix. */
typedef struct DRWCall {
        DRWCallHeader head;

        float obmat[4][4];
        Gwn_Batch *geometry;

        Object *ob; /* Optional */
        ID *ob_data; /* Optional. */
} DRWCall;

/* A callback-driven call: `geometry_fn` emits any number of batches. */
typedef struct DRWCallGenerate {
        DRWCallHeader head;

        float obmat[4][4];

        DRWCallGenerateFn *geometry_fn;
        void *user_data;
} DRWCallGenerate;

/* A group of draw calls sharing one shader and uniform set.
 * Created via DRW_shgroup_*_create and linked into a DRWPass. */
struct DRWShadingGroup {
        struct DRWShadingGroup *next;

        GPUShader *shader;               /* Shader to bind */
        DRWInterface interface;          /* Uniforms pointers */

        /* DRWCall or DRWCallDynamic depending of type */
        void *calls;
        void *calls_first; /* To be able to traverse the list in the order of addition */

        DRWState state_extra;            /* State changes for this batch only (or'd with the pass's state) */
        DRWState state_extra_disable;    /* State changes for this batch only (and'd with the pass's state) */
        unsigned int stencil_mask;       /* Stencil mask to use for stencil test / write operations */
        int type;

        ID *instance_data;         /* Object->data to instance */
        Gwn_Batch *instance_geom;  /* Geometry to instance */
        Gwn_Batch *instancing_geom;/* Instances attributes */
        Gwn_Batch *batch_geom;     /* Result of call batching */

#ifdef USE_GPU_SELECT
        /* backlink to pass we're in */
        DRWPass *pass_parent;
#endif
};

/* Used by DRWShadingGroup.type */
enum {
        DRW_SHG_NORMAL,
        DRW_SHG_POINT_BATCH,
        DRW_SHG_LINE_BATCH,
        DRW_SHG_TRIANGLE_BATCH,
        DRW_SHG_INSTANCE,
        DRW_SHG_INSTANCE_EXTERNAL,
};

/* Used by DRWCall.type */
enum {
        /* A single batch */
        DRW_CALL_SINGLE,
        /* Uses a callback to draw with any number of batches. */
        DRW_CALL_GENERATE,
        /* Arbitrary number of multiple args. */
        DRW_CALL_DYNAMIC,
};
301
302 /** Render State: No persistent data between draw calls. */
303 static struct DRWGlobalState {
304         /* Cache generation */
305         ViewportMemoryPool *vmempool;
306         DRWUniform *last_uniform;
307         DRWCall *last_call;
308         DRWCallGenerate *last_callgenerate;
309         DRWShadingGroup *last_shgroup;
310         DRWInstanceDataList *idatalist;
311         DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE];
312
313         /* Rendering state */
314         GPUShader *shader;
315
316         /* Managed by `DRW_state_set`, `DRW_state_reset` */
317         DRWState state;
318         unsigned int stencil_mask;
319
320         /* Per viewport */
321         GPUViewport *viewport;
322         struct GPUFrameBuffer *default_framebuffer;
323         float size[2];
324         float screenvecs[2][3];
325         float pixsize;
326
327         GLenum backface, frontface;
328
329         /* Clip planes */
330         int num_clip_planes;
331         float clip_planes_eq[MAX_CLIP_PLANES][4];
332
333         struct {
334                 unsigned int is_select : 1;
335                 unsigned int is_depth : 1;
336                 unsigned int is_image_render : 1;
337                 unsigned int is_scene_render : 1;
338                 unsigned int draw_background : 1;
339         } options;
340
341         /* Current rendering context */
342         DRWContextState draw_ctx;
343
344         /* Convenience pointer to text_store owned by the viewport */
345         struct DRWTextStore **text_store_p;
346
347         ListBase enabled_engines; /* RenderEngineType */
348
349         bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */
350
351         /* Profiling */
352         double cache_time;
353 } DST = {NULL};
354
355 /** GPU Resource State: Memory storage between drawing. */
356 static struct DRWResourceState {
357         GPUTexture **bound_texs;
358
359         bool *bound_tex_slots;
360
361         int bind_tex_inc;
362         int bind_ubo_inc;
363 } RST = {NULL};
364
365 static struct DRWMatrixOveride {
366         float mat[6][4][4];
367         bool override[6];
368 } viewport_matrix_override = {{{{0}}}};
369
370 ListBase DRW_engines = {NULL, NULL};
371
372 #ifdef USE_GPU_SELECT
373 static unsigned int g_DRW_select_id = (unsigned int)-1;
374
375 void DRW_select_load_id(unsigned int id)
376 {
377         BLI_assert(G.f & G_PICKSEL);
378         g_DRW_select_id = id;
379 }
380 #endif
381
382
383 /* -------------------------------------------------------------------- */
384
385 /** \name Textures (DRW_texture)
386  * \{ */
387
388 static void drw_texture_get_format(
389         DRWTextureFormat format,
390         GPUTextureFormat *r_data_type, int *r_channels)
391 {
392         switch (format) {
393                 case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
394                 case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
395                 case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
396                 case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
397                 case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
398                 case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
399                 case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break;
400                 case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
401                 case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
402                 case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
403                 case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
404 #if 0
405                 case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
406                 case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
407                 case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
408 #endif
409                 case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
410                 case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
411                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
412                 case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
413                 default :
414                         /* file type not supported you must uncomment it from above */
415                         BLI_assert(false);
416                         break;
417         }
418
419         switch (format) {
420                 case DRW_TEX_RGBA_8:
421                 case DRW_TEX_RGBA_16:
422                 case DRW_TEX_RGBA_32:
423                         *r_channels = 4;
424                         break;
425                 case DRW_TEX_RGB_8:
426                 case DRW_TEX_RGB_16:
427                 case DRW_TEX_RGB_32:
428                 case DRW_TEX_RGB_11_11_10:
429                         *r_channels = 3;
430                         break;
431                 case DRW_TEX_RG_8:
432                 case DRW_TEX_RG_16:
433                 case DRW_TEX_RG_16I:
434                 case DRW_TEX_RG_32:
435                         *r_channels = 2;
436                         break;
437                 default:
438                         *r_channels = 1;
439                         break;
440         }
441 }
442
443 static void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
444 {
445         GPU_texture_bind(tex, 0);
446         if (flags & DRW_TEX_MIPMAP) {
447                 GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
448                 DRW_texture_generate_mipmaps(tex);
449         }
450         else {
451                 GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
452         }
453         GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
454         GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
455         GPU_texture_unbind(tex);
456 }
457
458 GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
459 {
460         GPUTexture *tex;
461         GPUTextureFormat data_type;
462         int channels;
463
464         drw_texture_get_format(format, &data_type, &channels);
465         tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
466         drw_texture_set_parameters(tex, flags);
467
468         return tex;
469 }
470
471 GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
472 {
473         GPUTexture *tex;
474         GPUTextureFormat data_type;
475         int channels;
476
477         drw_texture_get_format(format, &data_type, &channels);
478         tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
479         drw_texture_set_parameters(tex, flags);
480
481         return tex;
482 }
483
484 GPUTexture *DRW_texture_create_2D_array(
485         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
486 {
487         GPUTexture *tex;
488         GPUTextureFormat data_type;
489         int channels;
490
491         drw_texture_get_format(format, &data_type, &channels);
492         tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
493         drw_texture_set_parameters(tex, flags);
494
495         return tex;
496 }
497
498 GPUTexture *DRW_texture_create_3D(
499         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
500 {
501         GPUTexture *tex;
502         GPUTextureFormat data_type;
503         int channels;
504
505         drw_texture_get_format(format, &data_type, &channels);
506         tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
507         drw_texture_set_parameters(tex, flags);
508
509         return tex;
510 }
511
512 GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
513 {
514         GPUTexture *tex;
515         GPUTextureFormat data_type;
516         int channels;
517
518         drw_texture_get_format(format, &data_type, &channels);
519         tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
520         drw_texture_set_parameters(tex, flags);
521
522         return tex;
523 }
524
/* Regenerate the mipmap chain of `tex` (binds to slot 0 temporarily). */
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
        GPU_texture_bind(tex, 0);
        GPU_texture_generate_mipmap(tex);
        GPU_texture_unbind(tex);
}

/* Upload new pixel data to an existing texture. */
void DRW_texture_update(GPUTexture *tex, const float *pixels)
{
        GPU_texture_update(tex, pixels);
}

/* Free a texture created by one of the DRW_texture_create_* functions. */
void DRW_texture_free(GPUTexture *tex)
{
        GPU_texture_free(tex);
}
541
542 /** \} */
543
544
545 /* -------------------------------------------------------------------- */
546
547 /** \name Uniform Buffer Object (DRW_uniformbuffer)
548  * \{ */
549
/* Thin pass-throughs over the GPU uniform-buffer API; `data` is copied
 * into the UBO, the caller keeps ownership of it. */
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
        return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
        GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
        GPU_uniformbuffer_free(ubo);
}
564
565 /** \} */
566
567
568 /* -------------------------------------------------------------------- */
569
570 /** \name Shaders (DRW_shader)
571  * \{ */
572
/* Create a shader from GLSL stage sources.
 * NOTE: this wrapper takes (vert, geom, frag) while GPU_shader_create
 * expects (vert, frag, geom) -- the argument swap below is intentional. */
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
        return GPU_shader_create(vert, frag, geom, NULL, defines);
}
577
578 GPUShader *DRW_shader_create_with_lib(
579         const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
580 {
581         GPUShader *sh;
582         char *vert_with_lib = NULL;
583         char *frag_with_lib = NULL;
584         char *geom_with_lib = NULL;
585
586         vert_with_lib = BLI_string_joinN(lib, vert);
587         frag_with_lib = BLI_string_joinN(lib, frag);
588
589         if (geom) {
590                 geom_with_lib = BLI_string_joinN(lib, geom);
591         }
592
593         sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
594
595         MEM_freeN(vert_with_lib);
596         MEM_freeN(frag_with_lib);
597         if (geom) {
598                 MEM_freeN(geom_with_lib);
599         }
600
601         return sh;
602 }
603
/* Fragment-shader-only constructors pairing `frag` with a builtin vertex stage. */
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
}

/* Pairs `frag` with a fullscreen-triangle vertex shader (post-process passes). */
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
        return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines);
}

/* Returns a shared builtin shader; do not free the result with DRW_shader_free. */
GPUShader *DRW_shader_create_3D_depth_only(void)
{
        return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
}

void DRW_shader_free(GPUShader *shader)
{
        GPU_shader_free(shader);
}
628
629 /** \} */
630
631
632 /* -------------------------------------------------------------------- */
633
634 /** \name Interface (DRW_interface)
635  * \{ */
636
/* Reset an interface and cache the locations of all builtin uniforms of
 * `shader` (a location of -1 means the shader does not use that uniform). */
static void drw_interface_init(DRWInterface *interface, GPUShader *shader)
{
        interface->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
        interface->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
        interface->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
        interface->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
        interface->projection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION);
        interface->projectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION_INV);
        interface->view = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW);
        interface->viewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW_INV);
        interface->viewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION);
        interface->viewprojectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION_INV);
        interface->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
        interface->normal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
        interface->worldnormal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
        interface->camtexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CAMERATEXCO);
        interface->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
        interface->clipplanes = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CLIPPLANES);
        interface->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
        /* Clear per-group dynamic state. */
        interface->instance_count = 0;
#ifndef NDEBUG
        interface->attribs_count = 0;
#endif
        interface->uniforms = NULL;
#ifdef USE_GPU_SELECT
        interface->inst_selectid = NULL;
        interface->override_selectid = -1;
#endif
}
666
/* Initialize the interface of an instancing shading group and, when an
 * attribute `format` is given, request an instancing buffer for it.
 * With a NULL format no per-instance attribute buffer is created. */
static void drw_interface_instance_init(
        DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format)
{
        DRWInterface *interface = &shgroup->interface;
        drw_interface_init(interface, shader);

#ifndef NDEBUG
        interface->attribs_count = (format != NULL) ? format->attrib_ct : 0;
#endif
        BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
        BLI_assert(shgroup->instance_geom != NULL);

        if (format != NULL) {
                DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
                                              &shgroup->instancing_geom, &interface->instance_vbo);
        }
}
684
685 static void drw_interface_batching_init(
686         DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format)
687 {
688         DRWInterface *interface = &shgroup->interface;
689         drw_interface_init(interface, shader);
690
691 #ifndef NDEBUG
692         interface->attribs_count = (format != NULL) ? format->attrib_ct : 0;
693 #endif
694         BLI_assert(format != NULL);
695
696         Gwn_PrimType type;
697         switch (shgroup->type) {
698                 case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break;
699                 case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break;
700                 case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break;
701                 default:
702                         BLI_assert(0);
703         }
704
705         DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
706                                     &shgroup->batch_geom, &interface->instance_vbo);
707 }
708
709 static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name,
710                                   DRWUniformType type, const void *value, int length, int arraysize)
711 {
712         int location;
713         if (type == DRW_UNIFORM_BLOCK) {
714                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
715         }
716         else {
717                 location = GPU_shader_get_uniform(shgroup->shader, name);
718         }
719
720         if (location == -1) {
721                 if (G.debug & G_DEBUG)
722                         fprintf(stderr, "Uniform '%s' not found!\n", name);
723                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
724                 // BLI_assert(0);
725                 return;
726         }
727
728         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
729
730         BLI_assert(arraysize > 0 && arraysize <= 16);
731         BLI_assert(arraysize * length <= MAX_UNIFORM_DATA_SIZE);
732
733         uni->location = location;
734         uni->type = type;
735         uni->value = value;
736         uni->length = length;
737         uni->arraysize = arraysize;
738
739         /* Prepend */
740         uni->next = shgroup->interface.uniforms;
741         shgroup->interface.uniforms = uni;
742 }
743
744 Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
745 {
746         Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat");
747
748         for (int i = 0; i < arraysize; ++i) {
749                 GWN_vertformat_attr_add(format, attribs[i].name,
750                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32,
751                                         attribs[i].components,
752                                         (attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT);
753         }
754         return format;
755 }
756
757 /** \} */
758
759
760 /* -------------------------------------------------------------------- */
761
762 /** \name Shading Group (DRW_shgroup)
763  * \{ */
764
765 static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
766 {
767         DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
768
769         /* Append */
770         if (pass->shgroups != NULL) {
771                 pass->shgroups_last->next = shgroup;
772         }
773         else {
774                 pass->shgroups = shgroup;
775         }
776         pass->shgroups_last = shgroup;
777         shgroup->next = NULL;
778         shgroup->type = DRW_SHG_NORMAL;
779         shgroup->shader = shader;
780         shgroup->state_extra = 0;
781         shgroup->state_extra_disable = ~0x0;
782         shgroup->stencil_mask = 0;
783         shgroup->batch_geom = NULL;
784         shgroup->instancing_geom = NULL;
785         shgroup->instance_geom = NULL;
786         shgroup->instance_data = NULL;
787
788         shgroup->calls = NULL;
789         shgroup->calls_first = NULL;
790
791 #ifdef USE_GPU_SELECT
792         shgroup->pass_parent = pass;
793 #endif
794
795         return shgroup;
796 }
797
798 static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
799 {
800         if (!gpupass) {
801                 /* Shader compilation error */
802                 return NULL;
803         }
804
805         DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
806         return grp;
807 }
808
/* Bind a material's dynamic GPU inputs (image textures, color-ramp textures,
 * dynamic matrices and the material UBO) as uniforms on `grp`. Returns `grp`. */
static DRWShadingGroup *drw_shgroup_material_inputs(
         DRWShadingGroup *grp, struct GPUMaterial *material, GPUPass *gpupass)
{
        /* TODO : Ideally we should not convert. But since the whole codegen
         * is relying on GPUPass we keep it as is for now. */

        /* Converting dynamic GPUInput to DRWUniform */
        ListBase *inputs = &gpupass->inputs;

        for (GPUInput *input = inputs->first; input; input = input->next) {
                /* Textures */
                if (input->ima) {
                        double time = 0.0; /* TODO make time variable */
                        GPUTexture *tex = GPU_texture_from_blender(
                                input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);

                        /* NOTE(review): presumably `bindtex` marks inputs the shader samples
                         * directly -- confirm against GPUInput's declaration. */
                        if (input->bindtex) {
                                DRW_shgroup_uniform_texture(grp, input->shadername, tex);
                        }
                }
                /* Color Ramps */
                else if (input->tex) {
                        DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
                }
                /* Floats */
                else {
                        switch (input->type) {
                                case GPU_FLOAT:
                                case GPU_VEC2:
                                case GPU_VEC3:
                                case GPU_VEC4:
                                        /* Should already be in the material ubo. */
                                        break;
                                case GPU_MAT3:
                                        DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
                                        break;
                                case GPU_MAT4:
                                        DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
                                        break;
                                default:
                                        break;
                        }
                }
        }

        /* Bind the material uniform buffer when the material has one. */
        GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
        if (ubo != NULL) {
                DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
        }

        return grp;
}
861
862 DRWShadingGroup *DRW_shgroup_material_create(
863         struct GPUMaterial *material, DRWPass *pass)
864 {
865         GPUPass *gpupass = GPU_material_get_pass(material);
866         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
867
868         if (shgroup) {
869                 drw_interface_init(&shgroup->interface, GPU_pass_shader(gpupass));
870                 drw_shgroup_material_inputs(shgroup, material, gpupass);
871         }
872
873         return shgroup;
874 }
875
876 DRWShadingGroup *DRW_shgroup_material_instance_create(
877         struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format)
878 {
879         GPUPass *gpupass = GPU_material_get_pass(material);
880         DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
881
882         if (shgroup) {
883                 shgroup->type = DRW_SHG_INSTANCE;
884                 shgroup->instance_geom = geom;
885                 shgroup->instance_data = ob->data;
886                 drw_interface_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
887                 drw_shgroup_material_inputs(shgroup, material, gpupass);
888         }
889
890         return shgroup;
891 }
892
/* Create a material shading group that draws `tri_count` attribute-less
 * triangles (vertex data generated in the shader). Not usable during
 * GPU selection. Returns NULL when the material shader failed to compile. */
DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
        struct GPUMaterial *material, DRWPass *pass, int tri_count)
{
#ifdef USE_GPU_SELECT
        BLI_assert((G.f & G_PICKSEL) == 0);
#endif
        GPUPass *gpupass = GPU_material_get_pass(material);
        DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);

        if (shgroup) {
                /* Calling drw_interface_init will cause it to call GWN_draw_primitive().
                 * NOTE(review): that claim is not visible from this file -- confirm. */
                drw_interface_init(&shgroup->interface, GPU_pass_shader(gpupass));
                shgroup->type = DRW_SHG_TRIANGLE_BATCH;
                shgroup->interface.instance_count = tri_count * 3; /* 3 vertices per triangle */
                drw_shgroup_material_inputs(shgroup, material, gpupass);
        }

        return shgroup;
}
912
913 DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
914 {
915         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
916         drw_interface_init(&shgroup->interface, shader);
917         return shgroup;
918 }
919
920 DRWShadingGroup *DRW_shgroup_instance_create(
921         struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format)
922 {
923         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
924         shgroup->type = DRW_SHG_INSTANCE;
925         shgroup->instance_geom = geom;
926
927         drw_interface_instance_init(shgroup, shader, geom, format);
928
929         return shgroup;
930 }
931
932 static Gwn_VertFormat *g_pos_format = NULL;
933
934 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
935 {
936         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
937
938         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
939         shgroup->type = DRW_SHG_POINT_BATCH;
940
941         drw_interface_batching_init(shgroup, shader, g_pos_format);
942
943         return shgroup;
944 }
945
946 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
947 {
948         DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
949
950         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
951         shgroup->type = DRW_SHG_LINE_BATCH;
952
953         drw_interface_batching_init(shgroup, shader, g_pos_format);
954
955         return shgroup;
956 }
957
958 /* Very special batch. Use this if you position
959  * your vertices with the vertex shader
960  * and dont need any VBO attrib */
961 DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
962 {
963 #ifdef USE_GPU_SELECT
964         BLI_assert((G.f & G_PICKSEL) == 0);
965 #endif
966         DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
967
968         /* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */
969         drw_interface_init(&shgroup->interface, shader);
970
971         shgroup->type = DRW_SHG_TRIANGLE_BATCH;
972         shgroup->interface.instance_count = tri_count * 3;
973
974         return shgroup;
975 }
976
void DRW_shgroup_free(struct DRWShadingGroup *UNUSED(shgroup))
{
	/* Shading groups live in DST.vmempool and are released all at once
	 * with the pool; there is nothing to free per group. */
}
981
/* Prepend 'call' to the shgroup's call list. 'calls' is the most recently
 * added entry (list head); 'calls_first' stays pointing at the oldest one.
 * Each node's head.prev links to the next more-recently-added call, so
 * walking 'prev' from calls_first visits calls in insertion order. */
#define CALL_PREPEND(shgroup, call) { \
	if (shgroup->calls == NULL) { \
		shgroup->calls = call; \
		shgroup->calls_first = call; \
	} \
	else { \
		((DRWCall *)(shgroup->calls))->head.prev = call; \
		shgroup->calls = call; \
	} \
	call->head.prev = NULL; \
} ((void)0)
993
/* Specify an external batch instead of adding each attrib one by one. */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	/* Must not be mixed with per-attrib instance data already added. */
	BLI_assert(shgroup->interface.instance_count == 0);
	/* You cannot use external instancing batch without a dummy format. */
	BLI_assert(shgroup->instancing_geom != NULL);

	shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
	/* PERF : This destroys the vaos cache so better check if it's necessary. */
	/* Note: This WILL break if batch->verts[0] is destroyed and reallocated
	 * at the same address. Bindings/VAOs would remain obsolete. */
	//if (shgroup->instancing_geom->inst != batch->verts[0])
	GWN_batch_instbuf_set(shgroup->instancing_geom, batch->verts[0], false);

#ifdef USE_GPU_SELECT
	/* Register a (data-less) call so the group still carries a select id. */
	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
	call->head.select_id = g_DRW_select_id;

	CALL_PREPEND(shgroup, call);
#endif
}
1016
1017 void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
1018 {
1019         BLI_assert(geom != NULL);
1020         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1021
1022         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
1023
1024         CALL_PREPEND(shgroup, call);
1025
1026         call->head.type = DRW_CALL_SINGLE;
1027 #ifdef USE_GPU_SELECT
1028         call->head.select_id = g_DRW_select_id;
1029 #endif
1030
1031         if (obmat != NULL) {
1032                 copy_m4_m4(call->obmat, obmat);
1033         }
1034         else {
1035                 unit_m4(call->obmat);
1036         }
1037
1038         call->geometry = geom;
1039         call->ob_data = NULL;
1040 }
1041
1042 void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
1043 {
1044         BLI_assert(geom != NULL);
1045         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1046
1047         DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
1048
1049         CALL_PREPEND(shgroup, call);
1050
1051         call->head.type = DRW_CALL_SINGLE;
1052 #ifdef USE_GPU_SELECT
1053         call->head.select_id = g_DRW_select_id;
1054 #endif
1055
1056         copy_m4_m4(call->obmat, ob->obmat);
1057         call->geometry = geom;
1058         call->ob_data = ob->data;
1059 }
1060
1061 void DRW_shgroup_call_generate_add(
1062         DRWShadingGroup *shgroup,
1063         DRWCallGenerateFn *geometry_fn, void *user_data,
1064         float (*obmat)[4])
1065 {
1066         BLI_assert(geometry_fn != NULL);
1067         BLI_assert(shgroup->type == DRW_SHG_NORMAL);
1068
1069         DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);
1070
1071         CALL_PREPEND(shgroup, call);
1072
1073         call->head.type = DRW_CALL_GENERATE;
1074 #ifdef USE_GPU_SELECT
1075         call->head.select_id = g_DRW_select_id;
1076 #endif
1077
1078         if (obmat != NULL) {
1079                 copy_m4_m4(call->obmat, obmat);
1080         }
1081         else {
1082                 unit_m4(call->obmat);
1083         }
1084
1085         call->geometry_fn = geometry_fn;
1086         call->user_data = user_data;
1087 }
1088
1089 static void sculpt_draw_cb(
1090         DRWShadingGroup *shgroup,
1091         void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
1092         void *user_data)
1093 {
1094         Object *ob = user_data;
1095         PBVH *pbvh = ob->sculpt->pbvh;
1096
1097         if (pbvh) {
1098                 BKE_pbvh_draw_cb(
1099                         pbvh, NULL, NULL, false,
1100                         (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
1101         }
1102 }
1103
/* Queue a sculpt-mode object: geometry is generated per PBVH node at draw
 * time by sculpt_draw_cb ('ob' is passed through as user_data). */
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
	DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
1108
1109 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
1110 {
1111         DRWInterface *interface = &shgroup->interface;
1112
1113 #ifdef USE_GPU_SELECT
1114         if (G.f & G_PICKSEL) {
1115                 if (interface->inst_selectid == NULL) {
1116                         interface->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
1117                 }
1118
1119                 int *select_id = DRW_instance_data_next(interface->inst_selectid);
1120                 *select_id = g_DRW_select_id;
1121         }
1122 #endif
1123
1124         BLI_assert(attr_len == interface->attribs_count);
1125         UNUSED_VARS_NDEBUG(attr_len);
1126
1127         for (int i = 0; i < attr_len; ++i) {
1128                 if (interface->instance_count == interface->instance_vbo->vertex_ct) {
1129                         GWN_vertbuf_data_resize(interface->instance_vbo, interface->instance_count + 32);
1130                 }
1131                 GWN_vertbuf_attr_set(interface->instance_vbo, i, interface->instance_count, attr[i]);
1132         }
1133
1134         interface->instance_count += 1;
1135 }
1136
1137 /* Used for instancing with no attributes */
1138 void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, unsigned int count)
1139 {
1140         DRWInterface *interface = &shgroup->interface;
1141
1142         BLI_assert(interface->instance_count == 0);
1143         BLI_assert(interface->attribs_count == 0);
1144
1145 #ifdef USE_GPU_SELECT
1146         if (G.f & G_PICKSEL) {
1147                 interface->override_selectid = g_DRW_select_id;
1148         }
1149 #endif
1150
1151         interface->instance_count = count;
1152 }
1153
1154 unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
1155 {
1156         return shgroup->interface.instance_count;
1157 }
1158
1159 /**
1160  * State is added to #Pass.state while drawing.
1161  * Use to temporarily enable draw options.
1162  */
1163 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
1164 {
1165         shgroup->state_extra |= state;
1166 }
1167
/* Clears 'state' bits from the group's disable mask, removing those options
 * from the pass state while this group draws.
 * NOTE(review): this assumes state_extra_disable starts as an all-ones mask
 * at group creation — confirm in the (not visible here) group init code. */
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra_disable &= ~state;
}
1172
/* Set the stencil reference value used when this group draws (see
 * drw_stencil_set). Stencil buffer is 8 bit, hence the 255 limit. */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
	BLI_assert(mask <= 255);
	shgroup->stencil_mask = mask;
}
1178
/* ---------------- Typed uniform wrappers ----------------
 * Thin convenience wrappers over drw_interface_uniform(). The two trailing
 * numeric arguments are (components per element, array size); pointers are
 * stored, not copied, so 'value' must outlive the draw. */

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Indirect texture reference: *tex is dereferenced at draw time. */
void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
}

void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

/* Widening conversions from short storage, done at upload time. */
void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT3, value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT4, value, 16, 1);
}
1253
1254 /** \} */
1255
1256
1257 /* -------------------------------------------------------------------- */
1258
1259 /** \name Passes (DRW_pass)
1260  * \{ */
1261
1262 DRWPass *DRW_pass_create(const char *name, DRWState state)
1263 {
1264         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
1265         pass->state = state;
1266         BLI_strncpy(pass->name, name, MAX_PASS_NAME);
1267
1268         pass->shgroups = NULL;
1269         pass->shgroups_last = NULL;
1270
1271         return pass;
1272 }
1273
/* Replace the pass's draw state wholesale (takes effect next draw; does not
 * touch currently bound GL state). */
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
	pass->state = state;
}
1278
1279 void DRW_pass_free(DRWPass *pass)
1280 {
1281         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1282                 DRW_shgroup_free(shgroup);
1283         }
1284
1285         pass->shgroups = NULL;
1286         pass->shgroups_last = NULL;
1287 }
1288
1289 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
1290 {
1291         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1292                 callback(userData, shgroup);
1293         }
1294 }
1295
/* Thunk for pass_shgroup_dist_sort(): view axis and origin used to measure
 * each shgroup's distance (pointers into a view matrix, not owned). */
typedef struct ZSortData {
	float *axis;
	float *origin;
} ZSortData;
1300
/**
 * Comparator for sorting shading groups back-to-front along the view axis.
 *
 * Only each group's FIRST call is considered (its obmat translation).
 * Despite the '_sq' names, the compared values are plain dot products,
 * i.e. signed distances along 'axis' — not squared lengths.
 *
 * Returns -1 to keep 'a' before 'b', 1 to put it after. Groups without
 * calls sort first; on equal distance, a depth-writing (prepass) group
 * is ordered before the other.
 */
static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
	const ZSortData *zsortdata = (ZSortData *)thunk;
	const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
	const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

	const DRWCall *call_a;
	const DRWCall *call_b;

	call_a = shgrp_a->calls_first;
	call_b = shgrp_b->calls_first;

	if (call_a == NULL) return -1;
	if (call_b == NULL) return -1;

	float tmp[3];
	sub_v3_v3v3(tmp, zsortdata->origin, call_a->obmat[3]);
	const float a_sq = dot_v3v3(zsortdata->axis, tmp);
	sub_v3_v3v3(tmp, zsortdata->origin, call_b->obmat[3]);
	const float b_sq = dot_v3v3(zsortdata->axis, tmp);

	if      (a_sq < b_sq) return  1;
	else if (a_sq > b_sq) return -1;
	else {
		/* If there is a depth prepass put it before */
		if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
			return -1;
		}
		else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
			return  1;
		}
		else return  0;
	}
}
1335
/* ------------------ Shading group sorting --------------------- */

/* Instantiate BLI's generic linked-list merge sort template for
 * DRWShadingGroup, generating shgroup_sort_fn_r() (the thunk-aware
 * comparator variant used by DRW_pass_sort_shgroup_z below). */
#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE
1347
1348 /**
1349  * Sort Shading groups by decreasing Z of their first draw call.
1350  * This is usefull for order dependant effect such as transparency.
1351  **/
1352 void DRW_pass_sort_shgroup_z(DRWPass *pass)
1353 {
1354         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1355
1356         float (*viewinv)[4];
1357         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1358                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1359
1360         ZSortData zsortdata = {viewinv[2], viewinv[3]};
1361
1362         if (pass->shgroups && pass->shgroups->next) {
1363                 pass->shgroups = shgroup_sort_fn_r(pass->shgroups, pass_shgroup_dist_sort, &zsortdata);
1364
1365                 /* Find the next last */
1366                 DRWShadingGroup *last = pass->shgroups;
1367                 while ((last = last->next)) {
1368                         /* Do nothing */
1369                 }
1370                 pass->shgroups_last = last;
1371         }
1372 }
1373
1374 /** \} */
1375
1376
1377 /* -------------------------------------------------------------------- */
1378
1379 /** \name Draw (DRW_draw)
1380  * \{ */
1381
/**
 * Apply a DRWState to the GL context.
 *
 * Diffs 'state' against the currently applied DST.state and only issues GL
 * calls for the option groups that actually changed, then records the new
 * state in DST.state.
 */
static void drw_state_set(DRWState state)
{
	if (DST.state == state) {
		return;
	}


/* Evaluates to -1 when flag(s) f turn off, 1 when they turn on, 0 if unchanged. */
#define CHANGED_TO(f) \
	((DST.state & (f)) ? \
		((state & (f)) ?  0 : -1) : \
		((state & (f)) ?  1 :  0))

/* True when any of the flags in f differ between old and new state. */
#define CHANGED_ANY(f) \
	((DST.state & (f)) != (state & (f)))

/* Like CHANGED_ANY, but also stores the new masked flags into 'enabled'. */
#define CHANGED_ANY_STORE_VAR(f, enabled) \
	((DST.state & (f)) != (enabled = (state & (f))))

	/* Depth Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
			if (test == 1) {
				glDepthMask(GL_TRUE);
			}
			else {
				glDepthMask(GL_FALSE);
			}
		}
	}

	/* Color Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
			if (test == 1) {
				glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
			}
			else {
				glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
			}
		}
	}

	/* Cull */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
			test))
		{
			if (test) {
				glEnable(GL_CULL_FACE);

				if ((state & DRW_STATE_CULL_BACK) != 0) {
					glCullFace(GL_BACK);
				}
				else if ((state & DRW_STATE_CULL_FRONT) != 0) {
					glCullFace(GL_FRONT);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_CULL_FACE);
			}
		}
	}

	/* Depth Test */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
			test))
		{
			if (test) {
				glEnable(GL_DEPTH_TEST);

				if (state & DRW_STATE_DEPTH_LESS) {
					/* Note: LESS is mapped to GL_LEQUAL (not GL_LESS). */
					glDepthFunc(GL_LEQUAL);
				}
				else if (state & DRW_STATE_DEPTH_EQUAL) {
					glDepthFunc(GL_EQUAL);
				}
				else if (state & DRW_STATE_DEPTH_GREATER) {
					glDepthFunc(GL_GREATER);
				}
				else if (state & DRW_STATE_DEPTH_ALWAYS) {
					glDepthFunc(GL_ALWAYS);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_DEPTH_TEST);
			}
		}
	}

	/* Wire Width */
	{
		if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_LARGE)) {
			if ((state & DRW_STATE_WIRE) != 0) {
				glLineWidth(1.0f);
			}
			else if ((state & DRW_STATE_WIRE_LARGE) != 0) {
				glLineWidth(UI_GetThemeValuef(TH_OUTLINE_WIDTH) * 2.0f);
			}
			else {
				/* do nothing */
			}
		}
	}

	/* Points Size */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_POINT))) {
			if (test == 1) {
				GPU_enable_program_point_size();
				glPointSize(5.0f);
			}
			else {
				GPU_disable_program_point_size();
			}
		}
	}

	/* Blending (all buffer) */
	{
		/* NOTE(review): stores DRWState flags into an 'int' (the Cull/Depth
		 * sections use 'DRWState test') — fine while the flags fit in 32 bits. */
		int test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION |
			DRW_STATE_ADDITIVE_FULL,
			test))
		{
			if (test) {
				glEnable(GL_BLEND);

				if ((state & DRW_STATE_BLEND) != 0) {
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
					                    GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
				}
				else if ((state & DRW_STATE_MULTIPLY) != 0) {
					glBlendFunc(GL_DST_COLOR, GL_ZERO);
				}
				else if ((state & DRW_STATE_TRANSMISSION) != 0) {
					glBlendFunc(GL_ONE, GL_SRC_ALPHA);
				}
				else if ((state & DRW_STATE_ADDITIVE) != 0) {
					/* Do not let alpha accumulate but premult the source RGB by it. */
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
					                    GL_ZERO, GL_ONE); /* Alpha */
				}
				else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
					/* Let alpha accumulate. */
					glBlendFunc(GL_ONE, GL_ONE);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_BLEND);
			}
		}
	}

	/* Clip Planes */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
			if (test == 1) {
				for (int i = 0; i < DST.num_clip_planes; ++i) {
					glEnable(GL_CLIP_DISTANCE0 + i);
				}
			}
			else {
				/* Disable over the full range (not DST.num_clip_planes):
				 * the count may have changed since planes were enabled. */
				for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
					glDisable(GL_CLIP_DISTANCE0 + i);
				}
			}
		}
	}

	/* Line Stipple */
	{
		int test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
			test))
		{
			if (test) {
				if ((state & DRW_STATE_STIPPLE_2) != 0) {
					setlinestyle(2);
				}
				else if ((state & DRW_STATE_STIPPLE_3) != 0) {
					setlinestyle(3);
				}
				else if ((state & DRW_STATE_STIPPLE_4) != 0) {
					setlinestyle(4);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				setlinestyle(0);
			}
		}
	}

	/* Stencil */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
			DRW_STATE_WRITE_STENCIL |
			DRW_STATE_STENCIL_EQUAL,
			test))
		{
			if (test) {
				glEnable(GL_STENCIL_TEST);

				/* Stencil Write */
				if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
					glStencilMask(0xFF);
					glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
				}
				/* Stencil Test */
				else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
					glStencilMask(0x00); /* disable write */
					/* The actual glStencilFunc is set per-shgroup later,
					 * by drw_stencil_set(). */
					DST.stencil_mask = 0;
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				/* disable write & test */
				DST.stencil_mask = 0;
				glStencilMask(0x00);
				glStencilFunc(GL_ALWAYS, 1, 0xFF);
				glDisable(GL_STENCIL_TEST);
			}
		}
	}

#undef CHANGED_TO
#undef CHANGED_ANY
#undef CHANGED_ANY_STORE_VAR

	DST.state = state;
}
1638
/**
 * Bind 'mask' as the stencil reference value for the current state,
 * choosing GL_ALWAYS (write) or GL_EQUAL (test) from DST.state.
 * No-op when the mask is already bound or no stencil state is active.
 */
static void drw_stencil_set(unsigned int mask)
{
	if (DST.stencil_mask != mask) {
		/* Stencil Write */
		if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
			glStencilFunc(GL_ALWAYS, mask, 0xFF);
			DST.stencil_mask = mask;
		}
		/* Stencil Test */
		else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
			glStencilFunc(GL_EQUAL, mask, 0xFF);
			DST.stencil_mask = mask;
		}
	}
}
1654
/* Linked-list node recording a texture bound during drawing, presumably so
 * it can be unbound when the draw is done — confirm against the (not
 * visible here) bind/unbind code that uses it. */
typedef struct DRWBoundTexture {
	struct DRWBoundTexture *next, *prev;
	GPUTexture *tex;
} DRWBoundTexture;
1659
1660 static void draw_geometry_prepare(
1661         DRWShadingGroup *shgroup, const float (*obmat)[4], const float *texcoloc, const float *texcosize)
1662 {
1663         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1664         DRWInterface *interface = &shgroup->interface;
1665
1666         float mvp[4][4], mv[4][4], mi[4][4], mvi[4][4], pi[4][4], n[3][3], wn[3][3];
1667         float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
1668         float eye[3] = { 0.0f, 0.0f, 1.0f }; /* looking into the screen */
1669         float viewcamtexcofac[4] = { 1.0f, 1.0f, 0.0f, 0.0f };
1670
1671         if (rv3d != NULL) {
1672                 copy_v4_v4(viewcamtexcofac, rv3d->viewcamtexcofac);
1673         }
1674
1675         bool do_pi = (interface->projectioninverse != -1);
1676         bool do_mvp = (interface->modelviewprojection != -1);
1677         bool do_mi = (interface->modelinverse != -1);
1678         bool do_mv = (interface->modelview != -1);
1679         bool do_mvi = (interface->modelviewinverse != -1);
1680         bool do_n = (interface->normal != -1);
1681         bool do_wn = (interface->worldnormal != -1);
1682         bool do_eye = (interface->eye != -1);
1683         bool do_orco = (interface->orcotexfac != -1) && (texcoloc != NULL) && (texcosize != NULL);
1684
1685         /* Matrix override */
1686         float (*persmat)[4];
1687         float (*persinv)[4];
1688         float (*viewmat)[4];
1689         float (*viewinv)[4];
1690         float (*winmat)[4];
1691         float (*wininv)[4];
1692
1693         persmat = (viewport_matrix_override.override[DRW_MAT_PERS])
1694                   ? viewport_matrix_override.mat[DRW_MAT_PERS] : rv3d->persmat;
1695         persinv = (viewport_matrix_override.override[DRW_MAT_PERSINV])
1696                   ? viewport_matrix_override.mat[DRW_MAT_PERSINV] : rv3d->persinv;
1697         viewmat = (viewport_matrix_override.override[DRW_MAT_VIEW])
1698                   ? viewport_matrix_override.mat[DRW_MAT_VIEW] : rv3d->viewmat;
1699         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1700                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1701         winmat = (viewport_matrix_override.override[DRW_MAT_WIN])
1702                   ? viewport_matrix_override.mat[DRW_MAT_WIN] : rv3d->winmat;
1703         wininv = viewport_matrix_override.mat[DRW_MAT_WININV];
1704
1705         if (do_pi) {
1706                 if (!viewport_matrix_override.override[DRW_MAT_WININV]) {
1707                         invert_m4_m4(pi, winmat);
1708                         wininv = pi;
1709                 }
1710         }
1711         if (do_mi) {
1712                 invert_m4_m4(mi, obmat);
1713         }
1714         if (do_mvp) {
1715                 mul_m4_m4m4(mvp, persmat, obmat);
1716         }
1717         if (do_mv || do_mvi || do_n || do_eye) {
1718                 mul_m4_m4m4(mv, viewmat, obmat);
1719         }
1720         if (do_mvi) {
1721                 invert_m4_m4(mvi, mv);
1722         }
1723         if (do_n || do_eye) {
1724                 copy_m3_m4(n, mv);
1725                 invert_m3(n);
1726                 transpose_m3(n);
1727         }
1728         if (do_wn) {
1729                 copy_m3_m4(wn, obmat);
1730                 invert_m3(wn);
1731                 transpose_m3(wn);
1732         }
1733         if (do_eye) {
1734                 /* Used by orthographic wires */
1735                 float tmp[3][3];
1736                 invert_m3_m3(tmp, n);
1737                 /* set eye vector, transformed to object coords */
1738                 mul_m3_v3(tmp, eye);
1739         }
1740         if (do_orco) {
1741                 mul_v3_v3fl(orcofacs[1], texcosize, 2.0f);
1742                 invert_v3(orcofacs[1]);
1743                 sub_v3_v3v3(orcofacs[0], texcoloc, texcosize);
1744                 negate_v3(orcofacs[0]);
1745                 mul_v3_v3(orcofacs[0], orcofacs[1]); /* result in a nice MADD in the shader */
1746         }
1747
1748         /* Should be really simple */
1749         /* step 1 : bind object dependent matrices */
1750         /* TODO : Some of these are not object dependant.
1751          * They should be grouped inside a UBO updated once per redraw.
1752          * The rest can also go into a UBO to reduce API calls. */
1753         GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)obmat);
1754         GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)mi);
1755         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)mvp);
1756         GPU_shader_uniform_vector(shgroup->shader, interface->viewinverse, 16, 1, (float *)viewinv);
1757         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojection, 16, 1, (float *)persmat);
1758         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojectioninverse, 16, 1, (float *)persinv);
1759         GPU_shader_uniform_vector(shgroup->shader, interface->projection, 16, 1, (float *)winmat);
1760         GPU_shader_uniform_vector(shgroup->shader, interface->projectioninverse, 16, 1, (float *)wininv);
1761         GPU_shader_uniform_vector(shgroup->shader, interface->view, 16, 1, (float *)viewmat);
1762         GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)mv);
1763         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)mvi);
1764         GPU_shader_uniform_vector(shgroup->shader, interface->normal, 9, 1, (float *)n);
1765         GPU_shader_uniform_vector(shgroup->shader, interface->worldnormal, 9, 1, (float *)wn);
1766         GPU_shader_uniform_vector(shgroup->shader, interface->camtexfac, 4, 1, (float *)viewcamtexcofac);
1767         GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)orcofacs);
1768         GPU_shader_uniform_vector(shgroup->shader, interface->eye, 3, 1, (float *)eye);
1769         GPU_shader_uniform_vector(shgroup->shader, interface->clipplanes, 4, DST.num_clip_planes, (float *)DST.clip_planes_eq);
1770 }
1771
1772 static void draw_geometry_execute_ex(
1773         DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count)
1774 {
1775         /* Special case: empty drawcall, placement is done via shader, don't bind anything. */
1776         if (geom == NULL) {
1777                 BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
1778                 /* Shader is already bound. */
1779                 GWN_draw_primitive(GWN_PRIM_TRIS, count);
1780                 return;
1781         }
1782
1783         /* step 2 : bind vertex array & draw */
1784         GWN_batch_program_set(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
1785         if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
1786                 GWN_batch_draw_range_ex(geom, start, count, true);
1787         }
1788         else {
1789                 GWN_batch_draw_range(geom, start, count);
1790         }
1791         /* XXX this just tells gawain we are done with the shader.
1792          * This does not unbind the shader. */
1793         GWN_batch_program_unset(geom);
1794 }
1795
/* Convenience wrapper around draw_geometry_execute_ex with start/count of 0
 * (presumably meaning "draw the full range" — see GWN_batch_draw_range). */
static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
{
	draw_geometry_execute_ex(shgroup, geom, 0, 0);
}
1800
1801 static void draw_geometry(
1802         DRWShadingGroup *shgroup, Gwn_Batch *geom, const float (*obmat)[4], ID *ob_data,
1803         unsigned int start, unsigned int count)
1804 {
1805         float *texcoloc = NULL;
1806         float *texcosize = NULL;
1807
1808         if (ob_data != NULL) {
1809                 switch (GS(ob_data->name)) {
1810                         case ID_ME:
1811                                 BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
1812                                 break;
1813                         case ID_CU:
1814                         {
1815                                 Curve *cu = (Curve *)ob_data;
1816                                 if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
1817                                         BKE_curve_texspace_calc(cu);
1818                                 }
1819                                 texcoloc = cu->loc;
1820                                 texcosize = cu->size;
1821                                 break;
1822                         }
1823                         case ID_MB:
1824                         {
1825                                 MetaBall *mb = (MetaBall *)ob_data;
1826                                 texcoloc = mb->loc;
1827                                 texcosize = mb->size;
1828                                 break;
1829                         }
1830                         default:
1831                                 break;
1832                 }
1833         }
1834
1835         draw_geometry_prepare(shgroup, obmat, texcoloc, texcosize);
1836
1837         draw_geometry_execute_ex(shgroup, geom, start, count);
1838 }
1839
1840 static void bind_texture(GPUTexture *tex)
1841 {
1842         int bind_num = GPU_texture_bound_number(tex);
1843         if (bind_num == -1) {
1844                 for (int i = 0; i < GPU_max_textures(); ++i) {
1845                         RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures();
1846                         if (RST.bound_tex_slots[RST.bind_tex_inc] == false) {
1847                                 if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
1848                                         GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
1849                                 }
1850                                 GPU_texture_bind(tex, RST.bind_tex_inc);
1851                                 RST.bound_texs[RST.bind_tex_inc] = tex;
1852                                 RST.bound_tex_slots[RST.bind_tex_inc] = true;
1853                                 // printf("Binds Texture %d %p\n", RST.bind_tex_inc, tex);
1854                                 return;
1855                         }
1856                 }
1857
1858                 printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
1859         }
1860         RST.bound_tex_slots[bind_num] = true;
1861 }
1862
1863 static void bind_ubo(GPUUniformBuffer *ubo)
1864 {
1865         if (RST.bind_ubo_inc < GPU_max_ubo_binds()) {
1866                 GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
1867                 RST.bind_ubo_inc++;
1868         }
1869         else {
1870                 /* This is not depending on user input.
1871                  * It is our responsability to make sure there enough slots. */
1872                 BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
1873
1874                 /* printf so user can report bad behaviour */
1875                 printf("Not enough ubo slots! This should not happen!\n");
1876         }
1877 }
1878
/* Clear all texture-slot "in use" flags; bound textures themselves stay
 * cached in RST.bound_texs so they can be reused by the next shgroup. */
static void release_texture_slots(void)
{
	memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
}
1883
/* Reset the UBO binding-point counter used by bind_ubo(). */
static void release_ubo_slots(void)
{
	RST.bind_ubo_inc = 0;
}
1888
/**
 * Draw a single shading group: bind its shader (only if it differs from the
 * currently bound one), apply GPU/stencil state, upload every uniform of the
 * group's interface, then issue the draw calls (instanced, batched, or one
 * call per DRWCall depending on the shgroup type).
 */
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
	BLI_assert(shgroup->shader);

	DRWInterface *interface = &shgroup->interface;
	GPUTexture *tex;
	GPUUniformBuffer *ubo;
	int val;
	float fval;

	/* Rebind the shader only when it changed since the last shgroup. */
	if (DST.shader != shgroup->shader) {
		if (DST.shader) GPU_shader_unbind();
		GPU_shader_bind(shgroup->shader);
		DST.shader = shgroup->shader;
	}

	/* Reset per-shgroup binding bookkeeping (texture slot flags, ubo counter). */
	release_texture_slots();
	release_ubo_slots();

	drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
	drw_stencil_set(shgroup->stencil_mask);

	/* Binding Uniform */
	/* Don't check anything, Interface should already contain the least uniform as possible */
	for (DRWUniform *uni = interface->uniforms; uni; uni = uni->next) {
		switch (uni->type) {
			case DRW_UNIFORM_SHORT_TO_INT:
				val = (int)*((short *)uni->value);
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
				break;
			case DRW_UNIFORM_SHORT_TO_FLOAT:
				fval = (float)*((short *)uni->value);
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
				break;
			case DRW_UNIFORM_BOOL:
			case DRW_UNIFORM_INT:
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
				break;
			case DRW_UNIFORM_FLOAT:
			case DRW_UNIFORM_MAT3:
			case DRW_UNIFORM_MAT4:
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
				break;
			case DRW_UNIFORM_TEXTURE:
				tex = (GPUTexture *)uni->value;
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BUFFER:
				/* Value is a GPUTexture** resolved late; only valid while
				 * drawing to an offscreen fbo, hence the state check. */
				if (!DRW_state_is_fbo()) {
					break;
				}
				tex = *((GPUTexture **)uni->value);
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BLOCK:
				ubo = (GPUUniformBuffer *)uni->value;
				bind_ubo(ubo);
				GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
				break;
		}
	}

	/* Helpers emitting GPU select IDs while picking (G_PICKSEL). */
#ifdef USE_GPU_SELECT
	/* use the first item because of selection we only ever add one */
#  define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
	if ((G.f & G_PICKSEL) && (_call)) { \
		GPU_select_load_id((_call)->head.select_id); \
	} ((void)0)

#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count)  \
	_start = 0;                                                      \
	_count = _shgroup->interface.instance_count;                     \
	int *select_id = NULL;                                           \
	if (G.f & G_PICKSEL) {                                           \
		if (_shgroup->interface.override_selectid == -1) {                        \
			select_id = DRW_instance_data_get(_shgroup->interface.inst_selectid); \
			switch (_shgroup->type) {                                             \
				case DRW_SHG_TRIANGLE_BATCH: _count = 3; break;                   \
				case DRW_SHG_LINE_BATCH: _count = 2; break;                       \
				default: _count = 1; break;                                       \
			}                                                                     \
		}                                                                         \
		else {                                                                    \
			GPU_select_load_id(_shgroup->interface.override_selectid);            \
		}                                                                         \
	}                                                                \
	while (_start < _shgroup->interface.instance_count) {            \
		if (select_id) {                                             \
			GPU_select_load_id(select_id[_start]);                   \
		}

# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
		_start += _count;                                    \
	}

#else
#  define GPU_SELECT_LOAD_IF_PICKSEL(call)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
	_start = 0;                                                     \
	_count = _shgroup->interface.instance_count;

#endif

	/* Rendering Calls */
	if (!ELEM(shgroup->type, DRW_SHG_NORMAL)) {
		/* Replacing multiple calls with only one */
		float obmat[4][4];
		unit_m4(obmat);

		if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
			if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
				if (shgroup->instancing_geom != NULL) {
					GPU_SELECT_LOAD_IF_PICKSEL((DRWCall *)shgroup->calls_first);
					draw_geometry(shgroup, shgroup->instancing_geom, obmat, shgroup->instance_data, 0, 0);
				}
			}
			else {
				if (shgroup->interface.instance_count > 0) {
					unsigned int count, start;
					GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
					{
						draw_geometry(shgroup,
						              (shgroup->instancing_geom) ? shgroup->instancing_geom : shgroup->instance_geom,
						              obmat, shgroup->instance_data, start, count);
					}
					GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
				}
			}
		}
		else { /* DRW_SHG_***_BATCH */
			/* Some dynamic batch can have no geom (no call to aggregate) */
			if (shgroup->interface.instance_count > 0) {
				unsigned int count, start;
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry(shgroup, shgroup->batch_geom, obmat, NULL, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
	}
	else {
		/* One draw per DRWCall; the list is linked through head.prev. */
		for (DRWCall *call = shgroup->calls_first; call; call = call->head.prev) {
			bool neg_scale = is_negative_m4(call->obmat);

			/* Negative scale objects */
			if (neg_scale) {
				glFrontFace(DST.backface);
			}

			GPU_SELECT_LOAD_IF_PICKSEL(call);

			if (call->head.type == DRW_CALL_SINGLE) {
				draw_geometry(shgroup, call->geometry, call->obmat, call->ob_data, 0, 0);
			}
			else {
				BLI_assert(call->head.type == DRW_CALL_GENERATE);
				DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
				draw_geometry_prepare(shgroup, callgen->obmat, NULL, NULL);
				callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
			}

			/* Reset state */
			if (neg_scale) {
				glFrontFace(DST.frontface);
			}
		}
	}

	/* TODO: remove, (currently causes alpha issue with sculpt, need to investigate) */
	DRW_state_reset();
}
2070
2071 static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
2072 {
2073         /* Start fresh */
2074         DST.shader = NULL;
2075
2076         BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish had not been called before drawing");
2077
2078         drw_state_set(pass->state);
2079
2080         DRW_stats_query_start(pass->name);
2081
2082         for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
2083                 draw_shgroup(shgroup, pass->state);
2084                 /* break if upper limit */
2085                 if (shgroup == end_group) {
2086                         break;
2087                 }
2088         }
2089
2090         /* Clear Bound textures */
2091         for (int i = 0; i < GPU_max_textures(); i++) {
2092                 if (RST.bound_texs[i] != NULL) {
2093                         GPU_texture_unbind(RST.bound_texs[i]);
2094                         RST.bound_texs[i] = NULL;
2095                 }
2096         }
2097
2098         if (DST.shader) {
2099                 GPU_shader_unbind();
2100                 DST.shader = NULL;
2101         }
2102
2103         DRW_stats_query_end();
2104 }
2105
/* Draw every shading group of the pass. */
void DRW_draw_pass(DRWPass *pass)
{
	drw_draw_pass_ex(pass, pass->shgroups, pass->shgroups_last);
}
2110
/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes.
 * The range is inclusive: end_group is drawn too. */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
	drw_draw_pass_ex(pass, start_group, end_group);
}
2116
/* Load the region's view/projection matrices into the built-in matrix stack
 * so callback drawing happens in the viewport's space. */
void DRW_draw_callbacks_pre_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2124
/* Same as DRW_draw_callbacks_pre_scene: reload the region's view/projection
 * matrices for callbacks drawn after the scene. */
void DRW_draw_callbacks_post_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2132
/* Reset state to not interfere with other UI drawcall */
void DRW_state_reset_ex(DRWState state)
{
	/* Invert the cached state so drw_state_set() sees every bit as changed
	 * and re-applies the full GL state. */
	DST.state = ~state;
	drw_state_set(state);
}
2139
/* Restore the default draw state (and default blending function, which
 * drw_state_set() does not track). */
void DRW_state_reset(void)
{
	/* Reset blending function */
	glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);

	DRW_state_reset_ex(DRW_STATE_DEFAULT);
}
2147
/* NOTE : Make sure to reset after use! */
void DRW_state_invert_facing(void)
{
	/* Swap front/back winding and apply it (see neg_scale handling in
	 * draw_shgroup for the same trick applied per object). */
	SWAP(GLenum, DST.backface, DST.frontface);
	glFrontFace(DST.frontface);
}
2154
2155 /**
2156  * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
2157  * and if the shaders have support for it (see usage of gl_ClipDistance).
2158  * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
2159  **/
2160 void DRW_state_clip_planes_add(float plane_eq[4])
2161 {
2162         copy_v4_v4(DST.clip_planes_eq[DST.num_clip_planes++], plane_eq);
2163 }
2164
/* Discard all clip planes added with DRW_state_clip_planes_add(). */
void DRW_state_clip_planes_reset(void)
{
	DST.num_clip_planes = 0;
}
2169
2170 /** \} */
2171
2172
2173 struct DRWTextStore *DRW_text_cache_ensure(void)
2174 {
2175         BLI_assert(DST.text_store_p);
2176         if (*DST.text_store_p == NULL) {
2177                 *DST.text_store_p = DRW_text_cache_create();
2178         }
2179         return *DST.text_store_p;
2180 }
2181
2182
2183 /* -------------------------------------------------------------------- */
2184
2185 /** \name Settings
2186  * \{ */
2187
2188 bool DRW_object_is_renderable(Object *ob)
2189 {
2190         BLI_assert(BKE_object_is_visible(ob, OB_VISIBILITY_CHECK_UNKNOWN_RENDER_MODE));
2191
2192         if (ob->type == OB_MESH) {
2193                 if (ob == DST.draw_ctx.object_edit) {
2194                         IDProperty *props = BKE_layer_collection_engine_evaluated_get(ob, COLLECTION_MODE_EDIT, "");
2195                         bool do_show_occlude_wire = BKE_collection_engine_property_value_get_bool(props, "show_occlude_wire");
2196                         if (do_show_occlude_wire) {
2197                                 return false;
2198                         }
2199                         bool do_show_weight = BKE_collection_engine_property_value_get_bool(props, "show_weight");
2200                         if (do_show_weight) {
2201                                 return false;
2202                         }
2203                 }
2204         }
2205
2206         return true;
2207 }
2208
2209 /**
2210  * Return whether this object is visible depending if
2211  * we are rendering or drawing in the viewport.
2212  */
2213 bool DRW_check_object_visible_within_active_context(Object *ob)
2214 {
2215         const eObjectVisibilityCheck mode = DRW_state_is_scene_render() ?
2216                                              OB_VISIBILITY_CHECK_FOR_RENDER :
2217                                              OB_VISIBILITY_CHECK_FOR_VIEWPORT;
2218         return BKE_object_is_visible(ob, mode);
2219 }
2220
2221 bool DRW_object_is_flat_normal(const Object *ob)
2222 {
2223         if (ob->type == OB_MESH) {
2224                 const Mesh *me = ob->data;
2225                 if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
2226                         return false;
2227                 }
2228         }
2229         return true;
2230 }
2231
2232 /**
2233  * Return true if the object has its own draw mode.
2234  * Caller must check this is active */
2235 int DRW_object_is_mode_shade(const Object *ob)
2236 {
2237         BLI_assert(ob == DST.draw_ctx.obact);
2238         if ((DST.draw_ctx.object_mode & OB_MODE_EDIT) == 0) {
2239                 if (DST.draw_ctx.object_mode & (OB_MODE_VERTEX_PAINT | OB_MODE_WEIGHT_PAINT | OB_MODE_TEXTURE_PAINT)) {
2240                         if ((DST.draw_ctx.v3d->flag2 & V3D_SHOW_MODE_SHADE_OVERRIDE) == 0) {
2241                                 return true;
2242                         }
2243                         else {
2244                                 return false;
2245                         }
2246                 }
2247         }
2248         return -1;
2249 }
2250
2251 /** \} */
2252
2253
2254 /* -------------------------------------------------------------------- */
2255
2256 /** \name Framebuffers (DRW_framebuffer)
2257  * \{ */
2258
2259 static GPUTextureFormat convert_tex_format(
2260         int fbo_format,
2261         int *r_channels, bool *r_is_depth)
2262 {
2263         *r_is_depth = ELEM(fbo_format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8);
2264
2265         switch (fbo_format) {
2266                 case DRW_TEX_R_16:     *r_channels = 1; return GPU_R16F;
2267                 case DRW_TEX_R_32:     *r_channels = 1; return GPU_R32F;
2268                 case DRW_TEX_RG_8:     *r_channels = 2; return GPU_RG8;
2269                 case DRW_TEX_RG_16:    *r_channels = 2; return GPU_RG16F;
2270                 case DRW_TEX_RG_16I:   *r_channels = 2; return GPU_RG16I;
2271                 case DRW_TEX_RG_32:    *r_channels = 2; return GPU_RG32F;
2272                 case DRW_TEX_RGBA_8:   *r_channels = 4; return GPU_RGBA8;
2273                 case DRW_TEX_RGBA_16:  *r_channels = 4; return GPU_RGBA16F;
2274                 case DRW_TEX_RGBA_32:  *r_channels = 4; return GPU_RGBA32F;
2275                 case DRW_TEX_DEPTH_16: *r_channels = 1; return GPU_DEPTH_COMPONENT16;
2276                 case DRW_TEX_DEPTH_24: *r_channels = 1; return GPU_DEPTH_COMPONENT24;
2277                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_channels = 1; return GPU_DEPTH24_STENCIL8;
2278                 case DRW_TEX_DEPTH_32: *r_channels = 1; return GPU_DEPTH_COMPONENT32F;
2279                 case DRW_TEX_RGB_11_11_10: *r_channels = 3; return GPU_R11F_G11F_B10F;
2280                 default:
2281                         BLI_assert(false && "Texture format unsupported as render target!");
2282                         *r_channels = 4; return GPU_RGBA8;
2283         }
2284 }
2285
/* Thin wrapper over GPU_framebuffer_create(). */
struct GPUFrameBuffer *DRW_framebuffer_create(void)
{
	return GPU_framebuffer_create();
}
2290
/**
 * Ensure *fb exists and has the requested textures created and attached.
 *
 * \param fb  Framebuffer pointer; created here when NULL.
 * \param engine_type  Key used when querying pooled (DRW_TEX_TEMP) textures.
 * \param width, height  Size for newly created textures (must be > 0).
 * \param textures  Descriptions of the textures to create/attach; depth
 *                  formats do not consume a color attachment slot.
 * \param textures_len  Number of entries in \a textures (<= MAX_FBO_TEX).
 */
void DRW_framebuffer_init(
        struct GPUFrameBuffer **fb, void *engine_type, int width, int height,
        DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
{
	BLI_assert(textures_len <= MAX_FBO_TEX);
	BLI_assert(width > 0 && height > 0);

	bool create_fb = false;
	int color_attachment = -1;

	if (!*fb) {
		*fb = GPU_framebuffer_create();
		create_fb = true;
	}

	for (int i = 0; i < textures_len; ++i) {
		int channels;
		bool is_depth;
		bool create_tex = false;

		DRWFboTexture fbotex = textures[i];
		bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;

		GPUTextureFormat gpu_format = convert_tex_format(fbotex.format, &channels, &is_depth);

		if (!*fbotex.tex || is_temp) {
			/* Temp textures need to be queried each frame, others not. */
			if (is_temp) {
				*fbotex.tex = GPU_viewport_texture_pool_query(
				        DST.viewport, engine_type, width, height, channels, gpu_format);
			}
			else {
				*fbotex.tex = GPU_texture_create_2D_custom(
				        width, height, channels, gpu_format, NULL, NULL);
				create_tex = true;
			}
		}

		/* Depth textures are attached at slot 0 of the depth attachment;
		 * only color textures advance the color slot index. */
		if (!is_depth) {
			++color_attachment;
		}

		if (create_fb || create_tex) {
			drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
			GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
		}
	}

	if (create_fb && (textures_len > 0)) {
		if (!GPU_framebuffer_check_valid(*fb, NULL)) {
			printf("Error invalid framebuffer\n");
		}

		/* Detach temp textures */
		for (int i = 0; i < textures_len; ++i) {
			DRWFboTexture fbotex = textures[i];

			if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
				GPU_framebuffer_texture_detach(*fbotex.tex);
			}
		}

		/* Restore the default framebuffer binding after validation. */
		if (DST.default_framebuffer != NULL) {
			GPU_framebuffer_bind(DST.default_framebuffer);
		}
	}
}
2358
/* Thin wrapper over GPU_framebuffer_free(). */
void DRW_framebuffer_free(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_free(fb);
}
2363
/* Thin wrapper over GPU_framebuffer_bind(). */
void DRW_framebuffer_bind(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_bind(fb);
}
2368
2369 void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
2370 {
2371         if (color) {
2372                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
2373                 glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
2374         }
2375         if (depth) {
2376                 glDepthMask(GL_TRUE);
2377                 glClearDepth(clear_depth);
2378         }
2379         if (stencil) {
2380                 glStencilMask(0xFF);
2381         }
2382         glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
2383                 ((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
2384                 ((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
2385 }
2386
2387 void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
2388 {
2389         GLenum type;
2390         switch (channels) {
2391                 case 1: type = GL_RED; break;
2392                 case 2: type = GL_RG; break;
2393                 case 3: type = GL_RGB; break;
2394                 case 4: type = GL_RGBA; break;
2395                 default:
2396                         BLI_assert(false && "wrong number of read channels");
2397                         return;
2398         }
2399         glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
2400         glReadPixels(x, y, w, h, type, GL_FLOAT, data);
2401 }
2402
/* Read back the depth buffer of the currently bound framebuffer into `data`
 * (w * h floats). */
void DRW_framebuffer_read_depth(int x, int y, int w, int h, float *data)
{
	GLenum type = GL_DEPTH_COMPONENT;

	/* glReadBuffer selects the COLOR source only; depth reads ignore it,
	 * so setting attachment 0 here is harmless. */
	glReadBuffer(GL_COLOR_ATTACHMENT0); /* This is OK! */
	glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}
2410
/* Attach mip level \a mip of \a tex to color/depth \a slot of \a fb. */
void DRW_framebuffer_texture_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
{
	GPU_framebuffer_texture_attach(fb, tex, slot, mip);
}
2415
/* Attach a single \a layer of an array/3D texture to \a slot of \a fb. */
void DRW_framebuffer_texture_layer_attach(struct GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip)
{
	GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
}
2420
/* Attach one cubemap \a face of \a tex to \a slot of \a fb. */
void DRW_framebuffer_cubeface_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
{
	GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
}
2425
/* Detach \a tex from whichever framebuffer slot it is currently attached to. */
void DRW_framebuffer_texture_detach(GPUTexture *tex)
{
	GPU_framebuffer_texture_detach(tex);
}
2430
/* Blit slot 0 of \a fb_read into slot 0 of \a fb_write,
 * optionally copying depth and/or stencil instead of color. */
void DRW_framebuffer_blit(struct GPUFrameBuffer *fb_read, struct GPUFrameBuffer *fb_write, bool depth, bool stencil)
{
	GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
}
2435
/* Downsample \a tex mip by mip (\a num_iter levels), invoking \a callback
 * with \a userData for each level so callers can issue the filtering draw. */
void DRW_framebuffer_recursive_downsample(
        struct GPUFrameBuffer *fb, struct GPUTexture *tex, int num_iter,
        void (*callback)(void *userData, int level), void *userData)
{
	GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
}
2442
/* Set the GL viewport rectangle. Note: the framebuffer argument is unused;
 * the viewport applies to whatever framebuffer is currently bound. */
void DRW_framebuffer_viewport_size(struct GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
{
	glViewport(x, y, w, h);
}
2447
/* Use color management profile to draw texture to framebuffer.
 * Draws \a tex as a fullscreen triangle, applying the scene's OCIO view
 * transform when available, otherwise a builtin linear-to-sRGB (or plain
 * copy) shader. Only color writes are enabled while drawing. */
void DRW_transform_to_display(GPUTexture *tex)
{
	drw_state_set(DRW_STATE_WRITE_COLOR);

	Gwn_VertFormat *vert_format = immVertexFormat();
	unsigned int pos = GWN_vertformat_attr_add(vert_format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
	unsigned int texco = GWN_vertformat_attr_add(vert_format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);

	const float dither = 1.0f;

	bool use_ocio = false;

	/* View transform is already applied for offscreen, don't apply again, see: T52046 */
	if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
		Scene *scene = DST.draw_ctx.scene;
		use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
		        &scene->view_settings, &scene->display_settings, NULL, dither, false);
	}

	if (!use_ocio) {
		/* OCIO GLSL setup failed or was skipped: fall back to builtin shaders. */
		/* View transform is already applied for offscreen, don't apply again, see: T52046 */
		if (DST.options.is_image_render && !DST.options.is_scene_render) {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
			immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
		}
		else {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
		}
		immUniform1i("image", 0);
	}

	GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */

	/* Identity MVP: vertices below are already in clip space. */
	float mat[4][4];
	unit_m4(mat);
	immUniformMatrix4fv("ModelViewProjectionMatrix", mat);

	/* Full screen triangle */
	immBegin(GWN_PRIM_TRIS, 3);
	immAttrib2f(texco, 0.0f, 0.0f);
	immVertex2f(pos, -1.0f, -1.0f);

	immAttrib2f(texco, 2.0f, 0.0f);
	immVertex2f(pos, 3.0f, -1.0f);

	immAttrib2f(texco, 0.0f, 2.0f);
	immVertex2f(pos, -1.0f, 3.0f);
	immEnd();

	GPU_texture_unbind(tex);

	/* Tear down whichever path bound the shader above. */
	if (use_ocio) {
		IMB_colormanagement_finish_glsl_draw();
	}
	else {
		immUnbindProgram();
	}
}
2507
2508 /** \} */
2509
2510
2511 /* -------------------------------------------------------------------- */
2512
2513 /** \name Viewport (DRW_viewport)
2514  * \{ */
2515
2516 static void *DRW_viewport_engine_data_ensure(void *engine_type)
2517 {
2518         void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
2519
2520         if (data == NULL) {
2521                 data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
2522         }
2523         return data;
2524 }
2525
/* Query the list lengths a draw engine declares for its viewport data.
 * Any of the output pointers may be NULL to skip that value. */
void DRW_engine_viewport_data_size_get(
        const void *engine_type_v,
        int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
{
	const DrawEngineType *engine_type = engine_type_v;

	if (r_fbl_len) {
		*r_fbl_len = engine_type->vedata_size->fbl_len;
	}
	if (r_txl_len) {
		*r_txl_len = engine_type->vedata_size->txl_len;
	}
	if (r_psl_len) {
		*r_psl_len = engine_type->vedata_size->psl_len;
	}
	if (r_stl_len) {
		*r_stl_len = engine_type->vedata_size->stl_len;
	}
}
2545
/* Return the current viewport size as a float[2] (width, height). */
const float *DRW_viewport_size_get(void)
{
	return &DST.size[0];
}
2550
/* Return the two normalized screen-aligned vectors (float[2][3], contiguous). */
const float *DRW_viewport_screenvecs_get(void)
{
	return &DST.screenvecs[0][0];
}
2555
/* Return a pointer to the viewport's pixel size factor. */
const float *DRW_viewport_pixelsize_get(void)
{
	return &DST.pixsize;
}
2560
/* Reset the per-viewport draw caches before a rebuild.
 * Order matters: the viewport memiters reference mempool memory,
 * so they must be released before the mempools are cleared. */
static void drw_viewport_cache_resize(void)
{
	/* Release the memiter before clearing the mempools that references them */
	GPU_viewport_cache_release(DST.viewport);

	if (DST.vmempool != NULL) {
		/* Clear but keep allocated chunks for reuse next frame. */
		BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_len(DST.vmempool->calls));
		BLI_mempool_clear_ex(DST.vmempool->calls_generate, BLI_mempool_len(DST.vmempool->calls_generate));
		BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_len(DST.vmempool->shgroups));
		BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_len(DST.vmempool->uniforms));
		BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_len(DST.vmempool->passes));
	}

	DRW_instance_data_list_free_unused(DST.idatalist);
	DRW_instance_data_list_resize(DST.idatalist);
}
2577
2578
2579 /* Not a viewport variable, we could split this out. */
2580 static void drw_context_state_init(void)
2581 {
2582         /* Edit object. */
2583         if (DST.draw_ctx.object_mode & OB_MODE_EDIT) {
2584                 DST.draw_ctx.object_edit = DST.draw_ctx.obact;
2585         }
2586         else {
2587                 DST.draw_ctx.object_edit = NULL;
2588         }
2589
2590         /* Pose object. */
2591         if (DST.draw_ctx.object_mode & OB_MODE_POSE) {
2592                 DST.draw_ctx.object_pose = DST.draw_ctx.obact;
2593         }
2594         else if (DST.draw_ctx.object_mode & OB_MODE_WEIGHT_PAINT) {
2595                 DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
2596         }
2597         else {
2598                 DST.draw_ctx.object_pose = NULL;
2599         }
2600 }
2601
/* It also stores viewport variable to an immutable place: DST
 * This is because a cache uniform only store reference
 * to its value. And we don't want to invalidate the cache
 * if this value change per viewport */
static void drw_viewport_var_init(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;
	/* Refresh DST.size */
	if (DST.viewport) {
		int size[2];
		GPU_viewport_size_get(DST.viewport, size);
		DST.size[0] = size[0];
		DST.size[1] = size[1];

		DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
		DST.default_framebuffer = fbl->default_fb;

		/* Mempools live on the viewport; lazily create them on first use. */
		DST.vmempool = GPU_viewport_mempool_get(DST.viewport);

		if (DST.vmempool->calls == NULL) {
			DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
		}
		if (DST.vmempool->calls_generate == NULL) {
			DST.vmempool->calls_generate = BLI_mempool_create(sizeof(DRWCallGenerate), 0, 512, 0);
		}
		if (DST.vmempool->shgroups == NULL) {
			DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
		}
		if (DST.vmempool->uniforms == NULL) {
			DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
		}
		if (DST.vmempool->passes == NULL) {
			DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
		}

		DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
		DRW_instance_data_list_reset(DST.idatalist);
	}
	else {
		/* No viewport (e.g. render mode): clear viewport-derived state. */
		DST.size[0] = 0;
		DST.size[1] = 0;

		DST.default_framebuffer = NULL;
		DST.vmempool = NULL;
	}

	if (rv3d != NULL) {
		/* Refresh DST.screenvecs */
		copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
		copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
		normalize_v3(DST.screenvecs[0]);
		normalize_v3(DST.screenvecs[1]);

		/* Refresh DST.pixelsize */
		DST.pixsize = rv3d->pixsize;
	}

	/* Reset facing */
	DST.frontface = GL_CCW;
	DST.backface = GL_CW;
	glFrontFace(DST.frontface);

	if (DST.draw_ctx.object_edit) {
		ED_view3d_init_mats_rv3d(DST.draw_ctx.object_edit, rv3d);
	}

	/* Alloc array of texture reference. */
	if (RST.bound_texs == NULL) {
		RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
	}
	if (RST.bound_tex_slots == NULL) {
		RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
	}

	/* Per-draw reset of matrix overrides and shared instance data slots. */
	memset(viewport_matrix_override.override, 0x0, sizeof(viewport_matrix_override.override));
	memset(DST.common_instance_data, 0x0, sizeof(DST.common_instance_data));
}
2679
/* Copy the requested viewport matrix into \a mat.
 * An explicit override (set via DRW_viewport_matrix_override_set) takes
 * precedence; otherwise the value comes from the region's RegionView3D. */
void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;
	BLI_assert(type >= DRW_MAT_PERS && type <= DRW_MAT_WININV);

	if (viewport_matrix_override.override[type]) {
		copy_m4_m4(mat, viewport_matrix_override.mat[type]);
	}
	else {
		BLI_assert(rv3d != NULL); /* Can't use this in render mode. */
		switch (type) {
			case DRW_MAT_PERS:
				copy_m4_m4(mat, rv3d->persmat);
				break;
			case DRW_MAT_PERSINV:
				copy_m4_m4(mat, rv3d->persinv);
				break;
			case DRW_MAT_VIEW:
				copy_m4_m4(mat, rv3d->viewmat);
				break;
			case DRW_MAT_VIEWINV:
				copy_m4_m4(mat, rv3d->viewinv);
				break;
			case DRW_MAT_WIN:
				copy_m4_m4(mat, rv3d->winmat);
				break;
			case DRW_MAT_WININV:
				/* No cached inverse on rv3d; computed on demand. */
				invert_m4_m4(mat, rv3d->winmat);
				break;
			default:
				BLI_assert(!"Matrix type invalid");
				break;
		}
	}
}
2715
/* Override one viewport matrix; stays active until unset or the next
 * drw_viewport_var_init() which clears all overrides. */
void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type)
{
	copy_m4_m4(viewport_matrix_override.mat[type], mat);
	viewport_matrix_override.override[type] = true;
}
2721
/* Remove a matrix override, falling back to the RegionView3D value. */
void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
{
	viewport_matrix_override.override[type] = false;
}
2726
2727 bool DRW_viewport_is_persp_get(void)
2728 {
2729         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2730         if (rv3d) {
2731                 return rv3d->is_persp;
2732         }
2733         else {
2734                 if (viewport_matrix_override.override[DRW_MAT_WIN]) {
2735                         return viewport_matrix_override.mat[DRW_MAT_WIN][3][3] == 0.0f;
2736                 }
2737         }
2738         BLI_assert(0);
2739         return false;
2740 }
2741
/* Return the default framebuffer list of the active viewport. */
DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
{
	return GPU_viewport_framebuffer_list_get(DST.viewport);
}
2746
/* Return the default texture list of the active viewport. */
DefaultTextureList *DRW_viewport_texture_list_get(void)
{
	return GPU_viewport_texture_list_get(DST.viewport);
}
2751
/* Tag the active viewport so it gets redrawn. */
void DRW_viewport_request_redraw(void)
{
	GPU_viewport_tag_update(DST.viewport);
}
2756
2757 /** \} */
2758
2759
2760 /* -------------------------------------------------------------------- */
2761 /** \name ViewLayers (DRW_scenelayer)
2762  * \{ */
2763
2764 void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
2765 {
2766         for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2767                 if (sled->engine_type == engine_type) {
2768                         return sled->storage;
2769                 }
2770         }
2771         return NULL;
2772 }
2773
2774 void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
2775 {
2776         ViewLayerEngineData *sled;
2777
2778         for (sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2779                 if (sled->engine_type == engine_type) {
2780                         return &sled->storage;
2781                 }
2782         }
2783
2784         sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
2785         sled->engine_type = engine_type;
2786         sled->free = callback;
2787         BLI_addtail(&DST.draw_ctx.view_layer->drawdata, sled);
2788
2789         return &sled->storage;
2790 }
2791
2792 /** \} */
2793
2794
2795 /* -------------------------------------------------------------------- */
2796
2797 /** \name Objects (DRW_object)
2798  * \{ */
2799
2800 ObjectEngineData *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type)
2801 {
2802         for (ObjectEngineData *oed = ob->drawdata.first; oed; oed = oed->next) {
2803                 if (oed->engine_type == engine_type) {
2804                         return oed;
2805                 }
2806         }
2807         return NULL;
2808 }
2809
/* Return per-object engine data for \a ob, allocating it when missing.
 * For dupli objects the data is transient (reset every redraw) and taken
 * from a shared instance-data pool, so no free callback is allowed there.
 * \param size: full struct size, must embed ObjectEngineData as first member.
 * \param init_cb: optional, called once on fresh data.
 * \param free_cb: optional, stored for later cleanup (non-dupli only). */
ObjectEngineData *DRW_object_engine_data_ensure(
        Object *ob,
        DrawEngineType *engine_type,
        size_t size,
        ObjectEngineDataInitCb init_cb,
        ObjectEngineDataFreeCb free_cb)
{
	BLI_assert(size >= sizeof(ObjectEngineData));
	/* Try to re-use existing data. */
	ObjectEngineData *oed = DRW_object_engine_data_get(ob, engine_type);
	if (oed != NULL) {
		return oed;
	}
	/* Allocate new data. */
	if ((ob->base_flag & BASE_FROMDUPLI) != 0) {
		/* NOTE: data is not persistent in this case. It is reset each redraw. */
		BLI_assert(free_cb == NULL); /* No callback allowed. */
		/* Round to sizeof(float) for DRW_instance_data_request(). */
		const size_t t = sizeof(float) - 1;
		size = (size + t) & ~t;
		size_t fsize = size / sizeof(float);
		/* Pools are shared per element size; created lazily. */
		if (DST.common_instance_data[fsize] == NULL) {
			DST.common_instance_data[fsize] = DRW_instance_data_request(DST.idatalist, fsize, 16);
		}
		oed = (ObjectEngineData *)DRW_instance_data_next(DST.common_instance_data[fsize]);
		memset(oed, 0, size);
	}
	else {
		oed = MEM_callocN(size, "ObjectEngineData");
	}
	oed->engine_type = engine_type;
	oed->free = free_cb;
	/* Perform user-side initialization, if needed. */
	if (init_cb != NULL) {
		init_cb(oed);
	}
	/* Register in the list. */
	BLI_addtail(&ob->drawdata, oed);
	return oed;
}
2850
/* XXX There is definitly some overlap between this and DRW_object_engine_data_ensure.
 * We should get rid of one of the two. */
/* Return (creating if needed) per-lamp engine data for \a ob (must be a lamp). */
LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, RenderEngineType *engine_type)
{
	BLI_assert(ob->type == OB_LAMP);

	Scene *scene = DST.draw_ctx.scene;

	/* TODO Dupliobjects */
	/* TODO Should be per scenelayer */
	return GPU_lamp_engine_data_get(scene, ob, NULL, engine_type);
}
2863
/* Free lamp engine data obtained via DRW_lamp_engine_data_ensure. */
void DRW_lamp_engine_data_free(LampEngineData *led)
{
	GPU_lamp_engine_data_free(led);
}
2868
2869 /** \} */
2870
2871
2872 /* -------------------------------------------------------------------- */
2873
2874 /** \name Rendering (DRW_engines)
2875  * \{ */
2876
/* Run engine_init for every enabled engine, accumulating per-engine
 * init time for the debug CPU stats overlay. */
static void drw_engines_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		/* Profiling brackets the (possibly absent) callback. */
		PROFILE_START(stime);

		if (engine->engine_init) {
			engine->engine_init(data);
		}

		PROFILE_END_UPDATE(data->init_time, stime);
	}
}
2891
/* Start cache building for all enabled engines.
 * Also resets text caches; DST.text_store_p points at the first engine's
 * slot so all engines share one text cache per redraw. */
static void drw_engines_cache_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		/* Throw away last redraw's text cache. */
		if (data->text_draw_cache) {
			DRW_text_cache_destroy(data->text_draw_cache);
			data->text_draw_cache = NULL;
		}
		if (DST.text_store_p == NULL) {
			DST.text_store_p = &data->text_draw_cache;
		}

		if (engine->cache_init) {
			engine->cache_init(data);
		}
	}
}
2911
/* Feed one object to every enabled engine's cache (id_update first,
 * then cache_populate). */
static void drw_engines_cache_populate(Object *ob)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->id_update) {
			engine->id_update(data, &ob->id);
		}

		if (engine->cache_populate) {
			engine->cache_populate(data, ob);
		}
	}
}
2927
2928 static void drw_engines_cache_finish(void)
2929 {
2930         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
2931                 DrawEngineType *engine = link->data;
2932                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
2933
2934                 if (engine->cache_finish) {
2935                         engine->cache_finish(data);
2936                 }
2937         }
2938 }
2939
/* Draw the viewport background.
 * Only the FIRST enabled engine providing draw_background runs (note the
 * early return); if none does, the default background is drawn instead. */
static void drw_engines_draw_background(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->draw_background) {
			PROFILE_START(stime);

			DRW_stats_group_start(engine->idname);
			engine->draw_background(data);
			DRW_stats_group_end();

			PROFILE_END_UPDATE(data->background_time, stime);
			return;
		}
	}

	/* No draw_background found, doing default background */
	if (DRW_state_draw_background()) {
		DRW_draw_background();
	}
}
2963
/* Run draw_scene for every enabled engine, in enable order,
 * accumulating per-engine render time for the stats overlay. */
static void drw_engines_draw_scene(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (engine->draw_scene) {
			DRW_stats_group_start(engine->idname);
			engine->draw_scene(data);
			DRW_stats_group_end();
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
2980
/* Draw each engine's cached viewport text.
 * NOTE(review): the elapsed time is folded into render_time, not a
 * dedicated counter — presumably intentional, worth confirming. */
static void drw_engines_draw_text(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (data->text_draw_cache) {
			DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.v3d, DST.draw_ctx.ar, false);
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
2995
2996 #define MAX_INFO_LINES 10
2997
2998 /**
2999  * Returns the offset required for the drawing of engines info.
3000  */
3001 int DRW_draw_region_engine_info_offset(void)
3002 {
3003         int lines = 0;
3004         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3005                 DrawEngineType *engine = link->data;
3006                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3007
3008                 /* Count the number of lines. */
3009                 if (data->info[0] != '\0') {
3010                         lines++;
3011                         char *c = data->info;
3012                         while (*c++ != '\0') {
3013                                 if (*c == '\n') {
3014                                         lines++;
3015                                 }
3016                         }
3017                 }
3018         }
3019         return MIN2(MAX_INFO_LINES, lines) * UI_UNIT_Y;
3020 }
3021
3022 /**
3023  * Actual drawing;
3024  */
3025 void DRW_draw_region_engine_info(void)
3026 {
3027         const char *info_array_final[MAX_INFO_LINES + 1];
3028         /* This should be maxium number of engines running at the same time. */
3029         char info_array[MAX_INFO_LINES][GPU_INFO_SIZE];
3030         int i = 0;
3031
3032         const DRWContextState *draw_ctx = DRW_context_state_get();
3033         ARegion *ar = draw_ctx->ar;
3034         float fill_color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
3035
3036         UI_GetThemeColor3fv(TH_HIGH_GRAD, fill_color);
3037         mul_v3_fl(fill_color, fill_color[3]);
3038
3039         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3040                 DrawEngineType *engine = link->data;
3041                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3042
3043                 if (data->info[0] != '\0') {
3044                         char *chr_current = data->info;
3045                         char *chr_start = chr_current;
3046                         int line_len = 0;
3047
3048                         while (*chr_current++ != '\0') {
3049                                 line_len++;
3050                                 if (*chr_current == '\n') {
3051                                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
3052                                         /* Re-start counting. */
3053                                         chr_start = chr_current + 1;
3054                                         line_len = -1;
3055                                 }
3056                         }
3057
3058                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
3059
3060                         if (i >= MAX_INFO_LINES) {
3061                                 break;
3062                         }
3063                 }
3064         }
3065
3066         for (int j = 0; j < i; j++) {
3067                 info_array_final[j] = info_array[j];
3068         }
3069         info_array_final[i] = NULL;
3070
3071         if (info_array[0] != NULL) {
3072                 ED_region_info_draw_multiline(ar, info_array_final, fill_color, true);
3073         }
3074 }
3075
3076 #undef MAX_INFO_LINES
3077
/* Append \a engine to DST.enabled_engines; list order is draw order. */
static void use_drw_engine(DrawEngineType *engine)
{
	LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
	ld->data = engine;
	BLI_addtail(&DST.enabled_engines, ld);
}
3084
/* TODO revisit this when proper layering is implemented */
/* Gather all draw engines needed and store them in DST.enabled_engines
 * That also define the rendering order of engines */
static void drw_engines_enable_from_engine(RenderEngineType *engine_type)
{
	/* TODO layers */
	if (engine_type->draw_engine != NULL) {
		use_drw_engine(engine_type->draw_engine);
	}

	/* Render engines without an internal draw engine go through the
	 * external draw engine wrapper. */
	if ((engine_type->flag & RE_INTERNAL) == 0) {
		drw_engines_enable_external();
	}
}
3099
/* Enable the generic object-mode overlay engine. */
static void drw_engines_enable_from_object_mode(void)
{
	use_drw_engine(&draw_engine_object_type);
}
3104
/* Enable the mode-specific overlay engine(s) for the given context mode. */
static void drw_engines_enable_from_mode(int mode)
{
	switch (mode) {
		case CTX_MODE_EDIT_MESH:
			use_drw_engine(&draw_engine_edit_mesh_type);
			break;
		case CTX_MODE_EDIT_CURVE:
			use_drw_engine(&draw_engine_edit_curve_type);
			break;
		case CTX_MODE_EDIT_SURFACE:
			use_drw_engine(&draw_engine_edit_surface_type);
			break;
		case CTX_MODE_EDIT_TEXT:
			use_drw_engine(&draw_engine_edit_text_type);
			break;
		case CTX_MODE_EDIT_ARMATURE:
			use_drw_engine(&draw_engine_edit_armature_type);
			break;
		case CTX_MODE_EDIT_METABALL:
			use_drw_engine(&draw_engine_edit_metaball_type);
			break;
		case CTX_MODE_EDIT_LATTICE:
			use_drw_engine(&draw_engine_edit_lattice_type);
			break;
		case CTX_MODE_POSE:
			use_drw_engine(&draw_engine_pose_type);
			break;
		case CTX_MODE_SCULPT:
			use_drw_engine(&draw_engine_sculpt_type);
			break;
		case CTX_MODE_PAINT_WEIGHT:
			/* Weight paint also shows the pose overlay. */
			use_drw_engine(&draw_engine_pose_type);
			use_drw_engine(&draw_engine_paint_weight_type);
			break;
		case CTX_MODE_PAINT_VERTEX:
			use_drw_engine(&draw_engine_paint_vertex_type);
			break;
		case CTX_MODE_PAINT_TEXTURE:
			use_drw_engine(&draw_engine_paint_texture_type);
			break;
		case CTX_MODE_PARTICLE:
			use_drw_engine(&draw_engine_particle_type);
			break;
		case CTX_MODE_OBJECT:
			/* Object mode has no extra overlay engine. */
			break;
		default:
			BLI_assert(!"Draw mode invalid");
			break;
	}
}
3155
/**
 * Use for select and depth-drawing.
 */
static void drw_engines_enable_basic(void)
{
	use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
}
3163
/**
 * Use for external render engines.
 */
static void drw_engines_enable_external(void)
{
	use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
}
3171
/* Build the enabled-engine list: the render engine's draw engine first,
 * then (when supported) object/mode overlay engines. */
static void drw_engines_enable(ViewLayer *view_layer, RenderEngineType *engine_type)
{
	Object *obact = OBACT(view_layer);
	const int mode = CTX_data_mode_enum_ex(DST.draw_ctx.object_edit, obact, DST.draw_ctx.object_mode);

	drw_engines_enable_from_engine(engine_type);

	/* Overlay engines are skipped when support drawing is off
	 * (see DRW_state_draw_support). */
	if (DRW_state_draw_support()) {
		drw_engines_enable_from_object_mode();
		drw_engines_enable_from_mode(mode);
	}
}
3184
/* Clear the enabled-engine list (frees the link nodes only). */
static void drw_engines_disable(void)
{
	BLI_freelistN(&DST.enabled_engines);
}
3189
3190 static unsigned int DRW_engines_get_hash(void)
3191 {
3192         unsigned int hash = 0;
3193         /* The cache depends on enabled engines */
3194         /* FIXME : if collision occurs ... segfault */
3195         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3196                 DrawEngineType *engine = link->data;
3197                 hash += BLI_ghashutil_strhash_p(engine->idname);
3198         }
3199
3200         return hash;
3201 }
3202
3203 static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
3204 {
3205         BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
3206                                rect->ymax - (3 + v) * U.widget_unit, 0.0f,
3207                                txt, size);
3208 }
3209
3210 /* CPU stats */
3211 static void drw_debug_cpu_stats(void)
3212 {
3213         int u, v;
3214         double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
3215         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3216         rcti rect;
3217         struct ARegion *ar = DST.draw_ctx.ar;
3218         ED_region_visible_rect(ar, &rect);
3219
3220         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3221
3222         /* row by row */
3223         v = 0; u = 0;
3224         /* Label row */
3225         char col_label[32];
3226         sprintf(col_label, "Engine");
3227         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3228         sprintf(col_label, "Init");
3229         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3230         sprintf(col_label, "Background");
3231         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3232         sprintf(col_label, "Render");
3233         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3234         sprintf(col_label, "Total (w/o cache)");
3235         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3236         v++;
3237
3238         /* Engines rows */
3239         char time_to_txt[16];
3240         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3241                 u = 0;
3242                 DrawEngineType *engine = link->data;
3243                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3244
3245                 draw_stat(&rect, u++, v, engine->idname, sizeof(engine->idname));
3246
3247                 init_tot_time += data->init_time;
3248                 sprintf(time_to_txt, "%.2fms", data->init_time);
3249                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3250
3251                 background_tot_time += data->background_time;
3252                 sprintf(time_to_txt, "%.2fms", data->background_time);
3253                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3254
3255                 render_tot_time += data->render_time;
3256                 sprintf(time_to_txt, "%.2fms", data->render_time);
3257                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3258
3259                 tot_time += data->init_time + data->background_time + data->render_time;
3260                 sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
3261                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3262                 v++;
3263         }
3264
3265         /* Totals row */
3266         u = 0;
3267         sprintf(col_label, "Sub Total");
3268         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3269         sprintf(time_to_txt, "%.2fms", init_tot_time);
3270         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3271         sprintf(time_to_txt, "%.2fms", background_tot_time);
3272         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3273         sprintf(time_to_txt, "%.2fms", render_tot_time);
3274         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3275         sprintf(time_to_txt, "%.2fms", tot_time);
3276         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3277         v += 2;
3278
3279         u = 0;
3280         sprintf(col_label, "Cache Time");
3281         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3282         sprintf(time_to_txt, "%.2fms", DST.cache_time);
3283         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3284 }
3285
3286 /* Display GPU time for each passes */
3287 static void drw_debug_gpu_stats(void)
3288 {
3289         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3290         rcti rect;
3291         struct ARegion *ar = DST.draw_ctx.ar;
3292         ED_region_visible_rect(ar, &rect);
3293
3294         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3295
3296         int v = BLI_listbase_count(&DST.enabled_engines) + 5;
3297
3298         char stat_string[32];
3299
3300         /* Memory Stats */
3301         unsigned int tex_mem = GPU_texture_memory_usage_get();
3302         unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
3303
3304         sprintf(stat_string, "GPU Memory");
3305         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3306         sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
3307         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3308         sprintf(stat_string, "   |--> Textures");
3309         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3310         sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
3311         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3312         sprintf(stat_string, "   |--> Meshes");
3313         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3314         sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
3315         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3316
3317         /* Pre offset for stats_draw */
3318         rect.ymax -= (3 + ++v) * U.widget_unit;
3319
3320         /* Rendering Stats */
3321         DRW_stats_draw(&rect);
3322 }
3323
3324 /* -------------------------------------------------------------------- */
3325
3326 /** \name View Update
3327  * \{ */
3328
/* Notify all draw engines of a view change so they can invalidate any
 * engine-specific cached data before the next redraw.
 * Runs outside a regular draw loop, so the draw manager global state (DST)
 * is set up and torn down locally here. */
void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
{
	RenderEngineType *engine_type = update_ctx->engine_type;
	ARegion *ar = update_ctx->ar;
	View3D *v3d = update_ctx->v3d;
	RegionView3D *rv3d = ar->regiondata;
	Depsgraph *depsgraph = update_ctx->depsgraph;
	Scene *scene = update_ctx->scene;
	ViewLayer *view_layer = update_ctx->view_layer;

	/* No GPU viewport allocated yet for this region: nothing to update. */
	if (rv3d->viewport == NULL) {
		return;
	}


	/* Reset before using it. */
	memset(&DST, 0x0, sizeof(DST));

	DST.viewport = rv3d->viewport;
	/* NOTE(review): positional initialization — field order must match the
	 * DRWContextState declaration. OB_MODE_OBJECT is presumably used because
	 * no evaluated object mode is available here — TODO confirm. */
	DST.draw_ctx = (DRWContextState){
		ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, depsgraph, OB_MODE_OBJECT,
		NULL,
	};

	drw_engines_enable(view_layer, engine_type);

	/* Give every enabled engine a chance to react to the view change. */
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *draw_engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);

		if (draw_engine->view_update) {
			draw_engine->view_update(data);
		}
	}

	DST.viewport = NULL;

	drw_engines_disable();
}
3368
3369 /** \} */
3370
3371 /** \name ID Update
3372  * \{ */
3373
/* TODO(sergey): This code is run for each changed ID (including the ones which
 * are changed indirectly via update flush). Need to find a way to make this
 * run really fast, hopefully without any memory allocations on the heap.
 * An idea here could be to run every known engine's id_update() and make them
 * do nothing if there is no engine-specific data yet.
 */
3380 void DRW_notify_id_update