/* Commit: "Fix mixed weight-paint & pose mode"
 * File: source/blender/draw/intern/draw_manager.c */
1 /*
2  * Copyright 2016, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * Contributor(s): Blender Institute
19  *
20  */
21
22 /** \file blender/draw/intern/draw_manager.c
23  *  \ingroup draw
24  */
25
26 #include <stdio.h>
27
28 #include "BLI_listbase.h"
29 #include "BLI_mempool.h"
30 #include "BLI_rect.h"
31 #include "BLI_string.h"
32 #include "BLI_string_utils.h"
33
34 #include "BIF_glutil.h"
35
36 #include "BKE_curve.h"
37 #include "BKE_global.h"
38 #include "BKE_mesh.h"
39 #include "BKE_object.h"
40 #include "BKE_pbvh.h"
41 #include "BKE_paint.h"
42 #include "BKE_workspace.h"
43
44 #include "BLT_translation.h"
45 #include "BLF_api.h"
46
47 #include "DRW_engine.h"
48 #include "DRW_render.h"
49
50 #include "DNA_camera_types.h"
51 #include "DNA_curve_types.h"
52 #include "DNA_view3d_types.h"
53 #include "DNA_screen_types.h"
54 #include "DNA_mesh_types.h"
55 #include "DNA_meshdata_types.h"
56 #include "DNA_meta_types.h"
57
58 #include "ED_space_api.h"
59 #include "ED_screen.h"
60
61 #include "intern/gpu_codegen.h"
62 #include "GPU_batch.h"
63 #include "GPU_draw.h"
64 #include "GPU_extensions.h"
65 #include "GPU_framebuffer.h"
66 #include "GPU_immediate.h"
67 #include "GPU_lamp.h"
68 #include "GPU_material.h"
69 #include "GPU_shader.h"
70 #include "GPU_texture.h"
71 #include "GPU_uniformbuffer.h"
72 #include "GPU_viewport.h"
73 #include "GPU_matrix.h"
74
75 #include "IMB_colormanagement.h"
76
77 #include "RE_engine.h"
78 #include "RE_pipeline.h"
79
80 #include "UI_interface.h"
81 #include "UI_resources.h"
82
83 #include "WM_api.h"
84 #include "WM_types.h"
85
86 #include "draw_manager_text.h"
87 #include "draw_manager_profiling.h"
88
89 /* only for callbacks */
90 #include "draw_cache_impl.h"
91
92 #include "draw_instance_data.h"
93
94 #include "draw_mode_engines.h"
95 #include "engines/clay/clay_engine.h"
96 #include "engines/eevee/eevee_engine.h"
97 #include "engines/basic/basic_engine.h"
98 #include "engines/external/external_engine.h"
99
100 #include "DEG_depsgraph.h"
101 #include "DEG_depsgraph_query.h"
102
103 /* -------------------------------------------------------------------- */
104 /** \name Local Features
105  * \{ */
106
107 #define USE_PROFILE
108
109 #ifdef USE_PROFILE
110 #  include "PIL_time.h"
111
112 #  define PROFILE_TIMER_FALLOFF 0.1
113
114 #  define PROFILE_START(time_start) \
115         double time_start = PIL_check_seconds_timer();
116
117 #  define PROFILE_END_ACCUM(time_accum, time_start) { \
118         time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
119 } ((void)0)
120
121 /* exp average */
122 #  define PROFILE_END_UPDATE(time_update, time_start) { \
123         double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
124         time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
125                       (_time_delta * PROFILE_TIMER_FALLOFF); \
126 } ((void)0)
127
128 #else  /* USE_PROFILE */
129
130 #  define PROFILE_START(time_start) ((void)0)
131 #  define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
132 #  define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
133
134 #endif  /* USE_PROFILE */
135
136
137 /* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
138 #define USE_GPU_SELECT
139
140 #ifdef USE_GPU_SELECT
141 #  include "ED_view3d.h"
142 #  include "ED_armature.h"
143 #  include "GPU_select.h"
144 #endif
145
146 /** \} */
147
148
149 #define MAX_ATTRIB_NAME 32
150 #define MAX_ATTRIB_COUNT 6 /* Can be adjusted for more */
151 #define MAX_PASS_NAME 32
152 #define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
153
154 extern char datatoc_gpu_shader_2D_vert_glsl[];
155 extern char datatoc_gpu_shader_3D_vert_glsl[];
156 extern char datatoc_gpu_shader_fullscreen_vert_glsl[];
157
158 /* Prototypes. */
159 static void drw_engines_enable_external(void);
160
161 /* Structures */
162 typedef enum {
163         DRW_UNIFORM_BOOL,
164         DRW_UNIFORM_SHORT_TO_INT,
165         DRW_UNIFORM_SHORT_TO_FLOAT,
166         DRW_UNIFORM_INT,
167         DRW_UNIFORM_FLOAT,
168         DRW_UNIFORM_TEXTURE,
169         DRW_UNIFORM_BUFFER,
170         DRW_UNIFORM_MAT3,
171         DRW_UNIFORM_MAT4,
172         DRW_UNIFORM_BLOCK
173 } DRWUniformType;
174
175 typedef enum {
176         DRW_ATTRIB_INT,
177         DRW_ATTRIB_FLOAT,
178 } DRWAttribType;
179
180 #define MAX_UNIFORM_DATA_SIZE 16
181
182 struct DRWUniform {
183         struct DRWUniform *next;
184         int location;
185         char type; /* DRWUniformType */
186         char length; /* cannot be more than 16 */
187         char arraysize; /* cannot be more than 16 too */
188         const void *value;
189 };
190
/* Per shading-group GPU interface: cached locations of builtin matrix
 * uniforms plus the description of per-instance vertex attributes. */
struct DRWInterface {
	DRWUniform *uniforms;   /* DRWUniform, single-linked list (newest first) */
	int attribs_count;      /* number of attributes declared so far */
	int attribs_stride;     /* sum of all attribute sizes (in floats) */
	/* NOTE(review): arrays are sized 16 but MAX_ATTRIB_COUNT is 6 — confirm
	 * whether the extra slots are intentional head-room. */
	int attribs_size[16];
	int attribs_loc[16];
	/* matrices locations (uniform locations queried from the shader,
	 * -1 when the shader does not use the builtin) */
	int model;
	int modelinverse;
	int modelview;
	int modelviewinverse;
	int projection;
	int projectioninverse;
	int view;
	int viewinverse;
	int modelviewprojection;
	int viewprojection;
	int viewprojectioninverse;
	int normal;
	int worldnormal;
	int camtexfac;
	int orcotexfac;
	int eye;
	int clipplanes;
	/* Dynamic batch */
	Gwn_Batch *instance_batch; /* contains instances attributes */
	GLuint instance_vbo; /* same as instance_batch but generated from DRWCalls */
	struct DRWInstanceData *inst_data;
#ifdef USE_GPU_SELECT
	struct DRWInstanceData *inst_selectid;
	/* Override for single object instances. */
	int override_selectid;
#endif
	int instance_count;
	Gwn_VertFormat vbo_format;
};
227
228 struct DRWPass {
229         /* Single linked list with last member to append */
230         DRWShadingGroup *shgroups;
231         DRWShadingGroup *shgroups_last;
232
233         DRWState state;
234         char name[MAX_PASS_NAME];
235 };
236
237 typedef struct DRWCallHeader {
238         void *prev;
239
240 #ifdef USE_GPU_SELECT
241         int select_id;
242 #endif
243         uchar type;
244 } DRWCallHeader;
245
246 typedef struct DRWCall {
247         DRWCallHeader head;
248
249         float obmat[4][4];
250         Gwn_Batch *geometry;
251
252         Object *ob; /* Optional */
253         ID *ob_data; /* Optional. */
254 } DRWCall;
255
256 typedef struct DRWCallGenerate {
257         DRWCallHeader head;
258
259         float obmat[4][4];
260
261         DRWCallGenerateFn *geometry_fn;
262         void *user_data;
263 } DRWCallGenerate;
264
/* A group of draw calls sharing one shader and one set of uniforms/state.
 * Owned by a DRWPass (single-linked via `next`). */
struct DRWShadingGroup {
	struct DRWShadingGroup *next;

	GPUShader *shader;               /* Shader to bind */
	DRWInterface interface;          /* Uniforms pointers */

	/* DRWCall or DRWCallDynamic depending of type */
	void *calls;                     /* head of prepend list (newest call) */
	void *calls_first; /* To be able to traverse the list in the order of addition */

	DRWState state_extra;            /* State changes for this batch only (or'd with the pass's state) */
	DRWState state_extra_disable;    /* State changes for this batch only (and'd with the pass's state) */
	unsigned int stencil_mask;       /* Stencil mask to use for stencil test / write operations */
	int type;                        /* DRW_SHG_* enum below */

	ID *instance_data;         /* Object->data to instance */
	Gwn_Batch *instance_geom;  /* Geometry to instance */
	Gwn_Batch *batch_geom;     /* Result of call batching */

#ifdef USE_GPU_SELECT
	/* backlink to pass we're in */
	DRWPass *pass_parent;
#endif
};
289
290 /* Used by DRWShadingGroup.type */
291 enum {
292         DRW_SHG_NORMAL,
293         DRW_SHG_POINT_BATCH,
294         DRW_SHG_LINE_BATCH,
295         DRW_SHG_TRIANGLE_BATCH,
296         DRW_SHG_INSTANCE,
297 };
298
299 /* Used by DRWCall.type */
300 enum {
301         /* A single batch */
302         DRW_CALL_SINGLE,
303         /* Uses a callback to draw with any number of batches. */
304         DRW_CALL_GENERATE,
305         /* Arbitrary number of multiple args. */
306         DRW_CALL_DYNAMIC,
307 };
308
309 /** Render State: No persistent data between draw calls. */
310 static struct DRWGlobalState {
311         /* Cache generation */
312         ViewportMemoryPool *vmempool;
313         DRWUniform *last_uniform;
314         DRWCall *last_call;
315         DRWCallGenerate *last_callgenerate;
316         DRWShadingGroup *last_shgroup;
317         DRWInstanceDataList *idatalist;
318         DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE];
319
320         /* Rendering state */
321         GPUShader *shader;
322
323         /* Managed by `DRW_state_set`, `DRW_state_reset` */
324         DRWState state;
325         unsigned int stencil_mask;
326
327         /* Per viewport */
328         GPUViewport *viewport;
329         struct GPUFrameBuffer *default_framebuffer;
330         float size[2];
331         float screenvecs[2][3];
332         float pixsize;
333
334         GLenum backface, frontface;
335
336         /* Clip planes */
337         int num_clip_planes;
338         float clip_planes_eq[MAX_CLIP_PLANES][4];
339
340         struct {
341                 unsigned int is_select : 1;
342                 unsigned int is_depth : 1;
343                 unsigned int is_image_render : 1;
344                 unsigned int is_scene_render : 1;
345                 unsigned int draw_background : 1;
346         } options;
347
348         /* Current rendering context */
349         DRWContextState draw_ctx;
350
351         /* Convenience pointer to text_store owned by the viewport */
352         struct DRWTextStore **text_store_p;
353
354         ListBase enabled_engines; /* RenderEngineType */
355
356         /* Profiling */
357         double cache_time;
358 } DST = {NULL};
359
360 /** GPU Resource State: Memory storage between drawing. */
361 static struct DRWResourceState {
362         GPUTexture **bound_texs;
363
364         bool *bound_tex_slots;
365
366         int bind_tex_inc;
367         int bind_ubo_inc;
368 } RST = {NULL};
369
370 static struct DRWMatrixOveride {
371         float mat[6][4][4];
372         bool override[6];
373 } viewport_matrix_override = {{{{0}}}};
374
375 ListBase DRW_engines = {NULL, NULL};
376
377 #ifdef USE_GPU_SELECT
/* Select-id picked up by subsequent DRW_shgroup_call_add() calls during
 * GPU selection drawing. (unsigned)-1 means "no id loaded". */
static unsigned int g_DRW_select_id = (unsigned int)-1;

/* Set the select id for the next draw calls (GPU picking). */
void DRW_select_load_id(unsigned int id)
{
	BLI_assert(G.f & G_PICKSEL); /* only meaningful while picking */
	g_DRW_select_id = id;
}
386
387
388 /* -------------------------------------------------------------------- */
389
390 /** \name Textures (DRW_texture)
391  * \{ */
392
/* Translate a DRWTextureFormat into the matching GPUTextureFormat and its
 * channel count. Formats inside the `#if 0` block only hit the second
 * switch: in debug builds the first switch asserts; in release builds
 * *r_data_type is left unwritten for them. */
static void drw_texture_get_format(
        DRWTextureFormat format,
        GPUTextureFormat *r_data_type, int *r_channels)
{
	switch (format) {
		case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
		case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
		case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
		case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
		case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
		case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
		case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break;
		case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
		case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
		case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
		case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
#if 0
		case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
		case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
		case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
#endif
		case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
		case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
		case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
		case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
		default :
			/* file type not supported you must uncomment it from above */
			BLI_assert(false);
			break;
	}

	/* Channel count (depth formats fall through to 1). */
	switch (format) {
		case DRW_TEX_RGBA_8:
		case DRW_TEX_RGBA_16:
		case DRW_TEX_RGBA_32:
			*r_channels = 4;
			break;
		case DRW_TEX_RGB_8:
		case DRW_TEX_RGB_16:
		case DRW_TEX_RGB_32:
		case DRW_TEX_RGB_11_11_10:
			*r_channels = 3;
			break;
		case DRW_TEX_RG_8:
		case DRW_TEX_RG_16:
		case DRW_TEX_RG_16I:
		case DRW_TEX_RG_32:
			*r_channels = 2;
			break;
		default:
			*r_channels = 1;
			break;
	}
}
447
448 static void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
449 {
450         GPU_texture_bind(tex, 0);
451         if (flags & DRW_TEX_MIPMAP) {
452                 GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
453                 DRW_texture_generate_mipmaps(tex);
454         }
455         else {
456                 GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
457         }
458         GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
459         GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
460         GPU_texture_unbind(tex);
461 }
462
463 GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
464 {
465         GPUTexture *tex;
466         GPUTextureFormat data_type;
467         int channels;
468
469         drw_texture_get_format(format, &data_type, &channels);
470         tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
471         drw_texture_set_parameters(tex, flags);
472
473         return tex;
474 }
475
476 GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
477 {
478         GPUTexture *tex;
479         GPUTextureFormat data_type;
480         int channels;
481
482         drw_texture_get_format(format, &data_type, &channels);
483         tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
484         drw_texture_set_parameters(tex, flags);
485
486         return tex;
487 }
488
489 GPUTexture *DRW_texture_create_2D_array(
490         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
491 {
492         GPUTexture *tex;
493         GPUTextureFormat data_type;
494         int channels;
495
496         drw_texture_get_format(format, &data_type, &channels);
497         tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
498         drw_texture_set_parameters(tex, flags);
499
500         return tex;
501 }
502
503 GPUTexture *DRW_texture_create_3D(
504         int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
505 {
506         GPUTexture *tex;
507         GPUTextureFormat data_type;
508         int channels;
509
510         drw_texture_get_format(format, &data_type, &channels);
511         tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
512         drw_texture_set_parameters(tex, flags);
513
514         return tex;
515 }
516
517 GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
518 {
519         GPUTexture *tex;
520         GPUTextureFormat data_type;
521         int channels;
522
523         drw_texture_get_format(format, &data_type, &channels);
524         tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
525         drw_texture_set_parameters(tex, flags);
526
527         return tex;
528 }
529
/* Regenerate the mip chain of \a tex (binds to unit 0 temporarily). */
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
	GPU_texture_bind(tex, 0);
	GPU_texture_generate_mipmap(tex);
	GPU_texture_unbind(tex);
}

/* Replace the pixel data of \a tex. */
void DRW_texture_update(GPUTexture *tex, const float *pixels)
{
	GPU_texture_update(tex, pixels);
}

/* Release \a tex. */
void DRW_texture_free(GPUTexture *tex)
{
	GPU_texture_free(tex);
}
546
547 /** \} */
548
549
550 /* -------------------------------------------------------------------- */
551
552 /** \name Uniform Buffer Object (DRW_uniformbuffer)
553  * \{ */
554
/* Thin wrappers over the GPU_uniformbuffer API so engine code only
 * depends on the DRW prefix. */
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
	return GPU_uniformbuffer_create(size, data, NULL);
}

void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
	GPU_uniformbuffer_update(ubo, data);
}

void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
	GPU_uniformbuffer_free(ubo);
}
569
570 /** \} */
571
572
573 /* -------------------------------------------------------------------- */
574
575 /** \name Shaders (DRW_shader)
576  * \{ */
577
/* Create a shader from source strings. Note the argument-order difference:
 * DRW takes (vert, geom, frag) while GPU_shader_create expects
 * (vert, frag, geom) -- the swap below is intentional. */
GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
{
	return GPU_shader_create(vert, frag, geom, NULL, defines);
}
582
583 GPUShader *DRW_shader_create_with_lib(
584         const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
585 {
586         GPUShader *sh;
587         char *vert_with_lib = NULL;
588         char *frag_with_lib = NULL;
589         char *geom_with_lib = NULL;
590
591         vert_with_lib = BLI_string_joinN(lib, vert);
592         frag_with_lib = BLI_string_joinN(lib, frag);
593
594         if (geom) {
595                 geom_with_lib = BLI_string_joinN(lib, geom);
596         }
597
598         sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
599
600         MEM_freeN(vert_with_lib);
601         MEM_freeN(frag_with_lib);
602         if (geom) {
603                 MEM_freeN(geom_with_lib);
604         }
605
606         return sh;
607 }
608
/* Fragment-shader helpers: pair \a frag with a builtin vertex shader. */
GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
}

GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
	return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines);
}

/* NOTE(review): returns a shared builtin shader -- presumably callers must
 * not pass it to DRW_shader_free(); confirm against GPU module ownership. */
GPUShader *DRW_shader_create_3D_depth_only(void)
{
	return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
}

void DRW_shader_free(GPUShader *shader)
{
	GPU_shader_free(shader);
}
633
634 /** \} */
635
636
637 /* -------------------------------------------------------------------- */
638
639 /** \name Interface (DRW_interface)
640  * \{ */
641
/* Initialize \a interface: query the locations of all builtin uniforms from
 * \a shader (missing ones come back as -1) and reset all dynamic-batch and
 * attribute state to empty. */
static void drw_interface_create(DRWInterface *interface, GPUShader *shader)
{
	interface->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
	interface->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
	interface->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
	interface->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
	interface->projection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION);
	interface->projectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_PROJECTION_INV);
	interface->view = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW);
	interface->viewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEW_INV);
	interface->viewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION);
	interface->viewprojectioninverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_VIEWPROJECTION_INV);
	interface->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
	interface->normal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
	interface->worldnormal = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
	interface->camtexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CAMERATEXCO);
	interface->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
	interface->clipplanes = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_CLIPPLANES);
	interface->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
	/* Reset dynamic-batch state. */
	interface->instance_count = 0;
	interface->attribs_count = 0;
	interface->attribs_stride = 0;
	interface->instance_vbo = 0;
	interface->instance_batch = NULL;
	interface->inst_data = NULL;
	interface->uniforms = NULL;
#ifdef USE_GPU_SELECT
	interface->inst_selectid = NULL;
	interface->override_selectid = -1;
#endif

	memset(&interface->vbo_format, 0, sizeof(Gwn_VertFormat));
}
675
676
677 static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name,
678                                   DRWUniformType type, const void *value, int length, int arraysize)
679 {
680         int location;
681         if (type == DRW_UNIFORM_BLOCK) {
682                 location = GPU_shader_get_uniform_block(shgroup->shader, name);
683         }
684         else {
685                 location = GPU_shader_get_uniform(shgroup->shader, name);
686         }
687
688         if (location == -1) {
689                 if (G.debug & G_DEBUG)
690                         fprintf(stderr, "Uniform '%s' not found!\n", name);
691                 /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
692                 // BLI_assert(0);
693                 return;
694         }
695
696         DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
697
698         BLI_assert(arraysize > 0 && arraysize <= 16);
699         BLI_assert(arraysize * length <= MAX_UNIFORM_DATA_SIZE);
700
701         uni->location = location;
702         uni->type = type;
703         uni->value = value;
704         uni->length = length;
705         uni->arraysize = arraysize;
706
707         /* Prepend */
708         uni->next = shgroup->interface.uniforms;
709         shgroup->interface.uniforms = uni;
710 }
711
712 static void drw_interface_attrib(DRWShadingGroup *shgroup, const char *name, DRWAttribType UNUSED(type), int size, bool dummy)
713 {
714         unsigned int attrib_id = shgroup->interface.attribs_count;
715         GLuint program = GPU_shader_get_program(shgroup->shader);
716
717         shgroup->interface.attribs_loc[attrib_id] = glGetAttribLocation(program, name);
718         shgroup->interface.attribs_size[attrib_id] = size;
719         shgroup->interface.attribs_stride += size;
720         shgroup->interface.attribs_count += 1;
721
722         if (shgroup->type != DRW_SHG_INSTANCE) {
723                 BLI_assert(size <= 4); /* Matrices are not supported by Gawain. */
724                 GWN_vertformat_attr_add(&shgroup->interface.vbo_format, name, GWN_COMP_F32, size, GWN_FETCH_FLOAT);
725         }
726
727         BLI_assert(shgroup->interface.attribs_count < MAX_ATTRIB_COUNT);
728
729 /* Adding attribute even if not found for now (to keep memory alignment).
730  * Should ideally take vertex format automatically from batch eventually */
731 #if 0
732         if (attrib->location == -1 && !dummy) {
733                 if (G.debug & G_DEBUG)
734                         fprintf(stderr, "Attribute '%s' not found!\n", name);
735                 BLI_assert(0);
736                 MEM_freeN(attrib);
737                 return;
738         }
739 #else
740         UNUSED_VARS(dummy);
741 #endif
742 }
743
744 /** \} */
745
746
747 /* -------------------------------------------------------------------- */
748
749 /** \name Shading Group (DRW_shgroup)
750  * \{ */
751
/* Allocate a new shading group for \a shader, append it to \a pass and
 * initialize it to the default DRW_SHG_NORMAL state. Storage comes from the
 * viewport mempool, so groups live until the viewport cache is freed. */
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
	DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);

	/* Append */
	if (pass->shgroups != NULL) {
		pass->shgroups_last->next = shgroup;
	}
	else {
		pass->shgroups = shgroup;
	}
	pass->shgroups_last = shgroup;
	shgroup->next = NULL;

	drw_interface_create(&shgroup->interface, shader);

	/* Mempool memory is not zeroed: initialize every field explicitly. */
	shgroup->type = DRW_SHG_NORMAL;
	shgroup->shader = shader;
	shgroup->state_extra = 0;
	shgroup->state_extra_disable = ~0x0;
	shgroup->stencil_mask = 0;
	shgroup->batch_geom = NULL;
	shgroup->instance_geom = NULL;
	shgroup->instance_data = NULL;

	shgroup->calls = NULL;
	shgroup->calls_first = NULL;

#ifdef USE_GPU_SELECT
	shgroup->pass_parent = pass;
#endif

	return shgroup;
}
786
/* Create a shading group from a GPUMaterial: binds the material's textures,
 * color ramps and dynamic matrix uniforms, plus its parameter UBO.
 * Returns NULL when the material's shader failed to compile. */
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
{
	double time = 0.0; /* TODO make time variable */

	/* TODO : Ideally we should not convert. But since the whole codegen
	 * is relying on GPUPass we keep it as is for now. */
	GPUPass *gpupass = GPU_material_get_pass(material);

	if (!gpupass) {
		/* Shader compilation error */
		return NULL;
	}

	struct GPUShader *shader = GPU_pass_shader(gpupass);

	DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);

	/* Converting dynamic GPUInput to DRWUniform */
	ListBase *inputs = &gpupass->inputs;

	for (GPUInput *input = inputs->first; input; input = input->next) {
		/* Textures */
		if (input->ima) {
			GPUTexture *tex = GPU_texture_from_blender(
			        input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);

			if (input->bindtex) {
				DRW_shgroup_uniform_texture(grp, input->shadername, tex);
			}
		}
		/* Color Ramps */
		else if (input->tex) {
			DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
		}
		/* Floats */
		else {
			switch (input->type) {
				case GPU_FLOAT:
				case GPU_VEC2:
				case GPU_VEC3:
				case GPU_VEC4:
					/* Should already be in the material ubo. */
					break;
				case GPU_MAT3:
					DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
					break;
				case GPU_MAT4:
					DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
					break;
				default:
					break;
			}
		}
	}

	/* Scalar/vector parameters are packed into one UBO generated by codegen. */
	GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
	if (ubo != NULL) {
		DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
	}

	return grp;
}
849
850 DRWShadingGroup *DRW_shgroup_material_instance_create(
851         struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob)
852 {
853         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
854
855         if (shgroup) {
856                 shgroup->type = DRW_SHG_INSTANCE;
857                 shgroup->instance_geom = geom;
858                 shgroup->instance_data = ob->data;
859         }
860
861         return shgroup;
862 }
863
864 DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
865         struct GPUMaterial *material, DRWPass *pass, int size)
866 {
867         DRWShadingGroup *shgroup = DRW_shgroup_material_create(material, pass);
868
869         if (shgroup) {
870                 shgroup->type = DRW_SHG_TRIANGLE_BATCH;
871                 shgroup->interface.instance_count = size * 3;
872                 drw_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
873         }
874
875         return shgroup;
876 }
877
878 DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom)
879 {
880         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
881
882         shgroup->type = DRW_SHG_INSTANCE;
883         shgroup->instance_geom = geom;
884
885         return shgroup;
886 }
887
888 DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
889 {
890         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
891
892         shgroup->type = DRW_SHG_POINT_BATCH;
893         DRW_shgroup_attrib_float(shgroup, "pos", 3);
894
895         return shgroup;
896 }
897
898 DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
899 {
900         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
901
902         shgroup->type = DRW_SHG_LINE_BATCH;
903         DRW_shgroup_attrib_float(shgroup, "pos", 3);
904
905         return shgroup;
906 }
907
908 /* Very special batch. Use this if you position
909  * your vertices with the vertex shader
910  * and dont need any VBO attrib */
911 DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int size)
912 {
913         DRWShadingGroup *shgroup = DRW_shgroup_create(shader, pass);
914
915         shgroup->type = DRW_SHG_TRIANGLE_BATCH;
916         shgroup->interface.instance_count = size * 3;
917         drw_interface_attrib(shgroup, "dummy", DRW_ATTRIB_FLOAT, 1, true);
918
919         return shgroup;
920 }
921
922 void DRW_shgroup_free(struct DRWShadingGroup *shgroup)
923 {
924         if (shgroup->interface.instance_vbo &&
925             (shgroup->interface.instance_batch == 0))
926         {
927                 glDeleteBuffers(1, &shgroup->interface.instance_vbo);
928         }
929
930         GWN_BATCH_DISCARD_SAFE(shgroup->batch_geom);
931 }
932
/**
 * Prepend a draw call to a shading group's call list.
 * - 'shgroup->calls' always points at the most recently prepended call.
 * - 'shgroup->calls_first' keeps the very first call added (set only once).
 * - The previous head's 'head.prev' is pointed at the new call, so walking
 *   from 'calls_first' along 'head.prev' visits calls in insertion order.
 * The trailing '((void)0)' forces callers to terminate with a semicolon.
 */
#define CALL_PREPEND(shgroup, call) { \
	if (shgroup->calls == NULL) { \
		shgroup->calls = call; \
		shgroup->calls_first = call; \
	} \
	else { \
		((DRWCall *)(shgroup->calls))->head.prev = call; \
		shgroup->calls = call; \
	} \
	call->head.prev = NULL; \
} ((void)0)
944
/**
 * Use a pre-built batch as the source of per-instance data.
 * Only valid for instancing groups, and only once per group
 * (asserted below).
 */
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *instances)
{
	BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
	BLI_assert(shgroup->interface.instance_batch == NULL);

	shgroup->interface.instance_batch = instances;

#ifdef USE_GPU_SELECT
	/* Register one call so the whole instance batch gets a selection id. */
	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
	call->head.select_id = g_DRW_select_id;

	CALL_PREPEND(shgroup, call);
#endif
}
959
/**
 * Add a single draw call of 'geom' to the shading group.
 * 'obmat' (object matrix) is copied when given; when NULL the call's matrix
 * is left uninitialized — NOTE(review): callers presumably guarantee the
 * shader does not read the model matrix in that case; confirm.
 */
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
{
	BLI_assert(geom != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);

	CALL_PREPEND(shgroup, call);

	call->head.type = DRW_CALL_SINGLE;
#ifdef USE_GPU_SELECT
	call->head.select_id = g_DRW_select_id;
#endif

	if (obmat != NULL) {
		copy_m4_m4(call->obmat, obmat);
	}

	call->geometry = geom;
	/* No object data: orco texture coordinate factors won't be available. */
	call->ob_data = NULL;
}
981
/**
 * Add a draw call of 'geom' bound to an object: the object's matrix is
 * copied and its data pointer stored (used later e.g. for texture-space
 * 'orco' factors).
 */
void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
{
	BLI_assert(geom != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);

	CALL_PREPEND(shgroup, call);

	call->head.type = DRW_CALL_SINGLE;
#ifdef USE_GPU_SELECT
	call->head.select_id = g_DRW_select_id;
#endif

	copy_m4_m4(call->obmat, ob->obmat);
	call->geometry = geom;
	call->ob_data = ob->data;
}
1000
/**
 * Add a deferred draw call: instead of a fixed batch, 'geometry_fn' is
 * invoked at draw time (with 'user_data') to produce the geometry.
 * As with DRW_shgroup_call_add, a NULL 'obmat' leaves the call matrix
 * uninitialized.
 */
void DRW_shgroup_call_generate_add(
        DRWShadingGroup *shgroup,
        DRWCallGenerateFn *geometry_fn, void *user_data,
        float (*obmat)[4])
{
	BLI_assert(geometry_fn != NULL);
	BLI_assert(shgroup->type == DRW_SHG_NORMAL);

	DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate);

	CALL_PREPEND(shgroup, call);

	call->head.type = DRW_CALL_GENERATE;
#ifdef USE_GPU_SELECT
	call->head.select_id = g_DRW_select_id;
#endif

	if (obmat != NULL) {
		copy_m4_m4(call->obmat, obmat);
	}

	call->geometry_fn = geometry_fn;
	call->user_data = user_data;
}
1025
1026 static void sculpt_draw_cb(
1027         DRWShadingGroup *shgroup,
1028         void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
1029         void *user_data)
1030 {
1031         Object *ob = user_data;
1032         PBVH *pbvh = ob->sculpt->pbvh;
1033
1034         if (pbvh) {
1035                 BKE_pbvh_draw_cb(
1036                         pbvh, NULL, NULL, false,
1037                         (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
1038         }
1039 }
1040
/* Add a sculpt-object draw: geometry is generated at draw time from the
 * object's PBVH nodes (see sculpt_draw_cb). */
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
	DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
1045
1046 void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
1047 {
1048         DRWInterface *interface = &shgroup->interface;
1049
1050 #ifdef USE_GPU_SELECT
1051         if (G.f & G_PICKSEL) {
1052                 if (interface->inst_selectid == NULL) {
1053                         interface->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
1054                 }
1055
1056                 int *select_id = DRW_instance_data_next(interface->inst_selectid);
1057                 *select_id = g_DRW_select_id;
1058         }
1059 #endif
1060
1061         BLI_assert(attr_len == interface->attribs_count);
1062         UNUSED_VARS_NDEBUG(attr_len);
1063
1064         if (interface->attribs_stride > 0) {
1065                 if (interface->inst_data == NULL) {
1066                         interface->inst_data = DRW_instance_data_request(DST.idatalist, interface->attribs_stride, 16);
1067                 }
1068
1069                 float *data = DRW_instance_data_next(interface->inst_data);
1070
1071                 for (int i = 0; i < interface->attribs_count; ++i) {
1072                         memcpy(data, attr[i], sizeof(float) * interface->attribs_size[i]);
1073                         data = data + interface->attribs_size[i];
1074                 }
1075         }
1076
1077         interface->instance_count += 1;
1078 }
1079
/* Used for instancing with no attributes: directly set how many times the
 * instance geometry is drawn. Must be called before any dynamic call and
 * only on groups without declared attributes (asserted). */
void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, int count)
{
	DRWInterface *interface = &shgroup->interface;

	BLI_assert(interface->instance_count == 0);
	BLI_assert(interface->attribs_count == 0);

#ifdef USE_GPU_SELECT
	if (G.f & G_PICKSEL) {
		/* One selection id for the whole instance range. */
		interface->override_selectid = g_DRW_select_id;
	}
#endif

	interface->instance_count = count;
}
1096
1097 /**
1098  * State is added to #Pass.state while drawing.
1099  * Use to temporarily enable draw options.
1100  */
1101 void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
1102 {
1103         shgroup->state_extra |= state;
1104 }
1105
/* Temporarily disable draw options for this shading group.
 * NOTE(review): 'state_extra_disable' appears to be an AND-mask applied to
 * the pass state while drawing (all bits set by default), so clearing a
 * bit here disables that state — confirm against the draw loop. */
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
	shgroup->state_extra_disable &= ~state;
}
1110
/* Set the stencil reference value (0..255) used when drawing this group. */
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
{
	/* The stencil buffer is 8 bits wide. */
	BLI_assert(mask <= 255);
	shgroup->stencil_mask = mask;
}
1116
/* Declare a per-element float attribute of 'size' components for a dynamic
 * shading group (non-dummy: real data is expected for each call). */
void DRW_shgroup_attrib_float(DRWShadingGroup *shgroup, const char *name, int size)
{
	drw_interface_attrib(shgroup, name, DRW_ATTRIB_FLOAT, size, false);
}
1121
/* ---------- Uniform registration helpers ----------
 * These record a *pointer* to the value; the data is dereferenced at draw
 * time and must stay valid until then. 'arraysize' is the number of array
 * elements bound at once. */

void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}

void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}

/* Double pointer: the texture may be (re)created after registration. */
void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1);
}

/* Booleans are stored and uploaded as ints. */
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}

void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}

void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}

void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}

/* 'short' variants: the value is converted at bind time, letting callers
 * point directly at DNA shorts. */
void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}

void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}

void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}

void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}

void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT3, value, 9, 1);
}

void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
{
	drw_interface_uniform(shgroup, name, DRW_UNIFORM_MAT4, value, 16, 1);
}
1196
/* Creates a VBO containing OGL primitives for all DRWCallDynamic.
 * Used for point/line/triangle batch groups: the accumulated per-element
 * data becomes the vertex buffer of a freshly (re)built batch. */
static void shgroup_dynamic_batch(DRWShadingGroup *shgroup)
{
	DRWInterface *interface = &shgroup->interface;
	int nbr = interface->instance_count;

	Gwn_PrimType type = (shgroup->type == DRW_SHG_POINT_BATCH) ? GWN_PRIM_POINTS :
	                     (shgroup->type == DRW_SHG_TRIANGLE_BATCH) ? GWN_PRIM_TRIS : GWN_PRIM_LINES;

	if (nbr == 0)
		return;

	/* Upload Data */
	Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&interface->vbo_format);
	if (interface->inst_data) {
		GWN_vertbuf_data_set(vbo, nbr, DRW_instance_data_get(interface->inst_data), false);
	} else {
		/* Use uninitialized memory. This is for dummy vertex buffers. */
		/* XXX TODO do not alloc at all. */
		GWN_vertbuf_data_alloc(vbo, nbr);
	}

	/* TODO make the batch dynamic instead of freeing it every time */
	if (shgroup->batch_geom)
		GWN_batch_discard(shgroup->batch_geom);

	/* Batch takes ownership of the VBO and frees it on discard. */
	shgroup->batch_geom = GWN_batch_create_ex(type, vbo, NULL, GWN_BATCH_OWNS_VBO);
}
1225
/* Upload the accumulated per-instance data of an instancing group into a
 * raw GL buffer ('instance_vbo'). No-op when an external instance batch
 * was provided. */
static void shgroup_dynamic_instance(DRWShadingGroup *shgroup)
{
	DRWInterface *interface = &shgroup->interface;
	int buffer_size = 0;
	void *data = NULL;

	if (interface->instance_batch != NULL) {
		return;
	}

	/* TODO We still need this because gawain does not support Matrix attribs. */
	if (interface->instance_count == 0) {
		/* Nothing to draw: release any previous buffer. */
		if (interface->instance_vbo) {
			glDeleteBuffers(1, &interface->instance_vbo);
			interface->instance_vbo = 0;
		}
		return;
	}

	/* Gather Data */
	buffer_size = sizeof(float) * interface->attribs_stride * interface->instance_count;

	/* TODO poke mike to add this to gawain */
	if (interface->instance_vbo) {
		glDeleteBuffers(1, &interface->instance_vbo);
		interface->instance_vbo = 0;
	}

	if (interface->inst_data) {
		data = DRW_instance_data_get(interface->inst_data);
	}

	/* 'data' may be NULL, in which case glBufferData only allocates
	 * (uninitialized) storage of 'buffer_size' bytes. */
	glGenBuffers(1, &interface->instance_vbo);
	glBindBuffer(GL_ARRAY_BUFFER, interface->instance_vbo);
	glBufferData(GL_ARRAY_BUFFER, buffer_size, data, GL_STATIC_DRAW);
}
1262
1263 static void shgroup_dynamic_batch_from_calls(DRWShadingGroup *shgroup)
1264 {
1265         if ((shgroup->interface.instance_vbo || shgroup->batch_geom) &&
1266             (G.debug_value == 667))
1267         {
1268                 return;
1269         }
1270
1271         if (shgroup->type == DRW_SHG_INSTANCE) {
1272                 shgroup_dynamic_instance(shgroup);
1273         }
1274         else {
1275                 shgroup_dynamic_batch(shgroup);
1276         }
1277 }
1278
1279 /** \} */
1280
1281
1282 /* -------------------------------------------------------------------- */
1283
1284 /** \name Passes (DRW_pass)
1285  * \{ */
1286
1287 DRWPass *DRW_pass_create(const char *name, DRWState state)
1288 {
1289         DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
1290         pass->state = state;
1291         BLI_strncpy(pass->name, name, MAX_PASS_NAME);
1292
1293         pass->shgroups = NULL;
1294         pass->shgroups_last = NULL;
1295
1296         return pass;
1297 }
1298
/* Replace the whole draw state of the pass. */
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
	pass->state = state;
}
1303
1304 void DRW_pass_free(DRWPass *pass)
1305 {
1306         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1307                 DRW_shgroup_free(shgroup);
1308         }
1309
1310         pass->shgroups = NULL;
1311         pass->shgroups_last = NULL;
1312 }
1313
1314 void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
1315 {
1316         for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) {
1317                 callback(userData, shgroup);
1318         }
1319 }
1320
/* Thunk passed to pass_shgroup_dist_sort: the view axis and origin used to
 * project each call's object location onto the view depth direction. */
typedef struct ZSortData {
	float *axis;   /* view Z axis (world space) */
	float *origin; /* view origin (world space) */
} ZSortData;
1325
/**
 * Comparator used to sort shading groups back-to-front based on the object
 * position of each group's *first* draw call, projected on the view axis.
 * NOTE(review): despite the '_sq' names, these are signed distances along
 * the view axis, not squared lengths.
 * NOTE(review): both NULL-call cases return -1, so the ordering is not a
 * strict weak order when only 'call_b' is NULL — the list sort tolerates
 * this, but the relative order of call-less groups is arbitrary; confirm.
 */
static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
	const ZSortData *zsortdata = (ZSortData *)thunk;
	const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
	const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;

	const DRWCall *call_a;
	const DRWCall *call_b;

	call_a = shgrp_a->calls_first;
	call_b = shgrp_b->calls_first;

	if (call_a == NULL) return -1;
	if (call_b == NULL) return -1;

	float tmp[3];
	sub_v3_v3v3(tmp, zsortdata->origin, call_a->obmat[3]);
	const float a_sq = dot_v3v3(zsortdata->axis, tmp);
	sub_v3_v3v3(tmp, zsortdata->origin, call_b->obmat[3]);
	const float b_sq = dot_v3v3(zsortdata->axis, tmp);

	if      (a_sq < b_sq) return  1;
	else if (a_sq > b_sq) return -1;
	else {
		/* If there is a depth prepass put it before */
		if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
			return -1;
		}
		else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
			return  1;
		}
		else return  0;
	}
}
1360
/* ------------------ Shading group sorting --------------------- */

/* Instantiate the generic linked-list merge-sort template for
 * DRWShadingGroup, generating 'shgroup_sort_fn_r' (re-entrant variant
 * taking a thunk argument — see list_sort_impl.h). */
#define SORT_IMPL_LINKTYPE DRWShadingGroup

#define SORT_IMPL_USE_THUNK
#define SORT_IMPL_FUNC shgroup_sort_fn_r
#include "../../blenlib/intern/list_sort_impl.h"
#undef SORT_IMPL_FUNC
#undef SORT_IMPL_USE_THUNK

#undef SORT_IMPL_LINKTYPE
1372
1373 /**
1374  * Sort Shading groups by decreasing Z of their first draw call.
1375  * This is usefull for order dependant effect such as transparency.
1376  **/
1377 void DRW_pass_sort_shgroup_z(DRWPass *pass)
1378 {
1379         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1380
1381         float (*viewinv)[4];
1382         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1383                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1384
1385         ZSortData zsortdata = {viewinv[2], viewinv[3]};
1386
1387         if (pass->shgroups && pass->shgroups->next) {
1388                 pass->shgroups = shgroup_sort_fn_r(pass->shgroups, pass_shgroup_dist_sort, &zsortdata);
1389
1390                 /* Find the next last */
1391                 DRWShadingGroup *last = pass->shgroups;
1392                 while ((last = last->next)) {
1393                         /* Do nothing */
1394                 }
1395                 pass->shgroups_last = last;
1396         }
1397 }
1398
1399 /** \} */
1400
1401
1402 /* -------------------------------------------------------------------- */
1403
1404 /** \name Draw (DRW_draw)
1405  * \{ */
1406
/**
 * Apply 'state' to the GL context, changing only the option groups whose
 * bits differ from the cached state in 'DST.state'.
 */
static void drw_state_set(DRWState state)
{
	if (DST.state == state) {
		return;
	}


/* Transition of flag(s) 'f': -1 = turned off, 1 = turned on, 0 = unchanged. */
#define CHANGED_TO(f) \
	((DST.state & (f)) ? \
		((state & (f)) ?  0 : -1) : \
		((state & (f)) ?  1 :  0))

/* True when any bit of 'f' differs between old and new state. */
#define CHANGED_ANY(f) \
	((DST.state & (f)) != (state & (f)))

/* Same, but also stores the new value of the masked bits in 'enabled'. */
#define CHANGED_ANY_STORE_VAR(f, enabled) \
	((DST.state & (f)) != (enabled = (state & (f))))

	/* Depth Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
			if (test == 1) {
				glDepthMask(GL_TRUE);
			}
			else {
				glDepthMask(GL_FALSE);
			}
		}
	}

	/* Color Write */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
			if (test == 1) {
				glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
			}
			else {
				glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
			}
		}
	}

	/* Cull */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
		        DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
		        test))
		{
			if (test) {
				glEnable(GL_CULL_FACE);

				if ((state & DRW_STATE_CULL_BACK) != 0) {
					glCullFace(GL_BACK);
				}
				else if ((state & DRW_STATE_CULL_FRONT) != 0) {
					glCullFace(GL_FRONT);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_CULL_FACE);
			}
		}
	}

	/* Depth Test */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
		        DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
		        test))
		{
			if (test) {
				glEnable(GL_DEPTH_TEST);

				/* NOTE(review): DRW_STATE_DEPTH_LESS maps to GL_LEQUAL, not
				 * GL_LESS — confirm this is intentional. */
				if (state & DRW_STATE_DEPTH_LESS) {
					glDepthFunc(GL_LEQUAL);
				}
				else if (state & DRW_STATE_DEPTH_EQUAL) {
					glDepthFunc(GL_EQUAL);
				}
				else if (state & DRW_STATE_DEPTH_GREATER) {
					glDepthFunc(GL_GREATER);
				}
				else if (state & DRW_STATE_DEPTH_ALWAYS) {
					glDepthFunc(GL_ALWAYS);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_DEPTH_TEST);
			}
		}
	}

	/* Wire Width */
	{
		if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_LARGE)) {
			if ((state & DRW_STATE_WIRE) != 0) {
				glLineWidth(1.0f);
			}
			else if ((state & DRW_STATE_WIRE_LARGE) != 0) {
				glLineWidth(UI_GetThemeValuef(TH_OUTLINE_WIDTH) * 2.0f);
			}
			else {
				/* do nothing */
			}
		}
	}

	/* Points Size */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_POINT))) {
			if (test == 1) {
				GPU_enable_program_point_size();
				glPointSize(5.0f);
			}
			else {
				GPU_disable_program_point_size();
			}
		}
	}

	/* Blending (all buffer) */
	{
		int test;
		if (CHANGED_ANY_STORE_VAR(
		        DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION |
		        DRW_STATE_ADDITIVE_FULL,
		        test))
		{
			if (test) {
				glEnable(GL_BLEND);

				if ((state & DRW_STATE_BLEND) != 0) {
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
					                    GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
				}
				else if ((state & DRW_STATE_MULTIPLY) != 0) {
					glBlendFunc(GL_DST_COLOR, GL_ZERO);
				}
				else if ((state & DRW_STATE_TRANSMISSION) != 0) {
					glBlendFunc(GL_ONE, GL_SRC_ALPHA);
				}
				else if ((state & DRW_STATE_ADDITIVE) != 0) {
					/* Do not let alpha accumulate but premult the source RGB by it. */
					glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
					                    GL_ZERO, GL_ONE); /* Alpha */
				}
				else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
					/* Let alpha accumulate. */
					glBlendFunc(GL_ONE, GL_ONE);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				glDisable(GL_BLEND);
			}
		}
	}

	/* Clip Planes */
	{
		int test;
		if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
			if (test == 1) {
				for (int i = 0; i < DST.num_clip_planes; ++i) {
					glEnable(GL_CLIP_DISTANCE0 + i);
				}
			}
			else {
				/* Disable every possible plane: the active count may have
				 * changed since they were enabled. */
				for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
					glDisable(GL_CLIP_DISTANCE0 + i);
				}
			}
		}
	}

	/* Line Stipple */
	{
		int test;
		if (CHANGED_ANY_STORE_VAR(
		        DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
		        test))
		{
			if (test) {
				if ((state & DRW_STATE_STIPPLE_2) != 0) {
					setlinestyle(2);
				}
				else if ((state & DRW_STATE_STIPPLE_3) != 0) {
					setlinestyle(3);
				}
				else if ((state & DRW_STATE_STIPPLE_4) != 0) {
					setlinestyle(4);
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				setlinestyle(0);
			}
		}
	}

	/* Stencil */
	{
		DRWState test;
		if (CHANGED_ANY_STORE_VAR(
		        DRW_STATE_WRITE_STENCIL |
		        DRW_STATE_STENCIL_EQUAL,
		        test))
		{
			if (test) {
				glEnable(GL_STENCIL_TEST);

				/* Stencil Write */
				if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
					glStencilMask(0xFF);
					glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
				}
				/* Stencil Test: the actual function/reference is set per
				 * shgroup later in drw_stencil_set(). */
				else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
					glStencilMask(0x00); /* disable write */
					DST.stencil_mask = 0;
				}
				else {
					BLI_assert(0);
				}
			}
			else {
				/* disable write & test */
				DST.stencil_mask = 0;
				glStencilMask(0x00);
				glStencilFunc(GL_ALWAYS, 1, 0xFF);
				glDisable(GL_STENCIL_TEST);
			}
		}
	}

#undef CHANGED_TO
#undef CHANGED_ANY
#undef CHANGED_ANY_STORE_VAR

	DST.state = state;
}
1663
/* Set the stencil reference value for the current shgroup, choosing the
 * stencil function from the current state (write: always pass & replace;
 * test: pass on equality). No-op when the state enables neither. */
static void drw_stencil_set(unsigned int mask)
{
	if (DST.stencil_mask != mask) {
		/* Stencil Write */
		if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
			glStencilFunc(GL_ALWAYS, mask, 0xFF);
			DST.stencil_mask = mask;
		}
		/* Stencil Test */
		else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
			glStencilFunc(GL_EQUAL, mask, 0xFF);
			DST.stencil_mask = mask;
		}
	}
}
1679
/* Linked-list node tracking a texture bound while drawing shading groups,
 * so bindings can be released afterwards. */
typedef struct DRWBoundTexture {
	struct DRWBoundTexture *next, *prev;
	GPUTexture *tex;
} DRWBoundTexture;
1684
1685 static void draw_geometry_prepare(
1686         DRWShadingGroup *shgroup, const float (*obmat)[4], const float *texcoloc, const float *texcosize)
1687 {
1688         RegionView3D *rv3d = DST.draw_ctx.rv3d;
1689         DRWInterface *interface = &shgroup->interface;
1690
1691         float mvp[4][4], mv[4][4], mi[4][4], mvi[4][4], pi[4][4], n[3][3], wn[3][3];
1692         float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
1693         float eye[3] = { 0.0f, 0.0f, 1.0f }; /* looking into the screen */
1694         float viewcamtexcofac[4] = { 1.0f, 1.0f, 0.0f, 0.0f };
1695
1696         if (rv3d != NULL) {
1697                 copy_v4_v4(viewcamtexcofac, rv3d->viewcamtexcofac);
1698         }
1699
1700         bool do_pi = (interface->projectioninverse != -1);
1701         bool do_mvp = (interface->modelviewprojection != -1);
1702         bool do_mi = (interface->modelinverse != -1);
1703         bool do_mv = (interface->modelview != -1);
1704         bool do_mvi = (interface->modelviewinverse != -1);
1705         bool do_n = (interface->normal != -1);
1706         bool do_wn = (interface->worldnormal != -1);
1707         bool do_eye = (interface->eye != -1);
1708         bool do_orco = (interface->orcotexfac != -1) && (texcoloc != NULL) && (texcosize != NULL);
1709
1710         /* Matrix override */
1711         float (*persmat)[4];
1712         float (*persinv)[4];
1713         float (*viewmat)[4];
1714         float (*viewinv)[4];
1715         float (*winmat)[4];
1716         float (*wininv)[4];
1717
1718         persmat = (viewport_matrix_override.override[DRW_MAT_PERS])
1719                   ? viewport_matrix_override.mat[DRW_MAT_PERS] : rv3d->persmat;
1720         persinv = (viewport_matrix_override.override[DRW_MAT_PERSINV])
1721                   ? viewport_matrix_override.mat[DRW_MAT_PERSINV] : rv3d->persinv;
1722         viewmat = (viewport_matrix_override.override[DRW_MAT_VIEW])
1723                   ? viewport_matrix_override.mat[DRW_MAT_VIEW] : rv3d->viewmat;
1724         viewinv = (viewport_matrix_override.override[DRW_MAT_VIEWINV])
1725                   ? viewport_matrix_override.mat[DRW_MAT_VIEWINV] : rv3d->viewinv;
1726         winmat = (viewport_matrix_override.override[DRW_MAT_WIN])
1727                   ? viewport_matrix_override.mat[DRW_MAT_WIN] : rv3d->winmat;
1728         wininv = viewport_matrix_override.mat[DRW_MAT_WININV];
1729
1730         if (do_pi) {
1731                 if (!viewport_matrix_override.override[DRW_MAT_WININV]) {
1732                         invert_m4_m4(pi, winmat);
1733                         wininv = pi;
1734                 }
1735         }
1736         if (do_mi) {
1737                 invert_m4_m4(mi, obmat);
1738         }
1739         if (do_mvp) {
1740                 mul_m4_m4m4(mvp, persmat, obmat);
1741         }
1742         if (do_mv || do_mvi || do_n || do_eye) {
1743                 mul_m4_m4m4(mv, viewmat, obmat);
1744         }
1745         if (do_mvi) {
1746                 invert_m4_m4(mvi, mv);
1747         }
1748         if (do_n || do_eye) {
1749                 copy_m3_m4(n, mv);
1750                 invert_m3(n);
1751                 transpose_m3(n);
1752         }
1753         if (do_wn) {
1754                 copy_m3_m4(wn, obmat);
1755                 invert_m3(wn);
1756                 transpose_m3(wn);
1757         }
1758         if (do_eye) {
1759                 /* Used by orthographic wires */
1760                 float tmp[3][3];
1761                 invert_m3_m3(tmp, n);
1762                 /* set eye vector, transformed to object coords */
1763                 mul_m3_v3(tmp, eye);
1764         }
1765         if (do_orco) {
1766                 mul_v3_v3fl(orcofacs[1], texcosize, 2.0f);
1767                 invert_v3(orcofacs[1]);
1768                 sub_v3_v3v3(orcofacs[0], texcoloc, texcosize);
1769                 negate_v3(orcofacs[0]);
1770                 mul_v3_v3(orcofacs[0], orcofacs[1]); /* result in a nice MADD in the shader */
1771         }
1772
1773         /* Should be really simple */
1774         /* step 1 : bind object dependent matrices */
1775         /* TODO : Some of these are not object dependant.
1776          * They should be grouped inside a UBO updated once per redraw.
1777          * The rest can also go into a UBO to reduce API calls. */
1778         GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)obmat);
1779         GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)mi);
1780         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)mvp);
1781         GPU_shader_uniform_vector(shgroup->shader, interface->viewinverse, 16, 1, (float *)viewinv);
1782         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojection, 16, 1, (float *)persmat);
1783         GPU_shader_uniform_vector(shgroup->shader, interface->viewprojectioninverse, 16, 1, (float *)persinv);
1784         GPU_shader_uniform_vector(shgroup->shader, interface->projection, 16, 1, (float *)winmat);
1785         GPU_shader_uniform_vector(shgroup->shader, interface->projectioninverse, 16, 1, (float *)wininv);
1786         GPU_shader_uniform_vector(shgroup->shader, interface->view, 16, 1, (float *)viewmat);
1787         GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)mv);
1788         GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)mvi);
1789         GPU_shader_uniform_vector(shgroup->shader, interface->normal, 9, 1, (float *)n);
1790         GPU_shader_uniform_vector(shgroup->shader, interface->worldnormal, 9, 1, (float *)wn);
1791         GPU_shader_uniform_vector(shgroup->shader, interface->camtexfac, 4, 1, (float *)viewcamtexcofac);
1792         GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)orcofacs);
1793         GPU_shader_uniform_vector(shgroup->shader, interface->eye, 3, 1, (float *)eye);
1794         GPU_shader_uniform_vector(shgroup->shader, interface->clipplanes, 4, DST.num_clip_planes, (float *)DST.clip_planes_eq);
1795 }
1796
1797 static void draw_geometry_execute_ex(
1798         DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count)
1799 {
1800         DRWInterface *interface = &shgroup->interface;
1801         /* step 2 : bind vertex array & draw */
1802         GWN_batch_program_set(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
1803         if (interface->instance_batch) {
1804                 /* Used for Particles. Cannot do partial drawing. */
1805                 GWN_batch_draw_stupid_instanced_with_batch(geom, interface->instance_batch);
1806         }
1807         else if (interface->instance_vbo) {
1808                 GWN_batch_draw_stupid_instanced(
1809                         geom, interface->instance_vbo, start, count, interface->attribs_count,
1810                         interface->attribs_stride, interface->attribs_size, interface->attribs_loc);
1811         }
1812         else {
1813                 GWN_batch_draw_stupid(geom, start, count);
1814         }
1815         /* XXX this just tells gawain we are done with the shader.
1816          * This does not unbind the shader. */
1817         GWN_batch_program_unset(geom);
1818 }
1819
/* Draw the whole geometry (start/count = 0). Also used as the callback passed
 * to DRWCallGenerate's geometry_fn in draw_shgroup(). */
static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
{
	draw_geometry_execute_ex(shgroup, geom, 0, 0);
}
1824
1825 static void draw_geometry(
1826         DRWShadingGroup *shgroup, Gwn_Batch *geom, const float (*obmat)[4], ID *ob_data,
1827         unsigned int start, unsigned int count)
1828 {
1829         float *texcoloc = NULL;
1830         float *texcosize = NULL;
1831
1832         if (ob_data != NULL) {
1833                 switch (GS(ob_data->name)) {
1834                         case ID_ME:
1835                                 BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
1836                                 break;
1837                         case ID_CU:
1838                         {
1839                                 Curve *cu = (Curve *)ob_data;
1840                                 if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
1841                                         BKE_curve_texspace_calc(cu);
1842                                 }
1843                                 texcoloc = cu->loc;
1844                                 texcosize = cu->size;
1845                                 break;
1846                         }
1847                         case ID_MB:
1848                         {
1849                                 MetaBall *mb = (MetaBall *)ob_data;
1850                                 texcoloc = mb->loc;
1851                                 texcosize = mb->size;
1852                                 break;
1853                         }
1854                         default:
1855                                 break;
1856                 }
1857         }
1858
1859         draw_geometry_prepare(shgroup, obmat, texcoloc, texcosize);
1860
1861         draw_geometry_execute_ex(shgroup, geom, start, count);
1862 }
1863
1864 static void bind_texture(GPUTexture *tex)
1865 {
1866         int bind_num = GPU_texture_bound_number(tex);
1867         if (bind_num == -1) {
1868                 for (int i = 0; i < GPU_max_textures(); ++i) {
1869                         RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures();
1870                         if (RST.bound_tex_slots[RST.bind_tex_inc] == false) {
1871                                 if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
1872                                         GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
1873                                 }
1874                                 GPU_texture_bind(tex, RST.bind_tex_inc);
1875                                 RST.bound_texs[RST.bind_tex_inc] = tex;
1876                                 RST.bound_tex_slots[RST.bind_tex_inc] = true;
1877                                 return;
1878                         }
1879                 }
1880
1881                 printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
1882         }
1883         RST.bound_tex_slots[bind_num] = true;
1884 }
1885
1886 static void bind_ubo(GPUUniformBuffer *ubo)
1887 {
1888         if (RST.bind_ubo_inc < GPU_max_ubo_binds()) {
1889                 GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
1890                 RST.bind_ubo_inc++;
1891         }
1892         else {
1893                 /* This is not depending on user input.
1894                  * It is our responsability to make sure there enough slots. */
1895                 BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
1896
1897                 /* printf so user can report bad behaviour */
1898                 printf("Not enough ubo slots! This should not happen!\n");
1899         }
1900 }
1901
/* Mark every texture slot as free again (called before each shading group). */
static void release_texture_slots(void)
{
	memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
}
1906
/* Restart UBO binding-point allocation (called before each shading group). */
static void release_ubo_slots(void)
{
	RST.bind_ubo_inc = 0;
}
1911
/* Draw one shading group: bind its shader, apply its render state, upload all
 * of its uniforms / textures / UBOs, then issue the draw calls — either a
 * single aggregated batch (instance / batch groups) or one call per recorded
 * DRWCall (normal groups). */
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
	BLI_assert(shgroup->shader);

	DRWInterface *interface = &shgroup->interface;
	GPUTexture *tex;
	GPUUniformBuffer *ubo;
	int val;
	float fval;

	/* Only rebind when the shader actually changes (DST.shader caches it). */
	if (DST.shader != shgroup->shader) {
		if (DST.shader) GPU_shader_unbind();
		GPU_shader_bind(shgroup->shader);
		DST.shader = shgroup->shader;
	}

	const bool is_normal = ELEM(shgroup->type, DRW_SHG_NORMAL);

	if (!is_normal) {
		/* Dynamic group: aggregate the recorded calls into one batch up front. */
		shgroup_dynamic_batch_from_calls(shgroup);
	}

	release_texture_slots();
	release_ubo_slots();

	/* Pass state, minus the group's disabled bits, plus its extra bits. */
	drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
	drw_stencil_set(shgroup->stencil_mask);

	/* Binding Uniform */
	/* Don't check anything, Interface should already contain the least uniform as possible */
	for (DRWUniform *uni = interface->uniforms; uni; uni = uni->next) {
		switch (uni->type) {
			case DRW_UNIFORM_SHORT_TO_INT:
				/* Stored as short, uploaded widened to int. */
				val = (int)*((short *)uni->value);
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
				break;
			case DRW_UNIFORM_SHORT_TO_FLOAT:
				/* Stored as short, uploaded converted to float. */
				fval = (float)*((short *)uni->value);
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
				break;
			case DRW_UNIFORM_BOOL:
			case DRW_UNIFORM_INT:
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
				break;
			case DRW_UNIFORM_FLOAT:
			case DRW_UNIFORM_MAT3:
			case DRW_UNIFORM_MAT4:
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
				break;
			case DRW_UNIFORM_TEXTURE:
				tex = (GPUTexture *)uni->value;
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BUFFER:
				/* Buffer textures only make sense when drawing into an FBO. */
				if (!DRW_state_is_fbo()) {
					break;
				}
				/* Double indirection: the texture may be (re)created per frame. */
				tex = *((GPUTexture **)uni->value);
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BLOCK:
				ubo = (GPUUniformBuffer *)uni->value;
				bind_ubo(ubo);
				GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
				break;
		}
	}

#ifdef USE_GPU_SELECT
	/* use the first item because of selection we only ever add one */
#  define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
	if ((G.f & G_PICKSEL) && (_call)) { \
		GPU_select_load_id((_call)->head.select_id); \
	} ((void)0)

#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count)  \
	_start = 0;                                                      \
	_count = _shgroup->interface.instance_count;                     \
	int *select_id = NULL;                                           \
	if (G.f & G_PICKSEL) {                                           \
		if (_shgroup->interface.override_selectid == -1) {                        \
			select_id = DRW_instance_data_get(_shgroup->interface.inst_selectid); \
			switch (_shgroup->type) {                                             \
				case DRW_SHG_TRIANGLE_BATCH: _count = 3; break;                   \
				case DRW_SHG_LINE_BATCH: _count = 2; break;                       \
				default: _count = 1; break;                                       \
			}                                                                     \
		}                                                                         \
		else {                                                                    \
			GPU_select_load_id(_shgroup->interface.override_selectid);            \
		}                                                                         \
	}                                                                \
	while (_start < _shgroup->interface.instance_count) {            \
		if (select_id) {                                             \
			GPU_select_load_id(select_id[_start]);                   \
		}

# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
		_start += _count;                                    \
	}

#else
#  define GPU_SELECT_LOAD_IF_PICKSEL(call)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
	_start = 0;                                                     \
	_count = _shgroup->interface.instance_count;

#endif

	/* Rendering Calls */
	if (!is_normal) {
		/* Replacing multiple calls with only one */
		float obmat[4][4];
		unit_m4(obmat);

		if (shgroup->type == DRW_SHG_INSTANCE &&
		    (interface->instance_count > 0 || interface->instance_batch != NULL))
		{
			if (interface->instance_batch != NULL) {
				GPU_SELECT_LOAD_IF_PICKSEL((DRWCall *)shgroup->calls_first);
				draw_geometry(shgroup, shgroup->instance_geom, obmat, shgroup->instance_data, 0, 0);
			}
			else {
				unsigned int count, start;
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry(shgroup, shgroup->instance_geom, obmat, shgroup->instance_data, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
		else {
			/* Some dynamic batch can have no geom (no call to aggregate) */
			if (shgroup->batch_geom) {
				unsigned int count, start;
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry(shgroup, shgroup->batch_geom, obmat, NULL, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
	}
	else {
		/* Normal group: one draw per recorded call. */
		for (DRWCall *call = shgroup->calls_first; call; call = call->head.prev) {
			bool neg_scale = is_negative_m4(call->obmat);

			/* Negative scale objects */
			if (neg_scale) {
				glFrontFace(DST.backface);
			}

			GPU_SELECT_LOAD_IF_PICKSEL(call);

			if (call->head.type == DRW_CALL_SINGLE) {
				draw_geometry(shgroup, call->geometry, call->obmat, call->ob_data, 0, 0);
			}
			else {
				/* Generated geometry: the callback emits draws via draw_geometry_execute. */
				BLI_assert(call->head.type == DRW_CALL_GENERATE);
				DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
				draw_geometry_prepare(shgroup, callgen->obmat, NULL, NULL);
				callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
			}

			/* Reset state */
			if (neg_scale) {
				glFrontFace(DST.frontface);
			}
		}
	}

	/* TODO: remove, (currently causes alpha issue with sculpt, need to investigate) */
	DRW_state_reset();
}
2095
2096 static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
2097 {
2098         /* Start fresh */
2099         DST.shader = NULL;
2100
2101         drw_state_set(pass->state);
2102
2103         DRW_stats_query_start(pass->name);
2104
2105         for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
2106                 draw_shgroup(shgroup, pass->state);
2107                 /* break if upper limit */
2108                 if (shgroup == end_group) {
2109                         break;
2110                 }
2111         }
2112
2113         /* Clear Bound textures */
2114         for (int i = 0; i < GPU_max_textures(); i++) {
2115                 if (RST.bound_texs[i] != NULL) {
2116                         GPU_texture_unbind(RST.bound_texs[i]);
2117                         RST.bound_texs[i] = NULL;
2118                 }
2119         }
2120
2121         if (DST.shader) {
2122                 GPU_shader_unbind();
2123                 DST.shader = NULL;
2124         }
2125
2126         DRW_stats_query_end();
2127 }
2128
/* Draw every shading group of the pass, first to last. */
void DRW_draw_pass(DRWPass *pass)
{
	drw_draw_pass_ex(pass, pass->shgroups, pass->shgroups_last);
}
2133
/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes.
 * 'end_group' is inclusive; both groups must belong to 'pass'. */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
	drw_draw_pass_ex(pass, start_group, end_group);
}
2139
/* Load the region's projection/view matrices so immediate-mode callbacks
 * running before scene drawing work in the correct space. */
void DRW_draw_callbacks_pre_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2147
/* Same as DRW_draw_callbacks_pre_scene(): reload the region's matrices for
 * immediate-mode callbacks running after scene drawing. */
void DRW_draw_callbacks_post_scene(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	gpuLoadProjectionMatrix(rv3d->winmat);
	gpuLoadMatrix(rv3d->viewmat);
}
2155
/* Reset state to not interfere with other UI drawcalls. */
void DRW_state_reset_ex(DRWState state)
{
	/* Invert the cached state first so drw_state_set() sees every bit as
	 * changed and re-applies the whole state. */
	DST.state = ~state;
	drw_state_set(state);
}
2162
/* Reset GL state to the draw manager's defaults. */
void DRW_state_reset(void)
{
	/* Reset blending function */
	glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

	DRW_state_reset_ex(DRW_STATE_DEFAULT);
}
2170
/* NOTE : Make sure to reset after use! */
void DRW_state_invert_facing(void)
{
	/* Swap the winding convention; draw_shgroup() uses DST.backface for
	 * negative-scale objects, so both values must stay consistent. */
	SWAP(GLenum, DST.backface, DST.frontface);
	glFrontFace(DST.frontface);
}
2177
2178 /**
2179  * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
2180  * and if the shaders have support for it (see usage of gl_ClipDistance).
2181  * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
2182  **/
2183 void DRW_state_clip_planes_add(float plane_eq[4])
2184 {
2185         copy_v4_v4(DST.clip_planes_eq[DST.num_clip_planes++], plane_eq);
2186 }
2187
/* Forget all planes added with DRW_state_clip_planes_add(). */
void DRW_state_clip_planes_reset(void)
{
	DST.num_clip_planes = 0;
}
2192
2193 /** \} */
2194
2195
2196 struct DRWTextStore *DRW_text_cache_ensure(void)
2197 {
2198         BLI_assert(DST.text_store_p);
2199         if (*DST.text_store_p == NULL) {
2200                 *DST.text_store_p = DRW_text_cache_create();
2201         }
2202         return *DST.text_store_p;
2203 }
2204
2205
2206 /* -------------------------------------------------------------------- */
2207
2208 /** \name Settings
2209  * \{ */
2210
2211 bool DRW_object_is_renderable(Object *ob)
2212 {
2213         Scene *scene = DST.draw_ctx.scene;
2214         Object *obedit = scene->obedit;
2215
2216         BLI_assert(BKE_object_is_visible(ob, OB_VISIBILITY_CHECK_UNKNOWN_RENDER_MODE));
2217
2218         if (ob->type == OB_MESH) {
2219                 if (ob == obedit) {
2220                         IDProperty *props = BKE_layer_collection_engine_evaluated_get(ob, COLLECTION_MODE_EDIT, "");
2221                         bool do_show_occlude_wire = BKE_collection_engine_property_value_get_bool(props, "show_occlude_wire");
2222                         if (do_show_occlude_wire) {
2223                                 return false;
2224                         }
2225                         bool do_show_weight = BKE_collection_engine_property_value_get_bool(props, "show_weight");
2226                         if (do_show_weight) {
2227                                 return false;
2228                         }
2229                 }
2230         }
2231
2232         return true;
2233 }
2234
2235 /**
2236  * Return whether this object is visible depending if
2237  * we are rendering or drawing in the viewport.
2238  */
2239 bool DRW_check_object_visible_within_active_context(Object *ob)
2240 {
2241         const eObjectVisibilityCheck mode = DRW_state_is_scene_render() ?
2242                                              OB_VISIBILITY_CHECK_FOR_RENDER :
2243                                              OB_VISIBILITY_CHECK_FOR_VIEWPORT;
2244         return BKE_object_is_visible(ob, mode);
2245 }
2246
2247 bool DRW_object_is_flat_normal(const Object *ob)
2248 {
2249         if (ob->type == OB_MESH) {
2250                 const Mesh *me = ob->data;
2251                 if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
2252                         return false;
2253                 }
2254         }
2255         return true;
2256 }
2257
2258 /**
2259  * Return true if the object has its own draw mode.
2260  * Caller must check this is active */
2261 int DRW_object_is_mode_shade(const Object *ob)
2262 {
2263         BLI_assert(ob == DST.draw_ctx.obact);
2264         if ((DST.draw_ctx.object_mode & OB_MODE_EDIT) == 0) {
2265                 if (DST.draw_ctx.object_mode & (OB_MODE_VERTEX_PAINT | OB_MODE_WEIGHT_PAINT | OB_MODE_TEXTURE_PAINT)) {
2266                         if ((DST.draw_ctx.v3d->flag2 & V3D_SHOW_MODE_SHADE_OVERRIDE) == 0) {
2267                                 return true;
2268                         }
2269                         else {
2270                                 return false;
2271                         }
2272                 }
2273         }
2274         return -1;
2275 }
2276
2277 /** \} */
2278
2279
2280 /* -------------------------------------------------------------------- */
2281
2282 /** \name Framebuffers (DRW_framebuffer)
2283  * \{ */
2284
2285 static GPUTextureFormat convert_tex_format(
2286         int fbo_format,
2287         int *r_channels, bool *r_is_depth)
2288 {
2289         *r_is_depth = ELEM(fbo_format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8);
2290
2291         switch (fbo_format) {
2292                 case DRW_TEX_R_16:     *r_channels = 1; return GPU_R16F;
2293                 case DRW_TEX_R_32:     *r_channels = 1; return GPU_R32F;
2294                 case DRW_TEX_RG_8:     *r_channels = 2; return GPU_RG8;
2295                 case DRW_TEX_RG_16:    *r_channels = 2; return GPU_RG16F;
2296                 case DRW_TEX_RG_16I:   *r_channels = 2; return GPU_RG16I;
2297                 case DRW_TEX_RG_32:    *r_channels = 2; return GPU_RG32F;
2298                 case DRW_TEX_RGBA_8:   *r_channels = 4; return GPU_RGBA8;
2299                 case DRW_TEX_RGBA_16:  *r_channels = 4; return GPU_RGBA16F;
2300                 case DRW_TEX_RGBA_32:  *r_channels = 4; return GPU_RGBA32F;
2301                 case DRW_TEX_DEPTH_16: *r_channels = 1; return GPU_DEPTH_COMPONENT16;
2302                 case DRW_TEX_DEPTH_24: *r_channels = 1; return GPU_DEPTH_COMPONENT24;
2303                 case DRW_TEX_DEPTH_24_STENCIL_8: *r_channels = 1; return GPU_DEPTH24_STENCIL8;
2304                 case DRW_TEX_DEPTH_32: *r_channels = 1; return GPU_DEPTH_COMPONENT32F;
2305                 case DRW_TEX_RGB_11_11_10: *r_channels = 3; return GPU_R11F_G11F_B10F;
2306                 default:
2307                         BLI_assert(false && "Texture format unsupported as render target!");
2308                         *r_channels = 4; return GPU_RGBA8;
2309         }
2310 }
2311
/* Thin wrapper: create an empty GPU framebuffer. Caller owns the result. */
struct GPUFrameBuffer *DRW_framebuffer_create(void)
{
	return GPU_framebuffer_create();
}
2316
/* Ensure *fb exists with all 'textures_len' textures created and attached.
 * Textures flagged DRW_TEX_TEMP are (re)queried from the viewport texture
 * pool every call; others are created once and reused. Depth textures do not
 * advance the color attachment index. */
void DRW_framebuffer_init(
        struct GPUFrameBuffer **fb, void *engine_type, int width, int height,
        DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
{
	BLI_assert(textures_len <= MAX_FBO_TEX);
	BLI_assert(width > 0 && height > 0);

	bool create_fb = false;
	int color_attachment = -1;

	if (!*fb) {
		*fb = GPU_framebuffer_create();
		create_fb = true;
	}

	for (int i = 0; i < textures_len; ++i) {
		int channels;
		bool is_depth;
		bool create_tex = false;

		DRWFboTexture fbotex = textures[i];
		bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;

		GPUTextureFormat gpu_format = convert_tex_format(fbotex.format, &channels, &is_depth);

		if (!*fbotex.tex || is_temp) {
			/* Temp textures need to be queried each frame, others not. */
			if (is_temp) {
				*fbotex.tex = GPU_viewport_texture_pool_query(
				        DST.viewport, engine_type, width, height, channels, gpu_format);
			}
			else {
				*fbotex.tex = GPU_texture_create_2D_custom(
				        width, height, channels, gpu_format, NULL, NULL);
				create_tex = true;
			}
		}

		if (!is_depth) {
			/* Only color textures consume an attachment index. */
			++color_attachment;
		}

		if (create_fb || create_tex) {
			drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
			GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
		}
	}

	if (create_fb && (textures_len > 0)) {
		/* Attaching everything (temps included) above lets us validate once. */
		if (!GPU_framebuffer_check_valid(*fb, NULL)) {
			printf("Error invalid framebuffer\n");
		}

		/* Detach temp textures */
		for (int i = 0; i < textures_len; ++i) {
			DRWFboTexture fbotex = textures[i];

			if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
				GPU_framebuffer_texture_detach(*fbotex.tex);
			}
		}

		/* Restore the default framebuffer binding after validation. */
		if (DST.default_framebuffer != NULL) {
			GPU_framebuffer_bind(DST.default_framebuffer);
		}
	}
}
2384
/* Thin wrapper: free a framebuffer created via the DRW API. */
void DRW_framebuffer_free(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_free(fb);
}
2389
/* Thin wrapper: make 'fb' the active render target. */
void DRW_framebuffer_bind(struct GPUFrameBuffer *fb)
{
	GPU_framebuffer_bind(fb);
}
2394
2395 void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
2396 {
2397         if (color) {
2398                 glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
2399                 glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
2400         }
2401         if (depth) {
2402                 glDepthMask(GL_TRUE);
2403                 glClearDepth(clear_depth);
2404         }
2405         if (stencil) {
2406                 glStencilMask(0xFF);
2407         }
2408         glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
2409                 ((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
2410                 ((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
2411 }
2412
2413 void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
2414 {
2415         GLenum type;
2416         switch (channels) {
2417                 case 1: type = GL_RED; break;
2418                 case 2: type = GL_RG; break;
2419                 case 3: type = GL_RGB; break;
2420                 case 4: type = GL_RGBA; break;
2421                 default:
2422                         BLI_assert(false && "wrong number of read channels");
2423                         return;
2424         }
2425         glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
2426         glReadPixels(x, y, w, h, type, GL_FLOAT, data);
2427 }
2428
/* Read back a rectangle of the depth buffer as floats.
 * \a data must hold w * h floats. */
void DRW_framebuffer_read_depth(int x, int y, int w, int h, float *data)
{
	GLenum type = GL_DEPTH_COMPONENT;

	/* Read buffer selection does not apply to depth reads,
	 * but set a valid one anyway. */
	glReadBuffer(GL_COLOR_ATTACHMENT0); /* This is OK! */
	glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}
2436
/* Thin wrapper: attach mip level \a mip of \a tex to \a fb at \a slot. */
void DRW_framebuffer_texture_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
{
	GPU_framebuffer_texture_attach(fb, tex, slot, mip);
}
2441
/* Thin wrapper: attach a single layer of an array/3D texture to \a fb. */
void DRW_framebuffer_texture_layer_attach(struct GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip)
{
	GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
}
2446
/* Thin wrapper: attach one face of a cubemap texture to \a fb. */
void DRW_framebuffer_cubeface_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
{
	GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
}
2451
/* Thin wrapper: detach \a tex from the framebuffer it is attached to. */
void DRW_framebuffer_texture_detach(GPUTexture *tex)
{
	GPU_framebuffer_texture_detach(tex);
}
2456
/* Thin wrapper: blit attachment 0 (or depth/stencil) from one framebuffer to another. */
void DRW_framebuffer_blit(struct GPUFrameBuffer *fb_read, struct GPUFrameBuffer *fb_write, bool depth, bool stencil)
{
	GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
}
2461
/* Thin wrapper: downsample \a tex into its own mip chain, \a num_iter levels deep.
 * \a callback is invoked once per generated level with \a userData. */
void DRW_framebuffer_recursive_downsample(
        struct GPUFrameBuffer *fb, struct GPUTexture *tex, int num_iter,
        void (*callback)(void *userData, int level), void *userData)
{
	GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
}
2468
/* Set the GL viewport rectangle (the framebuffer argument is unused). */
void DRW_framebuffer_viewport_size(struct GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
{
	glViewport(x, y, w, h);
}
2473
/* Use color management profile to draw texture to framebuffer.
 * Draws \a tex as a full-screen triangle, applying the scene's OCIO view
 * transform when available, otherwise falling back to a plain linear->sRGB
 * (or pass-through) shader. */
void DRW_transform_to_display(GPUTexture *tex)
{
	drw_state_set(DRW_STATE_WRITE_COLOR);

	Gwn_VertFormat *vert_format = immVertexFormat();
	unsigned int pos = GWN_vertformat_attr_add(vert_format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
	unsigned int texco = GWN_vertformat_attr_add(vert_format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);

	const float dither = 1.0f;

	bool use_ocio = false;

	/* View transform is already applied for offscreen, don't apply again, see: T52046 */
	if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
		Scene *scene = DST.draw_ctx.scene;
		use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
		        &scene->view_settings, &scene->display_settings, NULL, dither, false);
	}

	if (!use_ocio) {
		/* OCIO setup failed or was skipped: fall back to builtin shaders. */
		/* View transform is already applied for offscreen, don't apply again, see: T52046 */
		if (DST.options.is_image_render && !DST.options.is_scene_render) {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
			immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
		}
		else {
			immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
		}
		immUniform1i("image", 0);
	}

	GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */

	/* Identity MVP: vertices below are already in clip space. */
	float mat[4][4];
	unit_m4(mat);
	immUniformMatrix4fv("ModelViewProjectionMatrix", mat);

	/* Full screen triangle (oversized so it covers the whole viewport). */
	immBegin(GWN_PRIM_TRIS, 3);
	immAttrib2f(texco, 0.0f, 0.0f);
	immVertex2f(pos, -1.0f, -1.0f);

	immAttrib2f(texco, 2.0f, 0.0f);
	immVertex2f(pos, 3.0f, -1.0f);

	immAttrib2f(texco, 0.0f, 2.0f);
	immVertex2f(pos, -1.0f, 3.0f);
	immEnd();

	GPU_texture_unbind(tex);

	if (use_ocio) {
		IMB_colormanagement_finish_glsl_draw();
	}
	else {
		immUnbindProgram();
	}
}
2533
2534 /** \} */
2535
2536
2537 /* -------------------------------------------------------------------- */
2538
2539 /** \name Viewport (DRW_viewport)
2540  * \{ */
2541
2542 static void *DRW_viewport_engine_data_ensure(void *engine_type)
2543 {
2544         void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
2545
2546         if (data == NULL) {
2547                 data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
2548         }
2549         return data;
2550 }
2551
2552 void DRW_engine_viewport_data_size_get(
2553         const void *engine_type_v,
2554         int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
2555 {
2556         const DrawEngineType *engine_type = engine_type_v;
2557
2558         if (r_fbl_len) {
2559                 *r_fbl_len = engine_type->vedata_size->fbl_len;
2560         }
2561         if (r_txl_len) {
2562                 *r_txl_len = engine_type->vedata_size->txl_len;
2563         }
2564         if (r_psl_len) {
2565                 *r_psl_len = engine_type->vedata_size->psl_len;
2566         }
2567         if (r_stl_len) {
2568                 *r_stl_len = engine_type->vedata_size->stl_len;
2569         }
2570 }
2571
/* Current viewport size in pixels (float[2]), refreshed by drw_viewport_var_init(). */
const float *DRW_viewport_size_get(void)
{
	return &DST.size[0];
}
2576
/* Normalized screen-aligned right/up vectors (float[2][3]) of the current view. */
const float *DRW_viewport_screenvecs_get(void)
{
	return &DST.screenvecs[0][0];
}
2581
/* Size of a pixel in world units for the current view (from rv3d->pixsize). */
const float *DRW_viewport_pixelsize_get(void)
{
	return &DST.pixsize;
}
2586
/* Reset the per-viewport draw cache so it can be rebuilt.
 * Order matters: memory iterators must be released before the
 * mempools that back them are cleared. */
static void drw_viewport_cache_resize(void)
{
	/* Release the memiter before clearing the mempools that references them */
	GPU_viewport_cache_release(DST.viewport);

	if (DST.vmempool != NULL) {
		/* Clear but keep capacity equal to the current element count,
		 * so the next cache build reuses the same allocations. */
		BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_count(DST.vmempool->calls));
		BLI_mempool_clear_ex(DST.vmempool->calls_generate, BLI_mempool_count(DST.vmempool->calls_generate));
		BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_count(DST.vmempool->shgroups));
		BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_count(DST.vmempool->uniforms));
		BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_count(DST.vmempool->passes));
	}

	DRW_instance_data_list_free_unused(DST.idatalist);
	DRW_instance_data_list_resize(DST.idatalist);
}
2603
/* It also stores viewport variable to an immutable place: DST
 * This is because a cache uniform only store reference
 * to its value. And we don't want to invalidate the cache
 * if this value change per viewport */
static void drw_viewport_var_init(void)
{
	RegionView3D *rv3d = DST.draw_ctx.rv3d;

	/* Refresh DST.size */
	if (DST.viewport) {
		int size[2];
		GPU_viewport_size_get(DST.viewport, size);
		DST.size[0] = size[0];
		DST.size[1] = size[1];

		DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
		DST.default_framebuffer = fbl->default_fb;

		DST.vmempool = GPU_viewport_mempool_get(DST.viewport);

		/* Lazily create the per-viewport mempools on first use. */
		if (DST.vmempool->calls == NULL) {
			DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
		}
		if (DST.vmempool->calls_generate == NULL) {
			DST.vmempool->calls_generate = BLI_mempool_create(sizeof(DRWCallGenerate), 0, 512, 0);
		}
		if (DST.vmempool->shgroups == NULL) {
			DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
		}
		if (DST.vmempool->uniforms == NULL) {
			DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
		}
		if (DST.vmempool->passes == NULL) {
			DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
		}

		DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
		DRW_instance_data_list_reset(DST.idatalist);
	}
	else {
		/* No viewport (e.g. some render paths): zero everything out. */
		DST.size[0] = 0;
		DST.size[1] = 0;

		DST.default_framebuffer = NULL;
		DST.vmempool = NULL;
	}

	if (rv3d != NULL) {
		/* Refresh DST.screenvecs */
		copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
		copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
		normalize_v3(DST.screenvecs[0]);
		normalize_v3(DST.screenvecs[1]);

		/* Refresh DST.pixelsize */
		DST.pixsize = rv3d->pixsize;
	}

	/* Reset facing */
	DST.frontface = GL_CCW;
	DST.backface = GL_CW;
	glFrontFace(DST.frontface);

	if (DST.draw_ctx.scene->obedit) {
		ED_view3d_init_mats_rv3d(DST.draw_ctx.scene->obedit, rv3d);
	}

	/* Alloc array of texture reference. */
	if (RST.bound_texs == NULL) {
		RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
	}
	if (RST.bound_tex_slots == NULL) {
		RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
	}

	memset(viewport_matrix_override.override, 0x0, sizeof(viewport_matrix_override.override));
	memset(DST.common_instance_data, 0x0, sizeof(DST.common_instance_data));

	/* Not a viewport variable, we could split this out. */
	{
		/* Resolve the armature being posed:
		 * - in pose mode it is the active object itself;
		 * - in weight paint the posed armature deforming the active mesh
		 *   (supports mixed weight-paint & pose mode);
		 * - otherwise none. */
		if (DST.draw_ctx.object_mode & OB_MODE_POSE) {
			DST.draw_ctx.object_pose = DST.draw_ctx.obact;
		}
		else if (DST.draw_ctx.object_mode & OB_MODE_WEIGHT_PAINT) {
			DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
		}
		else {
			DST.draw_ctx.object_pose = NULL;
		}
	}
}
2695
2696 void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
2697 {
2698         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2699         BLI_assert(type >= DRW_MAT_PERS && type <= DRW_MAT_WININV);
2700
2701         if (viewport_matrix_override.override[type]) {
2702                 copy_m4_m4(mat, viewport_matrix_override.mat[type]);
2703         }
2704         else {
2705                 BLI_assert(rv3d != NULL); /* Can't use this in render mode. */
2706                 switch (type) {
2707                         case DRW_MAT_PERS:
2708                                 copy_m4_m4(mat, rv3d->persmat);
2709                                 break;
2710                         case DRW_MAT_PERSINV:
2711                                 copy_m4_m4(mat, rv3d->persinv);
2712                                 break;
2713                         case DRW_MAT_VIEW:
2714                                 copy_m4_m4(mat, rv3d->viewmat);
2715                                 break;
2716                         case DRW_MAT_VIEWINV:
2717                                 copy_m4_m4(mat, rv3d->viewinv);
2718                                 break;
2719                         case DRW_MAT_WIN:
2720                                 copy_m4_m4(mat, rv3d->winmat);
2721                                 break;
2722                         case DRW_MAT_WININV:
2723                                 invert_m4_m4(mat, rv3d->winmat);
2724                                 break;
2725                         default:
2726                                 BLI_assert(!"Matrix type invalid");
2727                                 break;
2728                 }
2729         }
2730 }
2731
/* Force DRW_viewport_matrix_get() to return \a mat for \a type
 * until the override is unset. */
void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type)
{
	copy_m4_m4(viewport_matrix_override.mat[type], mat);
	viewport_matrix_override.override[type] = true;
}
2737
/* Revert \a type back to the region view matrix. */
void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
{
	viewport_matrix_override.override[type] = false;
}
2742
2743 bool DRW_viewport_is_persp_get(void)
2744 {
2745         RegionView3D *rv3d = DST.draw_ctx.rv3d;
2746         if (rv3d) {
2747                 return rv3d->is_persp;
2748         }
2749         else {
2750                 if (viewport_matrix_override.override[DRW_MAT_WIN]) {
2751                         return viewport_matrix_override.mat[DRW_MAT_WIN][3][3] == 0.0f;
2752                 }
2753         }
2754         BLI_assert(0);
2755         return false;
2756 }
2757
/* Default framebuffer list of the active viewport. */
DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
{
	return GPU_viewport_framebuffer_list_get(DST.viewport);
}
2762
/* Default texture list of the active viewport. */
DefaultTextureList *DRW_viewport_texture_list_get(void)
{
	return GPU_viewport_texture_list_get(DST.viewport);
}
2767
/* Tag the active viewport so it gets redrawn next frame. */
void DRW_viewport_request_redraw(void)
{
	GPU_viewport_tag_update(DST.viewport);
}
2772
2773 /** \} */
2774
2775
2776 /* -------------------------------------------------------------------- */
2777 /** \name ViewLayers (DRW_scenelayer)
2778  * \{ */
2779
2780 void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
2781 {
2782         for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2783                 if (sled->engine_type == engine_type) {
2784                         return sled->storage;
2785                 }
2786         }
2787         return NULL;
2788 }
2789
2790 void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
2791 {
2792         ViewLayerEngineData *sled;
2793
2794         for (sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
2795                 if (sled->engine_type == engine_type) {
2796                         return &sled->storage;
2797                 }
2798         }
2799
2800         sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
2801         sled->engine_type = engine_type;
2802         sled->free = callback;
2803         BLI_addtail(&DST.draw_ctx.view_layer->drawdata, sled);
2804
2805         return &sled->storage;
2806 }
2807
2808 /** \} */
2809
2810
2811 /* -------------------------------------------------------------------- */
2812
2813 /** \name Objects (DRW_object)
2814  * \{ */
2815
2816 ObjectEngineData *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type)
2817 {
2818         for (ObjectEngineData *oed = ob->drawdata.first; oed; oed = oed->next) {
2819                 if (oed->engine_type == engine_type) {
2820                         return oed;
2821                 }
2822         }
2823         return NULL;
2824 }
2825
/* Return (creating if needed) per-object engine data of \a size bytes.
 * For dupli objects the data lives in a pooled instance buffer and is reset
 * every redraw; regular objects get a persistent heap allocation freed via
 * \a free_cb. \a init_cb (if given) runs once on freshly created data. */
ObjectEngineData *DRW_object_engine_data_ensure(
        Object *ob,
        DrawEngineType *engine_type,
        size_t size,
        ObjectEngineDataInitCb init_cb,
        ObjectEngineDataFreeCb free_cb)
{
	BLI_assert(size >= sizeof(ObjectEngineData));
	/* Try to re-use existing data. */
	ObjectEngineData *oed = DRW_object_engine_data_get(ob, engine_type);
	if (oed != NULL) {
		return oed;
	}
	/* Allocate new data. */
	if ((ob->base_flag & BASE_FROMDUPLI) != 0) {
		/* NOTE: data is not persistent in this case. It is reset each redraw. */
		BLI_assert(free_cb == NULL); /* No callback allowed. */
		/* Round to sizeof(float) for DRW_instance_data_request(). */
		const size_t t = sizeof(float) - 1;
		size = (size + t) & ~t;
		size_t fsize = size / sizeof(float);
		/* One shared instance-data pool per distinct (rounded) size. */
		if (DST.common_instance_data[fsize] == NULL) {
			DST.common_instance_data[fsize] = DRW_instance_data_request(DST.idatalist, fsize, 16);
		}
		oed = (ObjectEngineData *)DRW_instance_data_next(DST.common_instance_data[fsize]);
		memset(oed, 0, size);
	}
	else {
		oed = MEM_callocN(size, "ObjectEngineData");
	}
	oed->engine_type = engine_type;
	oed->free = free_cb;
	/* Perform user-side initialization, if needed. */
	if (init_cb != NULL) {
		init_cb(oed);
	}
	/* Register in the list. */
	BLI_addtail(&ob->drawdata, oed);
	return oed;
}
2866
/* XXX There is definitely some overlap between this and DRW_object_engine_data_ensure.
 * We should get rid of one of the two. */
/* Return (creating if needed) the per-lamp data for \a engine_type. */
LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, RenderEngineType *engine_type)
{
	BLI_assert(ob->type == OB_LAMP);

	Scene *scene = DST.draw_ctx.scene;

	/* TODO Dupliobjects */
	/* TODO Should be per scenelayer */
	return GPU_lamp_engine_data_get(scene, ob, NULL, engine_type);
}
2879
/* Thin wrapper: release per-lamp engine data. */
void DRW_lamp_engine_data_free(LampEngineData *led)
{
	GPU_lamp_engine_data_free(led);
}
2884
2885 /** \} */
2886
2887
2888 /* -------------------------------------------------------------------- */
2889
2890 /** \name Rendering (DRW_engines)
2891  * \{ */
2892
/* Run engine_init() for every enabled engine, timing each call. */
static void drw_engines_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (engine->engine_init) {
			engine->engine_init(data);
		}

		PROFILE_END_UPDATE(data->init_time, stime);
	}
}
2907
/* Run cache_init() for every enabled engine and reset text caches.
 * The first engine's text cache slot becomes the shared target
 * (DST.text_store_p) that object text is written into. */
static void drw_engines_cache_init(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (data->text_draw_cache) {
			DRW_text_cache_destroy(data->text_draw_cache);
			data->text_draw_cache = NULL;
		}
		if (DST.text_store_p == NULL) {
			DST.text_store_p = &data->text_draw_cache;
		}

		if (engine->cache_init) {
			engine->cache_init(data);
		}
	}
}
2927
/* Feed one object to every enabled engine's cache (id_update then cache_populate). */
static void drw_engines_cache_populate(Object *ob)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->id_update) {
			engine->id_update(data, &ob->id);
		}

		if (engine->cache_populate) {
			engine->cache_populate(data, ob);
		}
	}
}
2943
/* Run cache_finish() for every enabled engine after all objects were populated. */
static void drw_engines_cache_finish(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->cache_finish) {
			engine->cache_finish(data);
		}
	}
}
2955
/* Let the first engine that implements draw_background() draw it;
 * fall back to the default background otherwise. */
static void drw_engines_draw_background(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);

		if (engine->draw_background) {
			PROFILE_START(stime);

			DRW_stats_group_start(engine->idname);
			engine->draw_background(data);
			DRW_stats_group_end();

			PROFILE_END_UPDATE(data->background_time, stime);
			/* Only one engine may draw the background. */
			return;
		}
	}

	/* No draw_background found, doing default background */
	if (DRW_state_draw_background()) {
		DRW_draw_background();
	}
}
2979
/* Run draw_scene() for every enabled engine, in registration order, timing each. */
static void drw_engines_draw_scene(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (engine->draw_scene) {
			DRW_stats_group_start(engine->idname);
			engine->draw_scene(data);
			DRW_stats_group_end();
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
2996
/* Draw each engine's cached text overlay.
 * NOTE: the time spent is accumulated into render_time (no separate text timer). */
static void drw_engines_draw_text(void)
{
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
		PROFILE_START(stime);

		if (data->text_draw_cache) {
			DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.v3d, DST.draw_ctx.ar, false);
		}

		PROFILE_END_UPDATE(data->render_time, stime);
	}
}
3011
3012 #define MAX_INFO_LINES 10
3013
3014 /**
3015  * Returns the offset required for the drawing of engines info.
3016  */
3017 int DRW_draw_region_engine_info_offset(void)
3018 {
3019         int lines = 0;
3020         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3021                 DrawEngineType *engine = link->data;
3022                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3023
3024                 /* Count the number of lines. */
3025                 if (data->info[0] != '\0') {
3026                         lines++;
3027                         char *c = data->info;
3028                         while (*c++ != '\0') {
3029                                 if (*c == '\n') {
3030                                         lines++;
3031                                 }
3032                         }
3033                 }
3034         }
3035         return MIN2(MAX_INFO_LINES, lines) * UI_UNIT_Y;
3036 }
3037
3038 /**
3039  * Actual drawing;
3040  */
3041 void DRW_draw_region_engine_info(void)
3042 {
3043         const char *info_array_final[MAX_INFO_LINES + 1];
3044         /* This should be maxium number of engines running at the same time. */
3045         char info_array[MAX_INFO_LINES][GPU_INFO_SIZE];
3046         int i = 0;
3047
3048         const DRWContextState *draw_ctx = DRW_context_state_get();
3049         ARegion *ar = draw_ctx->ar;
3050         float fill_color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
3051
3052         UI_GetThemeColor3fv(TH_HIGH_GRAD, fill_color);
3053         mul_v3_fl(fill_color, fill_color[3]);
3054
3055         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3056                 DrawEngineType *engine = link->data;
3057                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3058
3059                 if (data->info[0] != '\0') {
3060                         char *chr_current = data->info;
3061                         char *chr_start = chr_current;
3062                         int line_len = 0;
3063
3064                         while (*chr_current++ != '\0') {
3065                                 line_len++;
3066                                 if (*chr_current == '\n') {
3067                                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
3068                                         /* Re-start counting. */
3069                                         chr_start = chr_current + 1;
3070                                         line_len = -1;
3071                                 }
3072                         }
3073
3074                         BLI_strncpy(info_array[i++], chr_start, line_len + 1);
3075
3076                         if (i >= MAX_INFO_LINES) {
3077                                 break;
3078                         }
3079                 }
3080         }
3081
3082         for (int j = 0; j < i; j++) {
3083                 info_array_final[j] = info_array[j];
3084         }
3085         info_array_final[i] = NULL;
3086
3087         if (info_array[0] != NULL) {
3088                 ED_region_info_draw_multiline(ar, info_array_final, fill_color, true);
3089         }
3090 }
3091
3092 #undef MAX_INFO_LINES
3093
/* Append \a engine to the list of engines used for this draw;
 * list order defines rendering order. */
static void use_drw_engine(DrawEngineType *engine)
{
	LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
	ld->data = engine;
	BLI_addtail(&DST.enabled_engines, ld);
}
3100
/* TODO revisit this when proper layering is implemented */
/* Gather all draw engines needed and store them in DST.enabled_engines
 * That also define the rendering order of engines */
static void drw_engines_enable_from_engine(RenderEngineType *engine_type)
{
	/* TODO layers */
	if (engine_type->draw_engine != NULL) {
		use_drw_engine(engine_type->draw_engine);
	}

	/* Non-internal render engines draw through the external engine. */
	if ((engine_type->flag & RE_INTERNAL) == 0) {
		drw_engines_enable_external();
	}
}
3115
/* Enable the object-mode overlay engine (always used when support drawing is on). */
static void drw_engines_enable_from_object_mode(void)
{
	use_drw_engine(&draw_engine_object_type);
}
3120
/* Enable the mode-specific overlay engine(s) for the given context mode. */
static void drw_engines_enable_from_mode(int mode)
{
	switch (mode) {
		case CTX_MODE_EDIT_MESH:
			use_drw_engine(&draw_engine_edit_mesh_type);
			break;
		case CTX_MODE_EDIT_CURVE:
			use_drw_engine(&draw_engine_edit_curve_type);
			break;
		case CTX_MODE_EDIT_SURFACE:
			use_drw_engine(&draw_engine_edit_surface_type);
			break;
		case CTX_MODE_EDIT_TEXT:
			use_drw_engine(&draw_engine_edit_text_type);
			break;
		case CTX_MODE_EDIT_ARMATURE:
			use_drw_engine(&draw_engine_edit_armature_type);
			break;
		case CTX_MODE_EDIT_METABALL:
			use_drw_engine(&draw_engine_edit_metaball_type);
			break;
		case CTX_MODE_EDIT_LATTICE:
			use_drw_engine(&draw_engine_edit_lattice_type);
			break;
		case CTX_MODE_POSE:
			use_drw_engine(&draw_engine_pose_type);
			break;
		case CTX_MODE_SCULPT:
			use_drw_engine(&draw_engine_sculpt_type);
			break;
		case CTX_MODE_PAINT_WEIGHT:
			/* Weight paint also draws the pose of the deforming armature
			 * (mixed weight-paint & pose mode), so enable both engines. */
			use_drw_engine(&draw_engine_pose_type);
			use_drw_engine(&draw_engine_paint_weight_type);
			break;
		case CTX_MODE_PAINT_VERTEX:
			use_drw_engine(&draw_engine_paint_vertex_type);
			break;
		case CTX_MODE_PAINT_TEXTURE:
			use_drw_engine(&draw_engine_paint_texture_type);
			break;
		case CTX_MODE_PARTICLE:
			use_drw_engine(&draw_engine_particle_type);
			break;
		case CTX_MODE_OBJECT:
			/* No mode-specific engine; object-mode overlays are enabled separately. */
			break;
		default:
			BLI_assert(!"Draw mode invalid");
			break;
	}
}
3171
/**
 * Use for select and depth-drawing.
 */
static void drw_engines_enable_basic(void)
{
	use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
}
3179
/**
 * Use for external render engines.
 */
static void drw_engines_enable_external(void)
{
	use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
}
3187
/* Build DST.enabled_engines for this draw: the render engine first,
 * then (when support drawing is on) the object-mode and context-mode overlays. */
static void drw_engines_enable(const Scene *scene, ViewLayer *view_layer, RenderEngineType *engine_type)
{
	Object *obact = OBACT(view_layer);
	const int mode = CTX_data_mode_enum_ex(scene->obedit, obact, DST.draw_ctx.object_mode);

	drw_engines_enable_from_engine(engine_type);

	if (DRW_state_draw_support()) {
		drw_engines_enable_from_object_mode();
		drw_engines_enable_from_mode(mode);
	}
}
3200
/* Free the enabled-engines list (the engines themselves are static). */
static void drw_engines_disable(void)
{
	BLI_freelistN(&DST.enabled_engines);
}
3205
3206 static unsigned int DRW_engines_get_hash(void)
3207 {
3208         unsigned int hash = 0;
3209         /* The cache depends on enabled engines */
3210         /* FIXME : if collision occurs ... segfault */
3211         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3212                 DrawEngineType *engine = link->data;
3213                 hash += BLI_ghashutil_strhash_p(engine->idname);
3214         }
3215
3216         return hash;
3217 }
3218
/* Draw one stats cell at column \a u, row \a v (in widget units),
 * anchored to the top-left of \a rect. */
static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
{
	BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
	                       rect->ymax - (3 + v) * U.widget_unit, 0.0f,
	                       txt, size);
}
3225
3226 /* CPU stats */
3227 static void drw_debug_cpu_stats(void)
3228 {
3229         int u, v;
3230         double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
3231         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3232         rcti rect;
3233         struct ARegion *ar = DST.draw_ctx.ar;
3234         ED_region_visible_rect(ar, &rect);
3235
3236         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3237
3238         /* row by row */
3239         v = 0; u = 0;
3240         /* Label row */
3241         char col_label[32];
3242         sprintf(col_label, "Engine");
3243         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3244         sprintf(col_label, "Init");
3245         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3246         sprintf(col_label, "Background");
3247         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3248         sprintf(col_label, "Render");
3249         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3250         sprintf(col_label, "Total (w/o cache)");
3251         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3252         v++;
3253
3254         /* Engines rows */
3255         char time_to_txt[16];
3256         for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
3257                 u = 0;
3258                 DrawEngineType *engine = link->data;
3259                 ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine);
3260
3261                 draw_stat(&rect, u++, v, engine->idname, sizeof(engine->idname));
3262
3263                 init_tot_time += data->init_time;
3264                 sprintf(time_to_txt, "%.2fms", data->init_time);
3265                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3266
3267                 background_tot_time += data->background_time;
3268                 sprintf(time_to_txt, "%.2fms", data->background_time);
3269                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3270
3271                 render_tot_time += data->render_time;
3272                 sprintf(time_to_txt, "%.2fms", data->render_time);
3273                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3274
3275                 tot_time += data->init_time + data->background_time + data->render_time;
3276                 sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
3277                 draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3278                 v++;
3279         }
3280
3281         /* Totals row */
3282         u = 0;
3283         sprintf(col_label, "Sub Total");
3284         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3285         sprintf(time_to_txt, "%.2fms", init_tot_time);
3286         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3287         sprintf(time_to_txt, "%.2fms", background_tot_time);
3288         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3289         sprintf(time_to_txt, "%.2fms", render_tot_time);
3290         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3291         sprintf(time_to_txt, "%.2fms", tot_time);
3292         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3293         v += 2;
3294
3295         u = 0;
3296         sprintf(col_label, "Cache Time");
3297         draw_stat(&rect, u++, v, col_label, sizeof(col_label));
3298         sprintf(time_to_txt, "%.2fms", DST.cache_time);
3299         draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt));
3300 }
3301
3302 /* Display GPU time for each passes */
3303 static void drw_debug_gpu_stats(void)
3304 {
3305         /* local coordinate visible rect inside region, to accomodate overlapping ui */
3306         rcti rect;
3307         struct ARegion *ar = DST.draw_ctx.ar;
3308         ED_region_visible_rect(ar, &rect);
3309
3310         UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
3311
3312         int v = BLI_listbase_count(&DST.enabled_engines) + 5;
3313
3314         char stat_string[32];
3315
3316         /* Memory Stats */
3317         unsigned int tex_mem = GPU_texture_memory_usage_get();
3318         unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
3319
3320         sprintf(stat_string, "GPU Memory");
3321         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3322         sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
3323         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3324         sprintf(stat_string, "   |--> Textures");
3325         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3326         sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
3327         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3328         sprintf(stat_string, "   |--> Meshes");
3329         draw_stat(&rect, 0, v, stat_string, sizeof(stat_string));
3330         sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
3331         draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string));
3332
3333         /* Pre offset for stats_draw */
3334         rect.ymax -= (3 + ++v) * U.widget_unit;
3335
3336         /* Rendering Stats */
3337         DRW_stats_draw(&rect);
3338 }
3339
3340 /* -------------------------------------------------------------------- */
3341
3342 /** \name View Update
3343  * \{ */
3344
/* Notify every enabled draw engine that the view has been updated, so they
 * can invalidate/refresh any cached viewport data via their view_update
 * callback.
 *
 * Runs outside the regular draw loop: the global draw-state (DST) is reset,
 * a minimal draw context is set up, the engines for this scene/layer/engine
 * combination are enabled, notified, and then torn down again. */
void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
{
	RenderEngineType *engine_type = update_ctx->engine_type;
	ARegion *ar = update_ctx->ar;
	View3D *v3d = update_ctx->v3d;
	RegionView3D *rv3d = ar->regiondata;
	Depsgraph *depsgraph = update_ctx->depsgraph;
	Scene *scene = update_ctx->scene;
	ViewLayer *view_layer = update_ctx->view_layer;

	/* No viewport allocated for this region yet: nothing to notify. */
	if (rv3d->viewport == NULL) {
		return;
	}


	/* Reset before using it. */
	memset(&DST, 0x0, sizeof(DST));

	DST.viewport = rv3d->viewport;
	/* NOTE(review): positional initialization of DRWContextState — the field
	 * order must match the struct declaration. OB_MODE_OBJECT is passed here
	 * regardless of the actual object mode; presumably mode-specific engines
	 * do not need view_update notifications — confirm against callers. */
	DST.draw_ctx = (DRWContextState){
		ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, depsgraph, OB_MODE_OBJECT,
		NULL,
	};

	drw_engines_enable(scene, view_layer, engine_type);

	/* Forward the notification to each enabled engine that implements it. */
	for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
		DrawEngineType *draw_engine = link->data;
		ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine);

		if (draw_engine->view_update) {
			draw_engine->view_update(data);
		}
	}

	DST.viewport = NULL;

	drw_engines_disable();
}
3384
3385 /** \} */