Cleanup: use 'uint' for GPU_select
source/blender/gpu/intern/gpu_select_pick.c
/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2017 Blender Foundation.
 * All rights reserved.
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/gpu/intern/gpu_select_pick.c
 *  \ingroup gpu
 *
 * Custom select code for picking small regions (not efficient for large regions).
 * `gpu_select_pick_*` API.
 */
#include <string.h>
#include <stdlib.h>
#include <float.h>

#include "GPU_select.h"
#include "GPU_extensions.h"
#include "GPU_glew.h"

#include "MEM_guardedalloc.h"

#include "BLI_rect.h"
#include "BLI_listbase.h"
#include "BLI_math_vector.h"
#include "BLI_utildefines.h"

#include "gpu_select_private.h"

#include "BLI_strict_flags.h"

/* #define DEBUG_PRINT */

/* Alloc number for depths */
#define ALLOC_DEPTHS 200

/* Z-depth of cleared depth buffer */
#define DEPTH_MAX 0xffffffff

/* ----------------------------------------------------------------------------
 * SubRectStride
 */

/* For looping over a sub-region of a rect, could be moved into 'rct.c' */
typedef struct SubRectStride {
        uint start;     /* start here */
        uint span;      /* read these */
        uint span_len;  /* len times (read span 'len' times). */
        uint skip;      /* skip those */
} SubRectStride;

/* we may want to change back to float if uint isn't well supported */
typedef uint depth_t;

/**
 * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).
 *
 * 'src' must be bigger than 'dst'.
 */
static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
{
        const int src_x = BLI_rcti_size_x(src);
        // const int src_y = BLI_rcti_size_y(src);
        const int dst_x = BLI_rcti_size_x(dst);
        const int dst_y = BLI_rcti_size_y(dst);
        const int x = dst->xmin - src->xmin;
        const int y = dst->ymin - src->ymin;

        BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin &&
                   src->xmax >= dst->xmax && src->ymax >= dst->ymax);
        BLI_assert(x >= 0 && y >= 0);

        r_sub->start    = (uint)((src_x * y) + x);
        r_sub->span     = (uint)dst_x;
        r_sub->span_len = (uint)dst_y;
        r_sub->skip     = (uint)(src_x - dst_x);
}
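
/* Illustrative sketch (not part of the build): how a SubRectStride is meant to be
 * consumed. 'buf', 'src_rect' and 'dst_rect' are hypothetical names; the loops
 * below (e.g. depth_buf_subrect_depth_any) follow this exact pattern.
 *
 *   SubRectStride sub;
 *   rect_subregion_stride_calc(&src_rect, &dst_rect, &sub);
 *
 *   const depth_t *p = buf + sub.start;
 *   for (uint i = 0; i < sub.span_len; i++) {
 *       const depth_t *p_end = p + sub.span;
 *       for (; p < p_end; p++) {
 *           // visit one row of the sub-region
 *       }
 *       p += sub.skip;  // jump over the pixels outside the sub-region
 *   }
 */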

/**
 * Ignore depth clearing as a change,
 * only check if it's been changed _and_ filled in (ignore clearing since XRAY does this).
 */
BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
{
        return (*prev != *curr) && (*curr != DEPTH_MAX);
}
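
/* For example (values are illustrative only):
 * - prev=0x20000000, curr=0x10000000 -> filled (changed and not cleared).
 * - prev=0x10000000, curr=DEPTH_MAX  -> not filled (XRAY cleared this pixel).
 * - prev == curr                     -> not filled (nothing new drawn here).
 */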

/* ----------------------------------------------------------------------------
 * DepthBufCache
 *
 * Result of reading glReadPixels,
 * used for both cached and non-cached storage.
 */

/* store result of glReadPixels */
typedef struct DepthBufCache {
        struct DepthBufCache *next, *prev;
        uint id;
        depth_t buf[0];
} DepthBufCache;

static DepthBufCache *depth_buf_malloc(uint rect_len)
{
        DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
        rect->id = SELECT_ID_NONE;
        return rect;
}

static bool depth_buf_rect_depth_any(
        const DepthBufCache *rect_depth,
        uint rect_len)
{
        const depth_t *curr = rect_depth->buf;
        for (uint i = 0; i < rect_len; i++, curr++) {
                if (*curr != DEPTH_MAX) {
                        return true;
                }
        }
        return false;
}

static bool depth_buf_subrect_depth_any(
        const DepthBufCache *rect_depth,
        const SubRectStride *sub_rect)
{
        const depth_t *curr = rect_depth->buf + sub_rect->start;
        for (uint i = 0; i < sub_rect->span_len; i++) {
                const depth_t *curr_end = curr + sub_rect->span;
                for (; curr < curr_end; curr++) {
                        if (*curr != DEPTH_MAX) {
                                return true;
                        }
                }
                curr += sub_rect->skip;
        }
        return false;
}

static bool depth_buf_rect_depth_any_filled(
        const DepthBufCache *rect_prev, const DepthBufCache *rect_curr,
        uint rect_len)
{
#if 0
        return memcmp(rect_prev->buf, rect_curr->buf, rect_len * sizeof(depth_t)) != 0;
#else
        const depth_t *prev = rect_prev->buf;
        const depth_t *curr = rect_curr->buf;
        for (uint i = 0; i < rect_len; i++, curr++, prev++) {
                if (depth_is_filled(prev, curr)) {
                        return true;
                }
        }
        return false;
#endif
}

/**
 * Both buffers are the same size, just check if the sub-rect contains any differences.
 */
static bool depth_buf_subrect_depth_any_filled(
        const DepthBufCache *rect_src, const DepthBufCache *rect_dst,
        const SubRectStride *sub_rect)
{
        /* same as above but different rect sizes */
        const depth_t *prev = rect_src->buf + sub_rect->start;
        const depth_t *curr = rect_dst->buf + sub_rect->start;
        for (uint i = 0; i < sub_rect->span_len; i++) {
                const depth_t *curr_end = curr + sub_rect->span;
                for (; curr < curr_end; prev++, curr++) {
                        if (depth_is_filled(prev, curr)) {
                                return true;
                        }
                }
                prev += sub_rect->skip;
                curr += sub_rect->skip;
        }
        return false;
}

/* ----------------------------------------------------------------------------
 * DepthID
 *
 * Internal structure for storing hits.
 */

typedef struct DepthID {
        uint id;
        depth_t depth;
} DepthID;

static int depth_id_cmp(const void *v1, const void *v2)
{
        const DepthID *d1 = v1, *d2 = v2;
        if (d1->id < d2->id) {
                return -1;
        }
        else if (d1->id > d2->id) {
                return 1;
        }
        else {
                return 0;
        }
}

static int depth_cmp(const void *v1, const void *v2)
{
        const DepthID *d1 = v1, *d2 = v2;
        if (d1->depth < d2->depth) {
                return -1;
        }
        else if (d1->depth > d2->depth) {
                return 1;
        }
        else {
                return 0;
        }
}

/* depth sorting */
typedef struct GPUPickState {
        /* cache on initialization */
        uint (*buffer)[4];

        /* buffer size (stores the number of integers; multiply by the integer size for the size in bytes) */
        uint bufsize;
        /* mode of operation */
        char mode;

        /* OpenGL drawing, never use when (is_cached == true). */
        struct {
                /* The current depth, accumulated as we draw */
                DepthBufCache *rect_depth;
                /* Scratch buffer, avoid allocs every time (when not caching) */
                DepthBufCache *rect_depth_test;

                /* Pass to glReadPixels (x, y, w, h) */
                int clip_readpixels[4];

                /* Set after first draw */
                bool is_init;
                bool is_finalized;
                uint prev_id;
        } gl;

        /* src: data stored in 'cache' and 'gl',
         * dst: used when the cached region is smaller (where src -> dst isn't 1:1) */
        struct {
                rcti clip_rect;
                uint rect_len;
        } src, dst;

        /* Store cache between `GPU_select_cache_begin/end` */
        bool use_cache;
        bool is_cached;
        struct {
                /* Cleanup used for iterating over both source and destination buffers:
                 * src.clip_rect -> dst.clip_rect */
                SubRectStride sub_rect;

                /* List of DepthBufCache, sized to 'src.clip_rect' */
                ListBase bufs;
        } cache;

        /* Picking methods */
        union {
                /* GPU_SELECT_PICK_ALL */
                struct {
                        DepthID *hits;
                        uint hits_len;
                        uint hits_len_alloc;
                } all;

                /* GPU_SELECT_PICK_NEAREST */
                struct {
                        uint *rect_id;
                } nearest;
        };
} GPUPickState;


static GPUPickState g_pick_state = {0};
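
/* Rough call sequence for one pick, as a sketch only ('draw_object', 'rect' and
 * 'MAX_HITS' are placeholders; GPU_select.c wraps these calls in practice).
 * Note that each gpu_select_pick_load_id() call reads back the depths drawn
 * since the previous call and attributes them to the previous ID.
 *
 *   uint buffer[MAX_HITS][4];
 *   gpu_select_pick_begin(buffer, MAX_HITS, &rect, GPU_SELECT_PICK_NEAREST);
 *   for (...) {                          // each selectable object
 *       gpu_select_pick_load_id(object_id);
 *       draw_object();
 *   }
 *   gpu_select_pick_finalize();          // optional, flushes the last pass
 *   const uint hits = gpu_select_pick_end();
 */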

void gpu_select_pick_begin(
        uint (*buffer)[4], uint bufsize,
        const rcti *input, char mode)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
#endif

        ps->bufsize = bufsize;
        ps->buffer = buffer;
        ps->mode = mode;

        const uint rect_len = (uint)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
        ps->dst.clip_rect = *input;
        ps->dst.rect_len = rect_len;

        /* Restrict OpenGL operations for when we don't have cache */
        if (ps->is_cached == false) {

                glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT);
                /* disable writing to the framebuffer */
                glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

                glEnable(GL_DEPTH_TEST);
                glDepthMask(GL_TRUE);

                if (mode == GPU_SELECT_PICK_ALL) {
                        glDepthFunc(GL_ALWAYS);
                }
                else {
                        glDepthFunc(GL_LEQUAL);
                }

                /* set just in case */
                glPixelTransferf(GL_DEPTH_BIAS, 0.0);
                glPixelTransferf(GL_DEPTH_SCALE, 1.0);

                float viewport[4];
                glGetFloatv(GL_SCISSOR_BOX, viewport);

                ps->src.clip_rect = *input;
                ps->src.rect_len = rect_len;

                ps->gl.clip_readpixels[0] = (int)viewport[0];
                ps->gl.clip_readpixels[1] = (int)viewport[1];
                ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect);
                ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect);

                glViewport(UNPACK4(ps->gl.clip_readpixels));

                /* It's possible we don't want to clear depth buffer,
                 * so existing elements are masked by current z-buffer. */
                glClear(GL_DEPTH_BUFFER_BIT);

                /* scratch buffer (read new values here) */
                ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
                ps->gl.rect_depth = depth_buf_malloc(rect_len);

                /* set initial 'far' value */
#if 0
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth->buf);
#else
                for (uint i = 0; i < rect_len; i++) {
                        ps->gl.rect_depth->buf[i] = DEPTH_MAX;
                }
#endif

                ps->gl.is_init = false;
                ps->gl.is_finalized = false;
                ps->gl.prev_id = 0;
        }
        else {
                /* Using cache (ps->is_cached == true) */
                /* src.clip_rect -> dst.clip_rect */
                rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
                BLI_assert(ps->gl.rect_depth == NULL);
                BLI_assert(ps->gl.rect_depth_test == NULL);
        }

        if (mode == GPU_SELECT_PICK_ALL) {
                ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = ALLOC_DEPTHS;
        }
        else {
                /* Set to 0xff for SELECT_ID_NONE */
                ps->nearest.rect_id = MEM_mallocN(sizeof(uint) * ps->dst.rect_len, __func__);
                memset(ps->nearest.rect_id, 0xff, sizeof(uint) * ps->dst.rect_len);
        }
}

/**
 * Given two depth buffers that are known to differ - update the depth information,
 * used for both cached/uncached depth buffers.
 */
static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const uint id = rect_curr->id;
        /* find the best depth for this pass and store in 'all.hits' */
        depth_t depth_best = DEPTH_MAX;

#define EVAL_TEST() \
        if (depth_best > *curr) { \
                depth_best = *curr; \
        } ((void)0)

        if (ps->is_cached == false) {
                const depth_t *curr = rect_curr->buf;
                BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                const uint rect_len = ps->src.rect_len;
                for (uint i = 0; i < rect_len; i++, curr++) {
                        EVAL_TEST();
                }
        }
        else {
                /* same as above but different rect sizes */
                const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
                        const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                        for (; curr < curr_end; curr++) {
                                EVAL_TEST();
                        }
                        curr += ps->cache.sub_rect.skip;
                }
        }

#undef EVAL_TEST

        /* ensure enough space */
        if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
                ps->all.hits_len_alloc += ALLOC_DEPTHS;
                ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
        }
        DepthID *d = &ps->all.hits[ps->all.hits_len++];
        d->id = id;
        d->depth = depth_best;
}

static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const uint id = rect_curr->id;
        /* keep track of each pixel's ID in 'nearest.rect_id' */
        if (id != SELECT_ID_NONE) {
                uint *id_ptr = ps->nearest.rect_id;

                /* Check against DEPTH_MAX because XRAY will clear the buffer,
                 * so previously set values will become unset.
                 * In this case just leave those IDs as-is. */
#define EVAL_TEST() \
                if (depth_is_filled(prev, curr)) { \
                        *id_ptr = id; \
                } ((void)0)

                if (ps->is_cached == false) {
                        const depth_t *prev = rect_prev->buf;
                        const depth_t *curr = rect_curr->buf;
                        BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                        const uint rect_len = ps->src.rect_len;
                        for (uint i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
                                EVAL_TEST();
                        }
                }
                else {
                        /* same as above but different rect sizes */
                        const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
                        const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                        for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
                                const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                                for (; curr < curr_end; prev++, curr++, id_ptr++) {
                                        EVAL_TEST();
                                }
                                prev += ps->cache.sub_rect.skip;
                                curr += ps->cache.sub_rect.skip;
                        }
                }

#undef EVAL_TEST
        }
}


bool gpu_select_pick_load_id(uint id)
{
        GPUPickState *ps = &g_pick_state;
        if (ps->gl.is_init) {
                const uint rect_len = ps->src.rect_len;
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
                /* Perform an initial check since in most cases the array remains unchanged. */

                bool do_pass = false;
                if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                        if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_all(ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }
                else {
                        if (depth_buf_rect_depth_any_filled(ps->gl.rect_depth, ps->gl.rect_depth_test, rect_len)) {
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_nearest(ps->gl.rect_depth, ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }

                if (do_pass) {
                        /* Store depth in cache */
                        if (ps->use_cache) {
                                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                                ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
                        }

                        SWAP(DepthBufCache *, ps->gl.rect_depth, ps->gl.rect_depth_test);

                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                /* we want new depths every time */
                                glClear(GL_DEPTH_BUFFER_BIT);
                        }
                }
        }

        ps->gl.is_init = true;
        ps->gl.prev_id = id;

        return true;
}

/**
 * (Optional) call before 'gpu_select_pick_end' if the GL context is not kept.
 * Not compatible with the regular select case.
 */
void gpu_select_pick_finalize(void)
{
        GPUPickState *ps = &g_pick_state;
        if (ps->gl.is_init) {
                /* force finishing the last pass */
                gpu_select_pick_load_id(ps->gl.prev_id);
        }
        ps->gl.is_finalized = true;
}

uint gpu_select_pick_end(void)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s\n", __func__);
#endif

        if (ps->is_cached == false) {
                if (ps->gl.is_finalized == false) {
                        gpu_select_pick_finalize();
                }

                glPopAttrib();
                glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        }

        /* assign but never free directly since it may be in cache */
        DepthBufCache *rect_depth_final;

        /* Store depth in cache */
        if (ps->use_cache && !ps->is_cached) {
                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                ps->gl.rect_depth = NULL;
                rect_depth_final = ps->cache.bufs.last;
        }
        else if (ps->is_cached) {
                rect_depth_final = ps->cache.bufs.last;
        }
        else {
                /* common case, no cache */
                rect_depth_final = ps->gl.rect_depth;
        }

        uint maxhits = g_pick_state.bufsize;
        DepthID *depth_data;
        uint depth_data_len = 0;

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                depth_data = ps->all.hits;
                depth_data_len = ps->all.hits_len;
                /* move ownership */
                ps->all.hits = NULL;
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = 0;
        }
        else {
                /* GPU_SELECT_PICK_NEAREST */

                /* Over-allocate (unlikely we have as many depths as pixels) */
                uint depth_data_len_first_pass = 0;
                depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);

                /* Partially de-duplicating copy,
                 * when contiguous IDs are found - update their closest depth.
                 * This isn't essential but means there is less data to sort. */

#define EVAL_TEST(i_src, i_dst) \
                { \
                        const uint id = ps->nearest.rect_id[i_dst]; \
                        if (id != SELECT_ID_NONE) { \
                                const depth_t depth = rect_depth_final->buf[i_src]; \
                                if (depth_last == NULL || depth_last->id != id) { \
                                        DepthID *d = &depth_data[depth_data_len_first_pass++]; \
                                        d->id = id; \
                                        d->depth = depth; \
                                } \
                                else if (depth_last->depth > depth) { \
                                        depth_last->depth = depth; \
                                } \
                        } \
                } ((void)0)

                {
                        DepthID *depth_last = NULL;
                        if (ps->is_cached == false) {
                                for (uint i = 0; i < ps->src.rect_len; i++) {
                                        EVAL_TEST(i, i);
                                }
                        }
                        else {
                                /* same as above but different rect sizes */
                                uint i_src = ps->cache.sub_rect.start, i_dst = 0;
                                for (uint j = 0; j < ps->cache.sub_rect.span_len; j++) {
                                        const uint i_src_end = i_src + ps->cache.sub_rect.span;
                                        for (; i_src < i_src_end; i_src++, i_dst++) {
                                                EVAL_TEST(i_src, i_dst);
                                        }
                                        i_src += ps->cache.sub_rect.skip;
                                }
                        }
                }

#undef EVAL_TEST

                qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);

                /* Sort by IDs then keep the best depth for each ID */
                depth_data_len = 0;
                {
                        DepthID *depth_last = NULL;
                        for (uint i = 0; i < depth_data_len_first_pass; i++) {
                                if (depth_last == NULL || depth_last->id != depth_data[i].id) {
                                        depth_last = &depth_data[depth_data_len++];
                                        *depth_last = depth_data[i];
                                }
                                else if (depth_last->depth > depth_data[i].depth) {
                                        depth_last->depth = depth_data[i].depth;
                                }
                        }
                }
        }

        /* Finally sort each unique (id, depth) pair by depth
         * so the final hit-list is sorted by depth (nearest first) */
        uint hits = 0;

        if (depth_data_len > maxhits) {
                hits = (uint)-1;
        }
        else {
                /* leave sorting up to the caller */
                qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);

                for (uint i = 0; i < depth_data_len; i++) {
#ifdef DEBUG_PRINT
                        printf("  hit: %u: depth %u\n", depth_data[i].id, depth_data[i].depth);
#endif
                        /* first 3 are dummy values */
                        g_pick_state.buffer[hits][0] = 1;
                        g_pick_state.buffer[hits][1] = 0x0;  /* depth_data[i].depth; */ /* unused */
                        g_pick_state.buffer[hits][2] = 0x0;  /* z-far is currently never used. */
                        g_pick_state.buffer[hits][3] = depth_data[i].id;
                        hits++;
                }
                BLI_assert(hits <= maxhits);
        }

        MEM_freeN(depth_data);

        MEM_SAFE_FREE(ps->gl.rect_depth);
        MEM_SAFE_FREE(ps->gl.rect_depth_test);

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                /* 'hits' already freed as 'depth_data' */
        }
        else {
                MEM_freeN(ps->nearest.rect_id);
                ps->nearest.rect_id = NULL;
        }

        if (ps->use_cache) {
                ps->is_cached = true;
        }

        return hits;
}

/* ----------------------------------------------------------------------------
 * Caching
 *
 * Support multiple begin/end's reusing depth buffers.
 */
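
/* Sketch of the intended cache usage (illustrative only; real callers go through
 * the GPU_select_cache_* wrappers in GPU_select.c):
 *
 *   gpu_select_pick_cache_begin();
 *
 *   // First pick: nothing is cached yet, so the caller draws normally and the
 *   // depth passes are stored in 'cache.bufs'.
 *   ... begin / load_id + draw / end ...
 *
 *   // Later picks inside the cached region: gpu_select_pick_is_cached() is true,
 *   // so instead of drawing, gpu_select_pick_cache_load_id() replays the stored
 *   // depth buffers over the (possibly smaller) new rect.
 *   ... begin / gpu_select_pick_cache_load_id() / end ...
 *
 *   gpu_select_pick_cache_end();
 */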

void gpu_select_pick_cache_begin(void)
{
        BLI_assert(g_pick_state.use_cache == false);
#ifdef DEBUG_PRINT
        printf("%s\n", __func__);
#endif
        g_pick_state.use_cache = true;
        g_pick_state.is_cached = false;
}

void gpu_select_pick_cache_end(void)
{
#ifdef DEBUG_PRINT
        printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
#endif
        g_pick_state.use_cache = false;
        g_pick_state.is_cached = false;

        BLI_freelistN(&g_pick_state.cache.bufs);
}

/* is drawing needed? */
bool gpu_select_pick_is_cached(void)
{
        return g_pick_state.is_cached;
}

void gpu_select_pick_cache_load_id(void)
{
        BLI_assert(g_pick_state.is_cached == true);
        GPUPickState *ps = &g_pick_state;
#ifdef DEBUG_PRINT
        printf("%s (building depth from cache)\n", __func__);
#endif
        for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) {
                if (rect_depth->next != NULL) {
                        /* we know the buffers differ, but this sub-region may not.
                         * double check before adding an id-pass */
                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_all(rect_depth->next);
                                }
                        }
                        else {
                                if (depth_buf_subrect_depth_any_filled(rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
                                }
                        }
                }
        }
}