Merge branch 'master' into blender2.8
[blender.git] / source / blender / gpu / intern / gpu_select_pick.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file blender/gpu/intern/gpu_select_pick.c
25  *  \ingroup gpu
26  *
27  * Custom select code for picking small regions (not efficient for large regions).
28  * `gpu_select_pick_*` API.
29  */
30 #include <string.h>
31 #include <stdlib.h>
32 #include <float.h>
33
34 #include "GPU_immediate.h"
35 #include "GPU_draw.h"
36 #include "GPU_select.h"
37 #include "GPU_extensions.h"
38 #include "GPU_glew.h"
39  
40 #include "MEM_guardedalloc.h"
41
42 #include "BLI_rect.h"
43 #include "BLI_listbase.h"
44 #include "BLI_math_vector.h"
45 #include "BLI_utildefines.h"
46
47 #include "gpu_select_private.h"
48
49 #include "BLI_strict_flags.h"
50
51 /* #define DEBUG_PRINT */
52
53 /* Alloc number for depths */
54 #define ALLOC_DEPTHS 200
55
56 /* Z-depth of cleared depth buffer */
57 #define DEPTH_MAX 0xffffffff
58
59 /* ----------------------------------------------------------------------------
60  * SubRectStride
61  */
62
/* For looping over a sub-region of a rect, could be moved into 'rct.c'.
 * Describes a strided walk over a larger buffer:
 * seek 'start', then 'span_len' times read 'span' items and skip 'skip'. */
typedef struct SubRectStride {
        uint start;     /* start here */
        uint span;      /* read these */
        uint span_len;  /* len times (read span 'len' times). */
        uint skip;      /* skip those */
} SubRectStride;
70
/* We may want to change back to float if uint isn't well supported.
 * Matches the GL_UNSIGNED_INT format passed to glReadPixels below. */
typedef uint depth_t;
73
74 /**
75  * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).
76  *
77  * 'src' must be bigger than 'dst'.
78  */
79 static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
80 {
81         const int src_x = BLI_rcti_size_x(src);
82         // const int src_y = BLI_rcti_size_y(src);
83         const int dst_x = BLI_rcti_size_x(dst);
84         const int dst_y = BLI_rcti_size_y(dst);
85         const int x = dst->xmin - src->xmin;
86         const int y = dst->ymin - src->ymin;
87
88         BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin &&
89                    src->ymax >= dst->ymax && src->ymax >= dst->ymax);
90         BLI_assert(x >= 0 && y >= 0);
91
92         r_sub->start    = (uint)((src_x * y) + x);
93         r_sub->span     = (uint)dst_x;
94         r_sub->span_len = (uint)dst_y;
95         r_sub->skip     = (uint)(src_x - dst_x);
96 }
97
98 /**
99  * Ignore depth clearing as a change,
100  * only check if its been changed _and_ filled in (ignore clearing since XRAY does this).
101  */
102 BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
103 {
104         return (*prev != *curr) && (*curr != DEPTH_MAX);
105 }
106
107 /* ----------------------------------------------------------------------------
108  * DepthBufCache
109  *
110  * Result of reading glReadPixels,
111  * use for both cache and non-cached storage.
112  */
113
/* Store result of glReadPixels. */
typedef struct DepthBufCache {
        struct DepthBufCache *next, *prev;  /* ListBase links (stored in GPUPickState.cache.bufs) */
        uint id;                            /* select id the pass was drawn with (SELECT_ID_NONE when unset) */
        depth_t buf[0];                     /* trailing depth data, 'rect_len' items (see depth_buf_malloc) */
} DepthBufCache;
120
121 static DepthBufCache *depth_buf_malloc(uint rect_len)
122 {
123         DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
124         rect->id = SELECT_ID_NONE;
125         return rect;
126 }
127
128 static bool depth_buf_rect_depth_any(
129         const DepthBufCache *rect_depth,
130         uint rect_len)
131 {
132         const depth_t *curr = rect_depth->buf;
133         for (uint i = 0; i < rect_len; i++, curr++) {
134                 if (*curr != DEPTH_MAX) {
135                         return true;
136                 }
137         }
138         return false;
139 }
140
141 static bool depth_buf_subrect_depth_any(
142         const DepthBufCache *rect_depth,
143         const SubRectStride *sub_rect)
144 {
145         const depth_t *curr = rect_depth->buf + sub_rect->start;
146         for (uint i = 0; i < sub_rect->span_len; i++) {
147                 const depth_t *curr_end = curr + sub_rect->span;
148                 for (; curr < curr_end; curr++, curr++) {
149                         if (*curr != DEPTH_MAX) {
150                                 return true;
151                         }
152                 }
153                 curr += sub_rect->skip;
154         }
155         return false;
156 }
157
158 static bool depth_buf_rect_depth_any_filled(
159         const DepthBufCache *rect_prev, const DepthBufCache *rect_curr,
160         uint rect_len)
161 {
162 #if 0
163         return memcmp(rect_depth_a->buf, rect_depth_b->buf, rect_len * sizeof(depth_t)) != 0;
164 #else
165         const depth_t *prev = rect_prev->buf;
166         const depth_t *curr = rect_curr->buf;
167         for (uint i = 0; i < rect_len; i++, curr++, prev++) {
168                 if (depth_is_filled(prev, curr)) {
169                         return true;
170                 }
171         }
172         return false;
173 #endif
174 }
175
176 /**
177  * Both buffers are the same size, just check if the sub-rect contains any differences.
178  */
179 static bool depth_buf_subrect_depth_any_filled(
180         const DepthBufCache *rect_src, const DepthBufCache *rect_dst,
181         const SubRectStride *sub_rect)
182 {
183         /* same as above but different rect sizes */
184         const depth_t *prev = rect_src->buf + sub_rect->start;
185         const depth_t *curr = rect_dst->buf + sub_rect->start;
186         for (uint i = 0; i < sub_rect->span_len; i++) {
187                 const depth_t *curr_end = curr + sub_rect->span;
188                 for (; curr < curr_end; prev++, curr++) {
189                         if (depth_is_filled(prev, curr)) {
190                                 return true;
191                         }
192                 }
193                 prev += sub_rect->skip;
194                 curr += sub_rect->skip;
195         }
196         return false;
197 }
198
199 /* ----------------------------------------------------------------------------
200  * DepthID
201  *
202  * Internal structure for storing hits.
203  */
204
/* A single hit: select id paired with its (nearest) depth. */
typedef struct DepthID {
        uint id;        /* select id */
        depth_t depth;  /* best (smallest) depth seen for this id */
} DepthID;
209
210 static int depth_id_cmp(const void *v1, const void *v2)
211 {
212         const DepthID *d1 = v1, *d2 = v2;
213         if (d1->id < d2->id) {
214                 return -1;
215         }
216         else if (d1->id > d2->id) {
217                 return 1;
218         }
219         else {
220                 return 0;
221         }
222 }
223
224 static int depth_cmp(const void *v1, const void *v2)
225 {
226         const DepthID *d1 = v1, *d2 = v2;
227         if (d1->depth < d2->depth) {
228                 return -1;
229         }
230         else if (d1->depth > d2->depth) {
231                 return 1;
232         }
233         else {
234                 return 0;
235         }
236 }
237
/* Depth-sorting pick state, all gpu_select_pick_* functions operate on
 * the single module-level instance below. */
typedef struct GPUPickState {
        /* cache on initialization */
        uint (*buffer)[4];

        /* buffer size (stores number of integers, for actual size multiply by sizeof integer)*/
        uint bufsize;
        /* mode of operation */
        char mode;

        /* OpenGL drawing, never use when (is_cached == true). */
        struct {
                /* The current depth, accumulated as we draw */
                DepthBufCache *rect_depth;
                /* Scratch buffer, avoid allocs every time (when not caching) */
                DepthBufCache *rect_depth_test;

                /* Pass to glReadPixels (x, y, w, h) */
                int clip_readpixels[4];

                /* Set after first draw */
                bool is_init;
                /* Set by gpu_select_pick_finalize so the last pass isn't flushed twice. */
                bool is_finalized;
                /* Id of the pass currently being drawn (stored on the buffer when flushed). */
                uint prev_id;
        } gl;

        /* src: data stored in 'cache' and 'gl',
         * dst: use when cached region is smaller (where src -> dst isn't 1:1) */
        struct {
                rcti clip_rect;
                uint rect_len;
        } src, dst;

        /* Store cache between `GPU_select_cache_begin/end` */
        bool use_cache;
        bool is_cached;
        struct {
                /* Used for iterating over both source and destination buffers:
                 * src.clip_rect -> dst.clip_rect */
                SubRectStride sub_rect;

                /* List of DepthBufCache, sized of 'src.clip_rect' */
                ListBase bufs;
        } cache;

        /* Picking methods */
        union {
                /* GPU_SELECT_PICK_ALL */
                struct {
                        DepthID *hits;
                        uint hits_len;
                        uint hits_len_alloc;
                } all;

                /* GPU_SELECT_PICK_NEAREST */
                struct {
                        /* Per-pixel id of the nearest filled pass, 0xff-filled for SELECT_ID_NONE. */
                        uint *rect_id;
                } nearest;
        };
} GPUPickState;
298
299
/* Module-level state shared by all gpu_select_pick_* functions. */
static GPUPickState g_pick_state = {0};
301
/**
 * Begin a picking pass over the 'input' rect.
 *
 * \param buffer: Result storage, written by #gpu_select_pick_end (4 uints per hit).
 * \param bufsize: Number of uints available in \a buffer.
 * \param input: Screen-space rect to pick from.
 * \param mode: GPU_SELECT_PICK_ALL or GPU_SELECT_PICK_NEAREST.
 */
void gpu_select_pick_begin(
        uint (*buffer)[4], uint bufsize,
        const rcti *input, char mode)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
#endif

        ps->bufsize = bufsize;
        ps->buffer = buffer;
        ps->mode = mode;

        const uint rect_len = (uint)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
        ps->dst.clip_rect = *input;
        ps->dst.rect_len = rect_len;

        /* Restrict OpenGL operations for when we don't have cache */
        if (ps->is_cached == false) {
                gpuPushAttrib(GPU_DEPTH_BUFFER_BIT | GPU_VIEWPORT_BIT);

                /* disable writing to the framebuffer */
                glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

                glEnable(GL_DEPTH_TEST);
                glDepthMask(GL_TRUE);

                if (mode == GPU_SELECT_PICK_ALL) {
                        /* Note that other depth settings (such as #GL_LEQUAL) work too,
                         * since the depth is always cleared.
                         * Noting this for cases when depth picking is used where drawing calls change depth settings. */
                        glDepthFunc(GL_ALWAYS);
                }
                else {
                        glDepthFunc(GL_LEQUAL);
                }

                /* NOTE(review): reads the scissor box as the region to restore/read,
                 * presumably set up by the caller to match 'input' - confirm. */
                float viewport[4];
                glGetFloatv(GL_SCISSOR_BOX, viewport);

                ps->src.clip_rect = *input;
                ps->src.rect_len = rect_len;

                ps->gl.clip_readpixels[0] = (int)viewport[0];
                ps->gl.clip_readpixels[1] = (int)viewport[1];
                ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect);
                ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect);

                glViewport(UNPACK4(ps->gl.clip_readpixels));

                /* It's possible we don't want to clear depth buffer,
                 * so existing elements are masked by current z-buffer. */
                glClear(GL_DEPTH_BUFFER_BIT);

                /* scratch buffer (read new values here) */
                ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
                ps->gl.rect_depth = depth_buf_malloc(rect_len);

                /* set initial 'far' value */
#if 0
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth->buf);
#else
                for (uint i = 0; i < rect_len; i++) {
                        ps->gl.rect_depth->buf[i] = DEPTH_MAX;
                }
#endif

                ps->gl.is_init = false;
                ps->gl.is_finalized = false;
                ps->gl.prev_id = 0;
        }
        else {
                /* Using cache (ps->is_cached == true) */
                /* src.clip_rect -> dst.clip_rect */
                rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
                BLI_assert(ps->gl.rect_depth == NULL);
                BLI_assert(ps->gl.rect_depth_test == NULL);
        }

        if (mode == GPU_SELECT_PICK_ALL) {
                ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = ALLOC_DEPTHS;
        }
        else {
                /* Set to 0xff for SELECT_ID_NONE */
                ps->nearest.rect_id = MEM_mallocN(sizeof(uint) * ps->dst.rect_len, __func__);
                memset(ps->nearest.rect_id, 0xff, sizeof(uint) * ps->dst.rect_len);
        }
}
393
/**
 * Accumulate one draw pass for GPU_SELECT_PICK_ALL:
 * scan the pass's depth buffer (full rect, or the cached sub-rect)
 * for the nearest depth and append one (id, depth) entry to 'all.hits'.
 * Used for both cached/uncached depth buffers.
 */
static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const uint id = rect_curr->id;
        /* find the best depth for this pass and store in 'all.hits' */
        depth_t depth_best = DEPTH_MAX;

#define EVAL_TEST() \
        if (depth_best > *curr) { \
                depth_best = *curr; \
        } ((void)0)

        if (ps->is_cached == false) {
                const depth_t *curr = rect_curr->buf;
                BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                const uint rect_len = ps->src.rect_len;
                for (uint i = 0; i < rect_len; i++, curr++) {
                        EVAL_TEST();
                }
        }
        else {
                /* same as above but different rect sizes */
                const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
                        const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                        for (; curr < curr_end; curr++) {
                                EVAL_TEST();
                        }
                        curr += ps->cache.sub_rect.skip;
                }
        }

#undef EVAL_TEST

        /* ensure enough space (grow by fixed chunks, not doubling) */
        if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
                ps->all.hits_len_alloc += ALLOC_DEPTHS;
                ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
        }
        DepthID *d = &ps->all.hits[ps->all.hits_len++];
        d->id = id;
        d->depth = depth_best;
}
441
/**
 * Accumulate one draw pass for GPU_SELECT_PICK_NEAREST:
 * for every pixel newly filled by this pass (changed vs 'rect_prev' and not
 * cleared, see #depth_is_filled) record the pass id in 'nearest.rect_id'.
 * Used for both cached/uncached depth buffers.
 */
static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const uint id = rect_curr->id;
        /* keep track each pixels ID in 'nearest.rect_id' */
        if (id != SELECT_ID_NONE) {
                uint *id_ptr = ps->nearest.rect_id;

                /* Check against DEPTH_MAX because XRAY will clear the buffer,
                 * so previously set values will become unset.
                 * In this case just leave those id's left as-is. */
#define EVAL_TEST() \
                if (depth_is_filled(prev, curr)) { \
                        *id_ptr = id; \
                } ((void)0)

                if (ps->is_cached == false) {
                        const depth_t *prev = rect_prev->buf;
                        const depth_t *curr = rect_curr->buf;
                        BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                        const uint rect_len = ps->src.rect_len;
                        for (uint i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
                                EVAL_TEST();
                        }
                }
                else {
                        /* same as above but different rect sizes */
                        const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
                        const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                        for (uint i = 0; i < ps->cache.sub_rect.span_len; i++) {
                                const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                                for (; curr < curr_end; prev++, curr++, id_ptr++) {
                                        EVAL_TEST();
                                }
                                prev += ps->cache.sub_rect.skip;
                                curr += ps->cache.sub_rect.skip;
                        }
                }

#undef EVAL_TEST
        }
}
484
485
/**
 * Flush the depth of the previous pass (reading back GL depth, accumulating
 * a hit if anything was drawn), then make 'id' the current pass id.
 * Always returns true (the id is always accepted).
 */
bool gpu_select_pick_load_id(uint id)
{
        GPUPickState *ps = &g_pick_state;
        if (ps->gl.is_init) {
                const uint rect_len = ps->src.rect_len;
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
                /* perform initial check since in most cases the array remains unchanged */

                bool do_pass = false;
                if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                        if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
                                /* the depth belongs to the id drawn since the previous flush */
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_all(ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }
                else {
                        if (depth_buf_rect_depth_any_filled(ps->gl.rect_depth, ps->gl.rect_depth_test, rect_len)) {
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_nearest(ps->gl.rect_depth, ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }

                if (do_pass) {
                        /* Store depth in cache */
                        if (ps->use_cache) {
                                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                                ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
                        }

                        /* the scratch buffer becomes the accumulated depth and vice versa */
                        SWAP(DepthBufCache *, ps->gl.rect_depth, ps->gl.rect_depth_test);

                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                /* we want new depths every time */
                                glClear(GL_DEPTH_BUFFER_BIT);
                        }
                }
        }

        ps->gl.is_init = true;
        ps->gl.prev_id = id;

        return true;
}
531
532  /**
533   * (Optional), call before 'gpu_select_pick_end' if GL context is not kept.
534   * is not compatible with regular select case.
535   * */
536 void gpu_select_pick_finalize(void)
537 {
538         GPUPickState *ps = &g_pick_state;
539         if (ps->gl.is_init) {
540                 /* force finishing last pass */
541                 gpu_select_pick_load_id(ps->gl.prev_id);
542         }
543         ps->gl.is_finalized = true;
544 }
545
/**
 * Finish picking: restore GL state, collapse accumulated hits into the
 * caller's buffer (4 uints per hit: {1, 0, 0, id}), sorted by depth.
 *
 * \return Number of hits written, or (uint)-1 when there were more hits
 * than the buffer could hold.
 */
uint gpu_select_pick_end(void)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s\n", __func__);
#endif

        if (ps->is_cached == false) {
                if (ps->gl.is_finalized == false) {
                        gpu_select_pick_finalize();
                }

                gpuPopAttrib();
                glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        }

        /* assign but never free directly since it may be in cache */
        DepthBufCache *rect_depth_final;

        /* Store depth in cache */
        if (ps->use_cache && !ps->is_cached) {
                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                ps->gl.rect_depth = NULL;
                rect_depth_final = ps->cache.bufs.last;
        }
        else if (ps->is_cached) {
                rect_depth_final = ps->cache.bufs.last;
        }
        else {
                /* common case, no cache */
                rect_depth_final = ps->gl.rect_depth;
        }

        uint maxhits = g_pick_state.bufsize;
        DepthID *depth_data;
        uint depth_data_len = 0;

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                depth_data = ps->all.hits;
                depth_data_len = ps->all.hits_len;
                /* move ownership */
                ps->all.hits = NULL;
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = 0;
        }
        else {
                /* GPU_SELECT_PICK_NEAREST */

                /* Over alloc (unlikely we have as many depths as pixels) */
                uint depth_data_len_first_pass = 0;
                depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);

                /* Partially de-duplicating copy,
                 * when contiguous ID's are found - update their closest depth.
                 * This isn't essential but means there is less data to sort. */

#define EVAL_TEST(i_src, i_dst) \
                { \
                        const uint id = ps->nearest.rect_id[i_dst]; \
                        if (id != SELECT_ID_NONE) { \
                                const depth_t depth = rect_depth_final->buf[i_src]; \
                                if (depth_last == NULL || depth_last->id != id) { \
                                        DepthID *d = &depth_data[depth_data_len_first_pass++]; \
                                        d->id = id; \
                                        d->depth = depth; \
                                } \
                                else if (depth_last->depth > depth) { \
                                        depth_last->depth = depth; \
                                } \
                        } \
                } ((void)0)

                {
                        DepthID *depth_last = NULL;
                        if (ps->is_cached == false) {
                                for (uint i = 0; i < ps->src.rect_len; i++) {
                                        EVAL_TEST(i, i);
                                }
                        }
                        else {
                                /* same as above but different rect sizes */
                                uint i_src = ps->cache.sub_rect.start, i_dst = 0;
                                for (uint j = 0; j < ps->cache.sub_rect.span_len; j++) {
                                        const uint i_src_end = i_src + ps->cache.sub_rect.span;
                                        for (; i_src < i_src_end; i_src++, i_dst++) {
                                                EVAL_TEST(i_src, i_dst);
                                        }
                                        i_src += ps->cache.sub_rect.skip;
                                }
                        }
                }

#undef EVAL_TEST

                qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);

                /* Sort by ID's then keep the best depth for each ID */
                depth_data_len = 0;
                {
                        DepthID *depth_last = NULL;
                        for (uint i = 0; i < depth_data_len_first_pass; i++) {
                                if (depth_last == NULL || depth_last->id != depth_data[i].id) {
                                        depth_last = &depth_data[depth_data_len++];
                                        *depth_last = depth_data[i];
                                }
                                else if (depth_last->depth > depth_data[i].depth) {
                                        depth_last->depth = depth_data[i].depth;
                                }
                        }
                }
        }

        /* Finally sort each unique (id, depth) pair by depth
         * so the final hit-list is sorted by depth (nearest first) */
        uint hits = 0;

        if (depth_data_len > maxhits) {
                hits = (uint)-1;
        }
        else {
                /* leave sorting up to the caller */
                qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);

                for (uint i = 0; i < depth_data_len; i++) {
#ifdef DEBUG_PRINT
                        printf("  hit: %u: depth %u\n", depth_data[i].id,  depth_data[i].depth);
#endif
                        /* first 3 are dummy values */
                        g_pick_state.buffer[hits][0] = 1;
                        g_pick_state.buffer[hits][1] = 0x0;  /* depth_data[i].depth; */ /* unused */
                        g_pick_state.buffer[hits][2] = 0x0;  /* z-far is currently never used. */
                        g_pick_state.buffer[hits][3] = depth_data[i].id;
                        hits++;
                }
                /* NOTE(review): when depth_data_len == maxhits (buffer filled exactly)
                 * this assert fires even though no overflow occurred - looks like it
                 * should be 'hits <= maxhits'; confirm before changing. */
                BLI_assert(hits < maxhits);
        }

        MEM_freeN(depth_data);

        MEM_SAFE_FREE(ps->gl.rect_depth);
        MEM_SAFE_FREE(ps->gl.rect_depth_test);

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                /* 'hits' already freed as 'depth_data' */
        }
        else {
                MEM_freeN(ps->nearest.rect_id);
                ps->nearest.rect_id = NULL;
        }

        if (ps->use_cache) {
                ps->is_cached = true;
        }

        return hits;
}
703
704 /* ----------------------------------------------------------------------------
705  * Caching
706  *
707  * Support multiple begin/end's reusing depth buffers.
708  */
709
710 void gpu_select_pick_cache_begin(void)
711 {
712         BLI_assert(g_pick_state.use_cache == false);
713 #ifdef DEBUG_PRINT
714         printf("%s\n", __func__);
715 #endif
716         g_pick_state.use_cache = true;
717         g_pick_state.is_cached = false;
718 }
719
720 void gpu_select_pick_cache_end(void)
721 {
722 #ifdef DEBUG_PRINT
723         printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
724 #endif
725         g_pick_state.use_cache = false;
726         g_pick_state.is_cached = false;
727
728         BLI_freelistN(&g_pick_state.cache.bufs);
729 }
730
/* Is drawing needed? Returns true when cached depth buffers exist,
 * so the caller can replay them (see gpu_select_pick_cache_load_id)
 * instead of re-drawing. */
bool gpu_select_pick_is_cached(void)
{
        return g_pick_state.is_cached;
}
736
/**
 * Replay the cached depth buffers as id passes without any OpenGL drawing:
 * each consecutive pair of buffers is compared over the destination sub-rect
 * and accumulated via the same pass functions used when drawing.
 */
void gpu_select_pick_cache_load_id(void)
{
        BLI_assert(g_pick_state.is_cached == true);
        GPUPickState *ps = &g_pick_state;
#ifdef DEBUG_PRINT
        printf("%s (building depth from cache)\n", __func__);
#endif
        for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) {
                if (rect_depth->next != NULL) {
                        /* we know the buffers differ, but this sub-region may not.
                         * double check before adding an id-pass */
                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_all(rect_depth->next);
                                }
                        }
                        else {
                                if (depth_buf_subrect_depth_any_filled(rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
                                }
                        }
                }
        }
}