Code cleanup: fix a few harmless warnings.
[blender.git] / source / blender / gpu / intern / gpu_select_pick.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file blender/gpu/intern/gpu_select_pick.c
25  *  \ingroup gpu
26  *
27  * Custom select code for picking small regions (not efficient for large regions).
28  * `gpu_select_pick_*` API.
29  */
30 #include <string.h>
31 #include <stdlib.h>
32 #include <float.h>
33
34 #include "GPU_immediate.h"
35 #include "GPU_draw.h"
36 #include "GPU_select.h"
37 #include "GPU_extensions.h"
38 #include "GPU_glew.h"
39  
40 #include "MEM_guardedalloc.h"
41
42 #include "BLI_rect.h"
43 #include "BLI_listbase.h"
44 #include "BLI_math_vector.h"
45 #include "BLI_utildefines.h"
46
47 #include "gpu_select_private.h"
48
49 #include "BLI_strict_flags.h"
50
51 /* #define DEBUG_PRINT */
52
53 /* Alloc number for depths */
54 #define ALLOC_DEPTHS 200
55
56 /* Z-depth of cleared depth buffer */
57 #define DEPTH_MAX 0xffffffff
58
59 /* ----------------------------------------------------------------------------
60  * SubRectStride
61  */
62
63 /* For looping over a sub-region of a rect, could be moved into 'rct.c'*/
64 typedef struct SubRectStride {
65         unsigned int start;     /* start here */
66         unsigned int span;      /* read these */
67         unsigned int span_len;  /* len times (read span 'len' times). */
68         unsigned int skip;      /* skip those */
69 } SubRectStride;
70
71 /* we may want to change back to float if uint isn't well supported */
72 typedef unsigned int depth_t;
73
74 /**
75  * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).
76  *
77  * 'src' must be bigger than 'dst'.
78  */
79 static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
80 {
81         const int src_x = BLI_rcti_size_x(src);
82         // const int src_y = BLI_rcti_size_y(src);
83         const int dst_x = BLI_rcti_size_x(dst);
84         const int dst_y = BLI_rcti_size_y(dst);
85         const int x = dst->xmin - src->xmin;
86         const int y = dst->ymin - src->ymin;
87
88         BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin &&
89                    src->ymax >= dst->ymax && src->ymax >= dst->ymax);
90         BLI_assert(x >= 0 && y >= 0);
91
92         r_sub->start    = (unsigned int)((src_x * y) + x);
93         r_sub->span     = (unsigned int)dst_x;
94         r_sub->span_len = (unsigned int)dst_y;
95         r_sub->skip     = (unsigned int)(src_x - dst_x);
96 }
97
98 /**
99  * Ignore depth clearing as a change,
100  * only check if its been changed _and_ filled in (ignore clearing since XRAY does this).
101  */
102 BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
103 {
104         return (*prev != *curr) && (*curr != DEPTH_MAX);
105 }
106
/* ----------------------------------------------------------------------------
 * DepthBufCache
 *
 * Result of reading glReadPixels,
 * use for both cache and non-cached storage.
 */

/* store result of glReadPixels */
typedef struct DepthBufCache {
        struct DepthBufCache *next, *prev;  /* ListBase links (see GPUPickState.cache.bufs) */
        unsigned int id;                    /* select id this buffer was read under (SELECT_ID_NONE when unset) */
        depth_t buf[0];                     /* trailing depth array, over-allocated in depth_buf_malloc */
} DepthBufCache;
120
121 static DepthBufCache *depth_buf_malloc(unsigned int rect_len)
122 {
123         DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
124         rect->id = SELECT_ID_NONE;
125         return rect;
126 }
127
128 static bool depth_buf_rect_depth_any(
129         const DepthBufCache *rect_depth,
130         unsigned int rect_len)
131 {
132         const depth_t *curr = rect_depth->buf;
133         for (unsigned int i = 0; i < rect_len; i++, curr++) {
134                 if (*curr != DEPTH_MAX) {
135                         return true;
136                 }
137         }
138         return false;
139 }
140
141 static bool depth_buf_subrect_depth_any(
142         const DepthBufCache *rect_depth,
143         const SubRectStride *sub_rect)
144 {
145         const depth_t *curr = rect_depth->buf + sub_rect->start;
146         for (unsigned int i = 0; i < sub_rect->span_len; i++) {
147                 const depth_t *curr_end = curr + sub_rect->span;
148                 for (; curr < curr_end; curr++, curr++) {
149                         if (*curr != DEPTH_MAX) {
150                                 return true;
151                         }
152                 }
153                 curr += sub_rect->skip;
154         }
155         return false;
156 }
157
158 static bool depth_buf_rect_depth_any_filled(
159         const DepthBufCache *rect_prev, const DepthBufCache *rect_curr,
160         unsigned int rect_len)
161 {
162 #if 0
163         return memcmp(rect_depth_a->buf, rect_depth_b->buf, rect_len * sizeof(depth_t)) != 0;
164 #else
165         const depth_t *prev = rect_prev->buf;
166         const depth_t *curr = rect_curr->buf;
167         for (unsigned int i = 0; i < rect_len; i++, curr++, prev++) {
168                 if (depth_is_filled(prev, curr)) {
169                         return true;
170                 }
171         }
172         return false;
173 #endif
174 }
175
176 /**
177  * Both buffers are the same size, just check if the sub-rect contains any differences.
178  */
179 static bool depth_buf_subrect_depth_any_filled(
180         const DepthBufCache *rect_src, const DepthBufCache *rect_dst,
181         const SubRectStride *sub_rect)
182 {
183         /* same as above but different rect sizes */
184         const depth_t *prev = rect_src->buf + sub_rect->start;
185         const depth_t *curr = rect_dst->buf + sub_rect->start;
186         for (unsigned int i = 0; i < sub_rect->span_len; i++) {
187                 const depth_t *curr_end = curr + sub_rect->span;
188                 for (; curr < curr_end; prev++, curr++) {
189                         if (depth_is_filled(prev, curr)) {
190                                 return true;
191                         }
192                 }
193                 prev += sub_rect->skip;
194                 curr += sub_rect->skip;
195         }
196         return false;
197 }
198
/* ----------------------------------------------------------------------------
 * DepthID
 *
 * Internal structure for storing hits.
 */

/* One hit candidate: a select id paired with the best depth seen for it. */
typedef struct DepthID {
        unsigned int id;  /* select id (SELECT_ID_NONE entries are filtered out before storing) */
        depth_t depth;    /* nearest depth recorded for this id */
} DepthID;
209
210 static int depth_id_cmp(const void *v1, const void *v2)
211 {
212         const DepthID *d1 = v1, *d2 = v2;
213         if (d1->id < d2->id) {
214                 return -1;
215         }
216         else if (d1->id > d2->id) {
217                 return 1;
218         }
219         else {
220                 return 0;
221         }
222 }
223
224 static int depth_cmp(const void *v1, const void *v2)
225 {
226         const DepthID *d1 = v1, *d2 = v2;
227         if (d1->depth < d2->depth) {
228                 return -1;
229         }
230         else if (d1->depth > d2->depth) {
231                 return 1;
232         }
233         else {
234                 return 0;
235         }
236 }
237
/* depth sorting */
typedef struct GPUPickState {
        /* cache on initialization */
        unsigned int (*buffer)[4];

        /* buffer size (stores number of integers, for actual size multiply by sizeof integer) */
        unsigned int bufsize;
        /* mode of operation (GPU_SELECT_PICK_ALL or GPU_SELECT_PICK_NEAREST) */
        char mode;

        /* OpenGL drawing, never use when (is_cached == true). */
        struct {
                /* The current depth, accumulated as we draw */
                DepthBufCache *rect_depth;
                /* Scratch buffer, avoid allocs every time (when not caching) */
                DepthBufCache *rect_depth_test;

                /* Pass to glReadPixels (x, y, w, h) */
                int clip_readpixels[4];

                /* Set after first draw */
                bool is_init;
                /* Set once the last pass has been flushed (see gpu_select_pick_finalize) */
                bool is_finalized;
                /* id of the most recent gpu_select_pick_load_id call */
                unsigned int prev_id;
        } gl;

        /* src: data stored in 'cache' and 'gl',
         * dst: use when cached region is smaller (where src -> dst isn't 1:1) */
        struct {
                rcti clip_rect;
                unsigned int rect_len;
        } src, dst;

        /* Store cache between `GPU_select_cache_begin/end` */
        bool use_cache;
        bool is_cached;
        struct {
                /* Calculated stride for iterating over both source and destination buffers:
                 * src.clip_rect -> dst.clip_rect (see rect_subregion_stride_calc) */
                SubRectStride sub_rect;

                /* List of DepthBufCache, sized of 'src.clip_rect' */
                ListBase bufs;
        } cache;

        /* Picking methods (union: only the member matching 'mode' is valid) */
        union {
                /* GPU_SELECT_PICK_ALL */
                struct {
                        DepthID *hits;
                        unsigned int hits_len;
                        unsigned int hits_len_alloc;
                } all;

                /* GPU_SELECT_PICK_NEAREST */
                struct {
                        /* per-pixel id of the nearest hit, sized dst.rect_len */
                        unsigned int *rect_id;
                } nearest;
        };
} GPUPickState;
298
299
/* Single global pick state, shared by the begin/load/end and cache APIs below. */
static GPUPickState g_pick_state = {0};
301
/**
 * Begin a picking pass.
 *
 * \param buffer: Output hit buffer, 4 uints per hit (filled in gpu_select_pick_end).
 * \param bufsize: Capacity limit compared against the hit count in gpu_select_pick_end.
 * \param input: Screen-space rect to pick from.
 * \param mode: GPU_SELECT_PICK_ALL or GPU_SELECT_PICK_NEAREST.
 *
 * When replaying from cache (is_cached), all GL setup is skipped and only the
 * src -> dst sub-rect stride is calculated.
 */
void gpu_select_pick_begin(
        unsigned int (*buffer)[4], unsigned int bufsize,
        const rcti *input, char mode)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
#endif

        ps->bufsize = bufsize;
        ps->buffer = buffer;
        ps->mode = mode;

        const unsigned int rect_len = (unsigned int)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
        ps->dst.clip_rect = *input;
        ps->dst.rect_len = rect_len;

        /* Restrict OpenGL operations for when we don't have cache */
        if (ps->is_cached == false) {
                gpuPushAttrib(GPU_DEPTH_BUFFER_BIT | GPU_VIEWPORT_BIT);

                /* disable writing to the framebuffer */
                glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

                glEnable(GL_DEPTH_TEST);
                glDepthMask(GL_TRUE);

                /* ALL records every pass' depths (cleared between passes in
                 * gpu_select_pick_load_id), NEAREST accumulates the closest */
                if (mode == GPU_SELECT_PICK_ALL) {
                        glDepthFunc(GL_ALWAYS);
                }
                else {
                        glDepthFunc(GL_LEQUAL);
                }

                /* NOTE(review): assumes the current scissor box matches the region
                 * being drawn -- confirm against callers. */
                float viewport[4];
                glGetFloatv(GL_SCISSOR_BOX, viewport);

                ps->src.clip_rect = *input;
                ps->src.rect_len = rect_len;

                ps->gl.clip_readpixels[0] = (int)viewport[0];
                ps->gl.clip_readpixels[1] = (int)viewport[1];
                ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect);
                ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect);

                glViewport(UNPACK4(ps->gl.clip_readpixels));

                /* It's possible we don't want to clear depth buffer,
                 * so existing elements are masked by current z-buffer. */
                glClear(GL_DEPTH_BUFFER_BIT);

                /* scratch buffer (read new values here) */
                ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
                ps->gl.rect_depth = depth_buf_malloc(rect_len);

                /* set initial 'far' value */
#if 0
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth->buf);
#else
                for (unsigned int i = 0; i < rect_len; i++) {
                        ps->gl.rect_depth->buf[i] = DEPTH_MAX;
                }
#endif

                ps->gl.is_init = false;
                ps->gl.is_finalized = false;
                ps->gl.prev_id = 0;
        }
        else {
                /* Using cache (ps->is_cached == true) */
                /* src.clip_rect -> dst.clip_rect */
                rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
                BLI_assert(ps->gl.rect_depth == NULL);
                BLI_assert(ps->gl.rect_depth_test == NULL);
        }

        if (mode == GPU_SELECT_PICK_ALL) {
                ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = ALLOC_DEPTHS;
        }
        else {
                /* Set to 0xff for SELECT_ID_NONE */
                ps->nearest.rect_id = MEM_mallocN(sizeof(unsigned int) * ps->dst.rect_len, __func__);
                memset(ps->nearest.rect_id, 0xff, sizeof(unsigned int) * ps->dst.rect_len);
        }
}
390
/**
 * Given 2x depths, we know are different - update the depth information
 * use for both cached/uncached depth buffers.
 *
 * PICK_ALL variant: scan the pass' depth buffer for the nearest depth and
 * append one (id, depth) entry to 'all.hits'. When replaying from cache,
 * only the sub-rect portion of the buffer is scanned.
 */
static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const unsigned int id = rect_curr->id;
        /* find the best depth for this pass and store in 'all.hits' */
        depth_t depth_best = DEPTH_MAX;

#define EVAL_TEST() \
        if (depth_best > *curr) { \
                depth_best = *curr; \
        } ((void)0)

        if (ps->is_cached == false) {
                /* uncached: buffer and pick rect are the same size, scan linearly */
                const depth_t *curr = rect_curr->buf;
                BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                const unsigned int rect_len = ps->src.rect_len;
                for (unsigned int i = 0; i < rect_len; i++, curr++) {
                        EVAL_TEST();
                }
        }
        else {
                /* same as above but different rect sizes */
                const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
                        const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                        for (; curr < curr_end; curr++) {
                                EVAL_TEST();
                        }
                        curr += ps->cache.sub_rect.skip;
                }
        }

#undef EVAL_TEST

        /* ensure enough space */
        if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
                ps->all.hits_len_alloc += ALLOC_DEPTHS;
                ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
        }
        DepthID *d = &ps->all.hits[ps->all.hits_len++];
        d->id = id;
        d->depth = depth_best;
}
438
/**
 * PICK_NEAREST variant of an id-pass: for every pixel whose depth changed
 * this pass to a real (non-cleared) value, stamp the pass' id into
 * 'nearest.rect_id'. Passes with SELECT_ID_NONE are skipped entirely.
 */
static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
{
        GPUPickState *ps = &g_pick_state;
        const unsigned int id = rect_curr->id;
        /* keep track each pixels ID in 'nearest.rect_id' */
        if (id != SELECT_ID_NONE) {
                unsigned int *id_ptr = ps->nearest.rect_id;

                /* Check against DEPTH_MAX because XRAY will clear the buffer,
                 * so previously set values will become unset.
                 * In this case just leave those id's left as-is. */
#define EVAL_TEST() \
                if (depth_is_filled(prev, curr)) { \
                        *id_ptr = id; \
                } ((void)0)

                if (ps->is_cached == false) {
                        /* uncached: buffers and pick rect are the same size */
                        const depth_t *prev = rect_prev->buf;
                        const depth_t *curr = rect_curr->buf;
                        BLI_assert(ps->src.rect_len == ps->dst.rect_len);
                        const unsigned int rect_len = ps->src.rect_len;
                        for (unsigned int i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
                                EVAL_TEST();
                        }
                }
                else {
                        /* same as above but different rect sizes */
                        const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
                        const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
                        for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
                                const depth_t *curr_end = curr + ps->cache.sub_rect.span;
                                for (; curr < curr_end; prev++, curr++, id_ptr++) {
                                        EVAL_TEST();
                                }
                                prev += ps->cache.sub_rect.skip;
                                curr += ps->cache.sub_rect.skip;
                        }
                }

#undef EVAL_TEST
        }
}
481
482
/**
 * Called before drawing with a new select 'id': reads back the depths drawn
 * under the previous id and, when anything changed, records an id-pass
 * (storing the previous buffer in the cache when caching is enabled).
 * Always returns true (drawing is never skipped).
 */
bool gpu_select_pick_load_id(unsigned int id)
{
        GPUPickState *ps = &g_pick_state;
        if (ps->gl.is_init) {
                const unsigned int rect_len = ps->src.rect_len;
                /* read the depths the previous id drew into the scratch buffer */
                glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
                /* perform initial check since most cases the array remains unchanged  */

                bool do_pass = false;
                if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                        if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_all(ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }
                else {
                        if (depth_buf_rect_depth_any_filled(ps->gl.rect_depth, ps->gl.rect_depth_test, rect_len)) {
                                ps->gl.rect_depth_test->id = ps->gl.prev_id;
                                gpu_select_load_id_pass_nearest(ps->gl.rect_depth, ps->gl.rect_depth_test);
                                do_pass = true;
                        }
                }

                if (do_pass) {
                        /* Store depth in cache */
                        if (ps->use_cache) {
                                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                                ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
                        }

                        /* scratch buffer becomes the accumulated depth, and vice versa */
                        SWAP(DepthBufCache *, ps->gl.rect_depth, ps->gl.rect_depth_test);

                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                /* we want new depths every time */
                                glClear(GL_DEPTH_BUFFER_BIT);
                        }
                }
        }

        ps->gl.is_init = true;
        ps->gl.prev_id = id;

        return true;
}
528
529  /**
530   * (Optional), call before 'gpu_select_pick_end' if GL context is not kept.
531   * is not compatible with regular select case.
532   * */
533 void gpu_select_pick_finalize(void)
534 {
535         GPUPickState *ps = &g_pick_state;
536         if (ps->gl.is_init) {
537                 /* force finishing last pass */
538                 gpu_select_pick_load_id(ps->gl.prev_id);
539         }
540         ps->gl.is_finalized = true;
541 }
542
/**
 * Finish the picking pass: restore GL state (unless replaying from cache),
 * convert accumulated (id, depth) data into the caller's 'buffer'
 * (4 uints per hit: {1, 0, 0, id}, sorted nearest-first),
 * and free per-pass allocations.
 *
 * \return Number of hits written, or (unsigned int)-1 when they exceed 'bufsize'.
 */
unsigned int gpu_select_pick_end(void)
{
        GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
        printf("%s\n", __func__);
#endif

        if (ps->is_cached == false) {
                if (ps->gl.is_finalized == false) {
                        gpu_select_pick_finalize();
                }

                gpuPopAttrib();
                glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
        }

        /* assign but never free directly since it may be in cache */
        DepthBufCache *rect_depth_final;

        /* Store depth in cache */
        if (ps->use_cache && !ps->is_cached) {
                BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
                ps->gl.rect_depth = NULL;
                rect_depth_final = ps->cache.bufs.last;
        }
        else if (ps->is_cached) {
                rect_depth_final = ps->cache.bufs.last;
        }
        else {
                /* common case, no cache */
                rect_depth_final = ps->gl.rect_depth;
        }

        unsigned int maxhits = g_pick_state.bufsize;
        DepthID *depth_data;
        unsigned int depth_data_len = 0;

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                depth_data = ps->all.hits;
                depth_data_len = ps->all.hits_len;
                /* move ownership */
                ps->all.hits = NULL;
                ps->all.hits_len = 0;
                ps->all.hits_len_alloc = 0;
        }
        else {
                /* GPU_SELECT_PICK_NEAREST */

                /* Over alloc (unlikely we have as many depths as pixels) */
                unsigned int depth_data_len_first_pass = 0;
                depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);

                /* Partially de-duplicating copy,
                 * when contiguous ID's are found - update their closest depth.
                 * This isn't essential but means there is less data to sort. */

#define EVAL_TEST(i_src, i_dst) \
                { \
                        const unsigned int id = ps->nearest.rect_id[i_dst]; \
                        if (id != SELECT_ID_NONE) { \
                                const depth_t depth = rect_depth_final->buf[i_src]; \
                                if (depth_last == NULL || depth_last->id != id) { \
                                        DepthID *d = &depth_data[depth_data_len_first_pass++]; \
                                        d->id = id; \
                                        d->depth = depth; \
                                } \
                                else if (depth_last->depth > depth) { \
                                        depth_last->depth = depth; \
                                } \
                        } \
                } ((void)0)

                {
                        DepthID *depth_last = NULL;
                        if (ps->is_cached == false) {
                                for (unsigned int i = 0; i < ps->src.rect_len; i++) {
                                        EVAL_TEST(i, i);
                                }
                        }
                        else {
                                /* same as above but different rect sizes */
                                unsigned int i_src = ps->cache.sub_rect.start, i_dst = 0;
                                for (unsigned int j = 0; j < ps->cache.sub_rect.span_len; j++) {
                                        const unsigned int i_src_end = i_src + ps->cache.sub_rect.span;
                                        for (; i_src < i_src_end; i_src++, i_dst++) {
                                                EVAL_TEST(i_src, i_dst);
                                        }
                                        i_src += ps->cache.sub_rect.skip;
                                }
                        }
                }

#undef EVAL_TEST

                qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);

                /* Sort by ID's then keep the best depth for each ID */
                depth_data_len = 0;
                {
                        DepthID *depth_last = NULL;
                        for (unsigned int i = 0; i < depth_data_len_first_pass; i++) {
                                if (depth_last == NULL || depth_last->id != depth_data[i].id) {
                                        depth_last = &depth_data[depth_data_len++];
                                        *depth_last = depth_data[i];
                                }
                                else if (depth_last->depth > depth_data[i].depth) {
                                        depth_last->depth = depth_data[i].depth;
                                }
                        }
                }
        }

        /* Finally sort each unique (id, depth) pair by depth
         * so the final hit-list is sorted by depth (nearest first) */
        unsigned int hits = 0;

        if (depth_data_len > maxhits) {
                hits = (unsigned int)-1;
        }
        else {
                /* leave sorting up to the caller */
                qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);

                for (unsigned int i = 0; i < depth_data_len; i++) {
#ifdef DEBUG_PRINT
                        printf("  hit: %u: depth %u\n", depth_data[i].id,  depth_data[i].depth);
#endif
                        /* first 3 are dummy values */
                        g_pick_state.buffer[hits][0] = 1;
                        g_pick_state.buffer[hits][1] = 0x0;  /* depth_data[i].depth; */ /* unused */
                        g_pick_state.buffer[hits][2] = 0x0;  /* z-far is currently never used. */
                        g_pick_state.buffer[hits][3] = depth_data[i].id;
                        hits++;
                }
                /* NOTE(review): triggers when hits == maxhits exactly, even though that
                 * many hits fit the buffer -- presumably intentional headroom, confirm
                 * or relax to '<='. */
                BLI_assert(hits < maxhits);
        }

        MEM_freeN(depth_data);

        MEM_SAFE_FREE(ps->gl.rect_depth);
        MEM_SAFE_FREE(ps->gl.rect_depth_test);

        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                /* 'hits' already freed as 'depth_data' */
        }
        else {
                MEM_freeN(ps->nearest.rect_id);
                ps->nearest.rect_id = NULL;
        }

        if (ps->use_cache) {
                ps->is_cached = true;
        }

        return hits;
}
700
701 /* ----------------------------------------------------------------------------
702  * Caching
703  *
704  * Support multiple begin/end's reusing depth buffers.
705  */
706
707 void gpu_select_pick_cache_begin(void)
708 {
709         BLI_assert(g_pick_state.use_cache == false);
710 #ifdef DEBUG_PRINT
711         printf("%s\n", __func__);
712 #endif
713         g_pick_state.use_cache = true;
714         g_pick_state.is_cached = false;
715 }
716
717 void gpu_select_pick_cache_end(void)
718 {
719 #ifdef DEBUG_PRINT
720         printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
721 #endif
722         g_pick_state.use_cache = false;
723         g_pick_state.is_cached = false;
724
725         BLI_freelistN(&g_pick_state.cache.bufs);
726 }
727
728 /* is drawing needed? */
729 bool gpu_select_pick_is_cached(void)
730 {
731         return g_pick_state.is_cached;
732 }
733
/**
 * Replay the cached depth buffers as id-passes, comparing each buffer in
 * 'cache.bufs' against its successor over the current sub-rect.
 * Requires an active cache (is_cached == true).
 */
void gpu_select_pick_cache_load_id(void)
{
        BLI_assert(g_pick_state.is_cached == true);
        GPUPickState *ps = &g_pick_state;
#ifdef DEBUG_PRINT
        printf("%s (building depth from cache)\n", __func__);
#endif
        for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) {
                if (rect_depth->next != NULL) {
                        /* we know the buffers differ, but this sub-region may not.
                         * double check before adding an id-pass */
                        if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
                                if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_all(rect_depth->next);
                                }
                        }
                        else {
                                if (depth_buf_subrect_depth_any_filled(rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
                                        gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
                                }
                        }
                }
        }
}