GPU_select: utility function to finalize selection
[blender-staging.git] / source / blender / gpu / intern / gpu_select_pick.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2017 Blender Foundation.
19  * All rights reserved.
20  *
21  * ***** END GPL LICENSE BLOCK *****
22  */
23
24 /** \file blender/gpu/intern/gpu_select_pick.c
25  *  \ingroup gpu
26  *
27  * Custom select code for picking small regions (not efficient for large regions).
28  * `gpu_select_pick_*` API.
29  */
30 #include <string.h>
31 #include <stdlib.h>
32 #include <float.h>
33
34 #include "GPU_select.h"
35 #include "GPU_extensions.h"
36 #include "GPU_glew.h"
37  
38 #include "MEM_guardedalloc.h"
39
40 #include "BLI_rect.h"
41 #include "BLI_listbase.h"
42 #include "BLI_math_vector.h"
43 #include "BLI_utildefines.h"
44
45 #include "gpu_select_private.h"
46
47 #include "BLI_strict_flags.h"
48
49 /* #define DEBUG_PRINT */
50
51 /* Alloc number for depths */
52 #define ALLOC_DEPTHS 200
53
54 /* Z-depth of cleared depth buffer */
55 #define DEPTH_MAX 0xffffffff
56
57 /* ----------------------------------------------------------------------------
58  * SubRectStride
59  */
60
/* For looping over a sub-region of a rect (row-major order), could be moved into 'rct.c'. */
typedef struct SubRectStride {
	unsigned int start;     /* start here (offset of the first sub-rect pixel in the source buffer) */
	unsigned int span;      /* read these (pixels per row) */
	unsigned int span_len;  /* len times (read span 'len' times, i.e. the number of rows). */
	unsigned int skip;      /* skip those (pixels between the end of one row and the start of the next) */
} SubRectStride;
68
/* we may want to change back to float if uint isn't well supported.
 * Matches the GL_UNSIGNED_INT depth reads below (DEPTH_MAX == cleared/far value). */
typedef unsigned int depth_t;
71
72 /**
73  * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).
74  *
75  * 'src' must be bigger than 'dst'.
76  */
77 static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
78 {
79         const int src_x = BLI_rcti_size_x(src);
80         // const int src_y = BLI_rcti_size_y(src);
81         const int dst_x = BLI_rcti_size_x(dst);
82         const int dst_y = BLI_rcti_size_y(dst);
83         const int x = dst->xmin - src->xmin;
84         const int y = dst->ymin - src->ymin;
85
86         BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin &&
87                    src->ymax >= dst->ymax && src->ymax >= dst->ymax);
88         BLI_assert(x >= 0 && y >= 0);
89
90         r_sub->start    = (unsigned int)((src_x * y) + x);
91         r_sub->span     = (unsigned int)dst_x;
92         r_sub->span_len = (unsigned int)dst_y;
93         r_sub->skip     = (unsigned int)(src_x - dst_x);
94 }
95
96 /**
97  * Ignore depth clearing as a change,
98  * only check if its been changed _and_ filled in (ignore clearing since XRAY does this).
99  */
100 BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
101 {
102         return (*prev != *curr) && (*curr != DEPTH_MAX);
103 }
104
105 /* ----------------------------------------------------------------------------
106  * DepthBufCache
107  *
108  * Result of reading glReadPixels,
109  * use for both cache and non-cached storage.
110  */
111
/* store result of glReadPixels (one depth buffer per id-pass),
 * doubles as a list node so buffers can live in GPUPickState.cache.bufs */
typedef struct DepthBufCache {
	struct DepthBufCache *next, *prev;
	unsigned int id;  /* select id this buffer was drawn with (SELECT_ID_NONE when unset) */
	depth_t buf[0];   /* trailing array, sized by the rect being sampled */
} DepthBufCache;
118
/* Allocate a DepthBufCache with room for 'rect_len' depth values.
 * The 'id' is initialized to SELECT_ID_NONE, the buffer contents are left uninitialized. */
static DepthBufCache *depth_buf_malloc(unsigned int rect_len)
{
	DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
	rect->id = SELECT_ID_NONE;
	return rect;
}
125
126 static bool depth_buf_rect_depth_any(
127         const DepthBufCache *rect_depth,
128         unsigned int rect_len)
129 {
130         const depth_t *curr = rect_depth->buf;
131         for (unsigned int i = 0; i < rect_len; i++, curr++) {
132                 if (*curr != DEPTH_MAX) {
133                         return true;
134                 }
135         }
136         return false;
137 }
138
139 static bool depth_buf_subrect_depth_any(
140         const DepthBufCache *rect_depth,
141         const SubRectStride *sub_rect)
142 {
143         const depth_t *curr = rect_depth->buf + sub_rect->start;
144         for (unsigned int i = 0; i < sub_rect->span_len; i++) {
145                 const depth_t *curr_end = curr + sub_rect->span;
146                 for (; curr < curr_end; curr++, curr++) {
147                         if (*curr != DEPTH_MAX) {
148                                 return true;
149                         }
150                 }
151                 curr += sub_rect->skip;
152         }
153         return false;
154 }
155
156 static bool depth_buf_rect_depth_any_filled(
157         const DepthBufCache *rect_prev, const DepthBufCache *rect_curr,
158         unsigned int rect_len)
159 {
160 #if 0
161         return memcmp(rect_depth_a->buf, rect_depth_b->buf, rect_len * sizeof(depth_t)) != 0;
162 #else
163         const depth_t *prev = rect_prev->buf;
164         const depth_t *curr = rect_curr->buf;
165         for (unsigned int i = 0; i < rect_len; i++, curr++, prev++) {
166                 if (depth_is_filled(prev, curr)) {
167                         return true;
168                 }
169         }
170         return false;
171 #endif
172 }
173
174 /**
175  * Both buffers are the same size, just check if the sub-rect contains any differences.
176  */
177 static bool depth_buf_subrect_depth_any_filled(
178         const DepthBufCache *rect_src, const DepthBufCache *rect_dst,
179         const SubRectStride *sub_rect)
180 {
181         /* same as above but different rect sizes */
182         const depth_t *prev = rect_src->buf + sub_rect->start;
183         const depth_t *curr = rect_dst->buf + sub_rect->start;
184         for (unsigned int i = 0; i < sub_rect->span_len; i++) {
185                 const depth_t *curr_end = curr + sub_rect->span;
186                 for (; curr < curr_end; prev++, curr++) {
187                         if (depth_is_filled(prev, curr)) {
188                                 return true;
189                         }
190                 }
191                 prev += sub_rect->skip;
192                 curr += sub_rect->skip;
193         }
194         return false;
195 }
196
197 /* ----------------------------------------------------------------------------
198  * DepthID
199  *
200  * Internal structure for storing hits.
201  */
202
/* A single hit: a select id paired with its best (nearest) depth. */
typedef struct DepthID {
	unsigned int id;
	depth_t depth;
} DepthID;
207
208 static int depth_id_cmp(const void *v1, const void *v2)
209 {
210         const DepthID *d1 = v1, *d2 = v2;
211         if (d1->id < d2->id) {
212                 return -1;
213         }
214         else if (d1->id > d2->id) {
215                 return 1;
216         }
217         else {
218                 return 0;
219         }
220 }
221
222 static int depth_cmp(const void *v1, const void *v2)
223 {
224         const DepthID *d1 = v1, *d2 = v2;
225         if (d1->depth < d2->depth) {
226                 return -1;
227         }
228         else if (d1->depth > d2->depth) {
229                 return 1;
230         }
231         else {
232                 return 0;
233         }
234 }
235
/* depth sorting: all state for one picking pass (single global instance below) */
typedef struct GPUPickState {
	/* cache on initialization */
	unsigned int (*buffer)[4];

	/* buffer size (stores number of integers, for actual size multiply by sizeof integer)*/
	unsigned int bufsize;
	/* mode of operation (GPU_SELECT_PICK_ALL / GPU_SELECT_PICK_NEAREST) */
	char mode;

	/* OpenGL drawing, never use when (is_cached == true). */
	struct {
		/* The current depth, accumulated as we draw */
		DepthBufCache *rect_depth;
		/* Scratch buffer, avoid allocs every time (when not caching) */
		DepthBufCache *rect_depth_test;

		/* Pass to glReadPixels (x, y, w, h) */
		int clip_readpixels[4];

		/* Set after first draw */
		bool is_init;
		/* id of the pass currently being drawn (flushed when the next id arrives) */
		unsigned int prev_id;
	} gl;

	/* src: data stored in 'cache' and 'gl',
	 * dst: use when cached region is smaller (where src -> dst isn't 1:1) */
	struct {
		rcti clip_rect;
		unsigned int rect_len;
	} src, dst;

	/* Store cache between `GPU_select_cache_begin/end` */
	bool use_cache;
	bool is_cached;
	struct {
		/* Precalculated stride used for iterating over both source and destination buffers:
		 * src.clip_rect -> dst.clip_rect */
		SubRectStride sub_rect;

		/* List of DepthBufCache, sized of 'src.clip_rect' */
		ListBase bufs;
	} cache;

	/* Picking methods (active member selected by 'mode') */
	union {
		/* GPU_SELECT_PICK_ALL */
		struct {
			DepthID *hits;
			unsigned int hits_len;
			unsigned int hits_len_alloc;
		} all;

		/* GPU_SELECT_PICK_NEAREST */
		struct {
			/* one id per destination pixel, 0xffffffff (SELECT_ID_NONE) when never filled */
			unsigned int *rect_id;
		} nearest;
	};
} GPUPickState;
295
296
/* single global pick state, zero initialized */
static GPUPickState g_pick_state = {0};
298
/**
 * Begin a picking pass.
 *
 * Stores the caller's result buffer and target rect; when not replaying from
 * cache, also sets up GL for depth-only drawing and allocates the depth buffers.
 *
 * \param buffer: output, filled with hit tuples by #gpu_select_pick_end.
 * \param bufsize: size of \a buffer counted in uints (see GPUPickState.bufsize).
 * \param input: screen region to pick from (pixels).
 * \param mode: GPU_SELECT_PICK_ALL or GPU_SELECT_PICK_NEAREST.
 */
void gpu_select_pick_begin(
        unsigned int (*buffer)[4], unsigned int bufsize,
        const rcti *input, char mode)
{
	GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
	printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
#endif

	ps->bufsize = bufsize;
	ps->buffer = buffer;
	ps->mode = mode;

	const unsigned int rect_len = (unsigned int)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
	ps->dst.clip_rect = *input;
	ps->dst.rect_len = rect_len;

	/* Restrict OpenGL operations for when we don't have cache */
	if (ps->is_cached == false) {

		glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT);
		/* disable writing to the framebuffer */
		glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);

		glEnable(GL_DEPTH_TEST);
		glDepthMask(GL_TRUE);

		if (mode == GPU_SELECT_PICK_ALL) {
			/* always pass, so each id-pass writes its own depths */
			glDepthFunc(GL_ALWAYS);
		}
		else {
			glDepthFunc(GL_LEQUAL);
		}

		/* set just in case */
		glPixelTransferf(GL_DEPTH_BIAS, 0.0);
		glPixelTransferf(GL_DEPTH_SCALE, 1.0);

		float viewport[4];
		/* NOTE(review): reads GL_SCISSOR_BOX (not GL_VIEWPORT) for the region
		 * origin - presumably the caller scissors to the picking rect, confirm. */
		glGetFloatv(GL_SCISSOR_BOX, viewport);

		ps->src.clip_rect = *input;
		ps->src.rect_len = rect_len;

		/* region passed to glReadPixels / glViewport: (x, y, w, h) */
		ps->gl.clip_readpixels[0] = (int)viewport[0];
		ps->gl.clip_readpixels[1] = (int)viewport[1];
		ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect);
		ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect);

		glViewport(UNPACK4(ps->gl.clip_readpixels));

		/* It's possible we don't want to clear depth buffer,
		 * so existing elements are masked by current z-buffer. */
		glClear(GL_DEPTH_BUFFER_BIT);

		/* scratch buffer (read new values here) */
		ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
		ps->gl.rect_depth = depth_buf_malloc(rect_len);

		/* set initial 'far' value */
#if 0
		glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth->buf);
#else
		for (unsigned int i = 0; i < rect_len; i++) {
			ps->gl.rect_depth->buf[i] = DEPTH_MAX;
		}
#endif

		ps->gl.is_init = false;
		ps->gl.prev_id = 0;
	}
	else {
		/* Using cache (ps->is_cached == true) */
		/* src.clip_rect -> dst.clip_rect */
		rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
		BLI_assert(ps->gl.rect_depth == NULL);
		BLI_assert(ps->gl.rect_depth_test == NULL);
	}

	if (mode == GPU_SELECT_PICK_ALL) {
		ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
		ps->all.hits_len = 0;
		ps->all.hits_len_alloc = ALLOC_DEPTHS;
	}
	else {
		/* Set to 0xff for SELECT_ID_NONE */
		ps->nearest.rect_id = MEM_mallocN(sizeof(unsigned int) * ps->dst.rect_len, __func__);
		memset(ps->nearest.rect_id, 0xff, sizeof(unsigned int) * ps->dst.rect_len);
	}
}
390
/**
 * Finish a pass for GPU_SELECT_PICK_ALL:
 * scan the pass's depth buffer for its minimum depth and append one
 * (id, best-depth) entry to 'all.hits'.
 * Used for both cached/uncached depth buffers.
 */
static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
{
	GPUPickState *ps = &g_pick_state;
	const unsigned int id = rect_curr->id;
	/* find the best depth for this pass and store in 'all.hits' */
	depth_t depth_best = DEPTH_MAX;

#define EVAL_TEST() \
	if (depth_best > *curr) { \
		depth_best = *curr; \
	} ((void)0)

	if (ps->is_cached == false) {
		/* src and dst cover the same rect: scan the whole buffer linearly */
		const depth_t *curr = rect_curr->buf;
		BLI_assert(ps->src.rect_len == ps->dst.rect_len);
		const unsigned int rect_len = ps->src.rect_len;
		for (unsigned int i = 0; i < rect_len; i++, curr++) {
			EVAL_TEST();
		}
	}
	else {
		/* same as above but different rect sizes (scan only the sub-rect rows) */
		const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
		for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
			const depth_t *curr_end = curr + ps->cache.sub_rect.span;
			for (; curr < curr_end; curr++) {
				EVAL_TEST();
			}
			curr += ps->cache.sub_rect.skip;
		}
	}

#undef EVAL_TEST

	/* ensure enough space */
	if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
		ps->all.hits_len_alloc += ALLOC_DEPTHS;
		ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
	}
	/* record the hit (callers only invoke this when some depth was written,
	 * see the depth_buf_*_any checks at the call sites) */
	DepthID *d = &ps->all.hits[ps->all.hits_len++];
	d->id = id;
	d->depth = depth_best;
}
438
/**
 * Finish a pass for GPU_SELECT_PICK_NEAREST:
 * for every pixel newly filled this pass (see #depth_is_filled),
 * stamp the pass id into 'nearest.rect_id'.
 * Used for both cached/uncached depth buffers.
 */
static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
{
	GPUPickState *ps = &g_pick_state;
	const unsigned int id = rect_curr->id;
	/* keep track each pixels ID in 'nearest.rect_id' */
	if (id != SELECT_ID_NONE) {
		unsigned int *id_ptr = ps->nearest.rect_id;

		/* Check against DEPTH_MAX because XRAY will clear the buffer,
		 * so previously set values will become unset.
		 * In this case just leave those id's left as-is. */
#define EVAL_TEST() \
		if (depth_is_filled(prev, curr)) { \
			*id_ptr = id; \
		} ((void)0)

		if (ps->is_cached == false) {
			/* src and dst cover the same rect: walk all three buffers in lock-step */
			const depth_t *prev = rect_prev->buf;
			const depth_t *curr = rect_curr->buf;
			BLI_assert(ps->src.rect_len == ps->dst.rect_len);
			const unsigned int rect_len = ps->src.rect_len;
			for (unsigned int i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
				EVAL_TEST();
			}
		}
		else {
			/* same as above but different rect sizes
			 * ('id_ptr' is dst-sized so it does not use the skip) */
			const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
			const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
			for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
				const depth_t *curr_end = curr + ps->cache.sub_rect.span;
				for (; curr < curr_end; prev++, curr++, id_ptr++) {
					EVAL_TEST();
				}
				prev += ps->cache.sub_rect.skip;
				curr += ps->cache.sub_rect.skip;
			}
		}

#undef EVAL_TEST
	}
}
481
482
/**
 * Called for each new select id while drawing:
 * flushes the previous id's pass (reads back the depth buffer and accumulates
 * hits if anything was drawn), then records \a id as the id now being drawn.
 *
 * \return always true (continue drawing).
 */
bool gpu_select_pick_load_id(unsigned int id)
{
	GPUPickState *ps = &g_pick_state;
	if (ps->gl.is_init) {
		const unsigned int rect_len = ps->src.rect_len;
		glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
		/* perform initial check since most cases the array remains unchanged  */

		bool do_pass = false;
		if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
			if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
				ps->gl.rect_depth_test->id = ps->gl.prev_id;
				gpu_select_load_id_pass_all(ps->gl.rect_depth_test);
				do_pass = true;
			}
		}
		else {
			if (depth_buf_rect_depth_any_filled(ps->gl.rect_depth, ps->gl.rect_depth_test, rect_len)) {
				ps->gl.rect_depth_test->id = ps->gl.prev_id;
				gpu_select_load_id_pass_nearest(ps->gl.rect_depth, ps->gl.rect_depth_test);
				do_pass = true;
			}
		}

		if (do_pass) {
			/* Store depth in cache */
			if (ps->use_cache) {
				/* the list takes ownership of the old buffer, allocate a fresh one */
				BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
				ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
			}

			/* the test buffer becomes the new 'current' depth,
			 * the old one is recycled as the next scratch buffer */
			SWAP(DepthBufCache *, ps->gl.rect_depth, ps->gl.rect_depth_test);

			if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
				/* we want new depths every time */
				glClear(GL_DEPTH_BUFFER_BIT);
			}
		}
	}

	ps->gl.is_init = true;
	ps->gl.prev_id = id;

	return true;
}
528
 /**
  * (Optional), call before 'gpu_select_pick_end' if the GL context is not kept.
  * Not compatible with the regular select case.
  */
void gpu_select_pick_finalize(void)
{
	GPUPickState *ps = &g_pick_state;
	if (ps->gl.is_init) {
		/* force finishing last pass (flushes 'prev_id' into the hit data) */
		gpu_select_pick_load_id(ps->gl.prev_id);
	}
}
541
/**
 * End the picking pass: flush the final draw, restore GL state,
 * reduce the accumulated hits to unique ids sorted by nearest depth,
 * and write them into the caller's buffer.
 *
 * \return number of hits written, or (unsigned int)-1 when they don't fit.
 */
unsigned int gpu_select_pick_end(void)
{
	GPUPickState *ps = &g_pick_state;

#ifdef DEBUG_PRINT
	printf("%s\n", __func__);
#endif

	if (ps->is_cached == false) {
		gpu_select_pick_finalize();

		glPopAttrib();
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
	}

	/* assign but never free directly since it may be in cache */
	DepthBufCache *rect_depth_final;

	/* Store depth in cache */
	if (ps->use_cache && !ps->is_cached) {
		BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
		ps->gl.rect_depth = NULL;
		rect_depth_final = ps->cache.bufs.last;
	}
	else if (ps->is_cached) {
		rect_depth_final = ps->cache.bufs.last;
	}
	else {
		/* common case, no cache */
		rect_depth_final = ps->gl.rect_depth;
	}

	unsigned int maxhits = g_pick_state.bufsize;
	DepthID *depth_data;
	unsigned int depth_data_len = 0;

	if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
		depth_data = ps->all.hits;
		depth_data_len = ps->all.hits_len;
		/* move ownership */
		ps->all.hits = NULL;
		ps->all.hits_len = 0;
		ps->all.hits_len_alloc = 0;
	}
	else {
		/* GPU_SELECT_PICK_NEAREST */

		/* Over alloc (unlikely we have as many depths as pixels) */
		unsigned int depth_data_len_first_pass = 0;
		depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);

		/* Partially de-duplicating copy,
		 * when contiguous ID's are found - update their closest depth.
		 * This isn't essential but means there is less data to sort. */

#define EVAL_TEST(i_src, i_dst) \
		{ \
			const unsigned int id = ps->nearest.rect_id[i_dst]; \
			if (id != SELECT_ID_NONE) { \
				const depth_t depth = rect_depth_final->buf[i_src]; \
				if (depth_last == NULL || depth_last->id != id) { \
					DepthID *d = &depth_data[depth_data_len_first_pass++]; \
					d->id = id; \
					d->depth = depth; \
				} \
				else if (depth_last->depth > depth) { \
					depth_last->depth = depth; \
				} \
			} \
		} ((void)0)

		{
			DepthID *depth_last = NULL;
			if (ps->is_cached == false) {
				/* src and dst cover the same rect: 1:1 index mapping */
				for (unsigned int i = 0; i < ps->src.rect_len; i++) {
					EVAL_TEST(i, i);
				}
			}
			else {
				/* same as above but different rect sizes */
				unsigned int i_src = ps->cache.sub_rect.start, i_dst = 0;
				for (unsigned int j = 0; j < ps->cache.sub_rect.span_len; j++) {
					const unsigned int i_src_end = i_src + ps->cache.sub_rect.span;
					for (; i_src < i_src_end; i_src++, i_dst++) {
						EVAL_TEST(i_src, i_dst);
					}
					i_src += ps->cache.sub_rect.skip;
				}
			}
		}

#undef EVAL_TEST

		qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);

		/* Sort by ID's then keep the best depth for each ID */
		depth_data_len = 0;
		{
			DepthID *depth_last = NULL;
			for (unsigned int i = 0; i < depth_data_len_first_pass; i++) {
				if (depth_last == NULL || depth_last->id != depth_data[i].id) {
					depth_last = &depth_data[depth_data_len++];
					*depth_last = depth_data[i];
				}
				else if (depth_last->depth > depth_data[i].depth) {
					depth_last->depth = depth_data[i].depth;
				}
			}
		}
	}

	/* Finally sort each unique (id, depth) pair by depth
	 * so the final hit-list is sorted by depth (nearest first) */
	unsigned int hits = 0;

	if (depth_data_len > maxhits) {
		hits = (unsigned int)-1;
	}
	else {
		/* leave sorting up to the caller */
		qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);

		for (unsigned int i = 0; i < depth_data_len; i++) {
#ifdef DEBUG_PRINT
			printf("  hit: %u: depth %u\n", depth_data[i].id,  depth_data[i].depth);
#endif
			/* first 3 are dummy values */
			g_pick_state.buffer[hits][0] = 1;
			g_pick_state.buffer[hits][1] = 0x0;  /* depth_data[i].depth; */ /* unused */
			g_pick_state.buffer[hits][2] = 0x0;  /* z-far is currently never used. */
			g_pick_state.buffer[hits][3] = depth_data[i].id;
			hits++;
		}
		/* NOTE(review): when depth_data_len == maxhits, 'hits' ends equal to
		 * 'maxhits' and this assert fires - looks like it should be '<=', confirm. */
		BLI_assert(hits < maxhits);
	}

	MEM_freeN(depth_data);

	MEM_SAFE_FREE(ps->gl.rect_depth);
	MEM_SAFE_FREE(ps->gl.rect_depth_test);

	if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
		/* 'hits' already freed as 'depth_data' */
	}
	else {
		MEM_freeN(ps->nearest.rect_id);
		ps->nearest.rect_id = NULL;
	}

	if (ps->use_cache) {
		ps->is_cached = true;
	}

	return hits;
}
697
698 /* ----------------------------------------------------------------------------
699  * Caching
700  *
701  * Support multiple begin/end's reusing depth buffers.
702  */
703
704 void gpu_select_pick_cache_begin(void)
705 {
706         BLI_assert(g_pick_state.use_cache == false);
707 #ifdef DEBUG_PRINT
708         printf("%s\n", __func__);
709 #endif
710         g_pick_state.use_cache = true;
711         g_pick_state.is_cached = false;
712 }
713
714 void gpu_select_pick_cache_end(void)
715 {
716 #ifdef DEBUG_PRINT
717         printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
718 #endif
719         g_pick_state.use_cache = false;
720         g_pick_state.is_cached = false;
721
722         BLI_freelistN(&g_pick_state.cache.bufs);
723 }
724
/* is drawing needed? (when cached, callers can replay via gpu_select_pick_cache_load_id
 * instead of re-drawing the scene) */
bool gpu_select_pick_is_cached(void)
{
	return g_pick_state.is_cached;
}
730
/**
 * Replay cached depth buffers instead of drawing:
 * run an id pass for each consecutive (prev, next) pair of cached buffers,
 * restricted to the destination sub-rect.
 */
void gpu_select_pick_cache_load_id(void)
{
	BLI_assert(g_pick_state.is_cached == true);
	GPUPickState *ps = &g_pick_state;
#ifdef DEBUG_PRINT
	printf("%s (building depth from cache)\n", __func__);
#endif
	for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) {
		if (rect_depth->next != NULL) {
			/* we know the buffers differ, but this sub-region may not.
			 * double check before adding an id-pass */
			if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
				if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
					gpu_select_load_id_pass_all(rect_depth->next);
				}
			}
			else {
				if (depth_buf_subrect_depth_any_filled(rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
					gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
				}
			}
		}
	}
}