ded2fd7e06d71abde0063444577107a8787ccfe4
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38 #include "BLI_listbase.h"
39 #include "BLI_gsqueue.h"
40 #include "BLI_task.h"
41 #include "BLI_threads.h"
42
43 #include "PIL_time.h"
44
45 /* for checking system threads - BLI_system_thread_count */
46 #ifdef WIN32
47 #  include <windows.h>
48 #  include <sys/timeb.h>
49 #elif defined(__APPLE__)
50 #  include <sys/types.h>
51 #  include <sys/sysctl.h>
52 #else
53 #  include <unistd.h>
54 #  include <sys/time.h>
55 #endif
56
57 #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && !defined(__clang__)
58 #  define USE_APPLE_OMP_FIX
59 #endif
60
61 #ifdef USE_APPLE_OMP_FIX
62 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
63 extern pthread_key_t gomp_tls_key;
64 static void *thread_tls_data;
65 #endif
66
67 /* We're using one global task scheduler for all kind of tasks. */
68 static TaskScheduler *task_scheduler = NULL;
69
70 /* ********** basic thread control API ************ 
71  * 
72  * Many thread cases have an X amount of jobs, and only an Y amount of
73  * threads are useful (typically amount of cpus)
74  *
75  * This code can be used to start a maximum amount of 'thread slots', which
76  * then can be filled in a loop with an idle timer. 
77  *
78  * A sample loop can look like this (pseudo c);
79  *
80  *     ListBase lb;
81  *     int maxthreads = 2;
82  *     int cont = 1;
83  * 
84  *     BLI_init_threads(&lb, do_something_func, maxthreads);
85  * 
86  *     while (cont) {
87  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
88  *             // get new job (data pointer)
89  *             // tag job 'processed'
90  *             BLI_insert_thread(&lb, job);
91  *         }
92  *         else PIL_sleep_ms(50);
93  *         
94  *         // find if a job is ready; do_something_func() should write this readiness into the job somewhere
95  *         cont = 0;
96  *         for (go over all jobs)
97  *             if (job is ready) {
98  *                 if (job was not removed) {
99  *                     BLI_remove_thread(&lb, job);
100  *                 }
101  *             }
102  *             else cont = 1;
103  *         }
104  *         // conditions to exit loop 
105  *         if (if escape loop event) {
106  *             if (BLI_available_threadslots(&lb) == maxthreads)
107  *                 break;
108  *         }
109  *     }
110  * 
111  *     BLI_end_threads(&lb);
112  *
113  ************************************************ */
114 static SpinLock _malloc_lock;
115 static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
116 static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
117 static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
118 static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
119 static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
120 static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
121 static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
122 static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
123 static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
124 static pthread_mutex_t _fftw_lock = PTHREAD_MUTEX_INITIALIZER;
125 static pthread_t mainid;
126 static int thread_levels = 0;  /* threads can be invoked inside threads */
127 static int num_threads_override = 0;
128
129 /* just a max for security reasons */
130 #define RE_MAX_THREAD BLENDER_MAX_THREADS
131
132 typedef struct ThreadSlot {
133         struct ThreadSlot *next, *prev;
134         void *(*do_thread)(void *);
135         void *callerdata;
136         pthread_t pthread;
137         int avail;
138 } ThreadSlot;
139
140 static void BLI_lock_malloc_thread(void)
141 {
142         BLI_spin_lock(&_malloc_lock);
143 }
144
145 static void BLI_unlock_malloc_thread(void)
146 {
147         BLI_spin_unlock(&_malloc_lock);
148 }
149
150 void BLI_threadapi_init(void)
151 {
152         mainid = pthread_self();
153
154         BLI_spin_init(&_malloc_lock);
155 }
156
157 void BLI_threadapi_exit(void)
158 {
159         if (task_scheduler) {
160                 BLI_task_scheduler_free(task_scheduler);
161         }
162         BLI_spin_end(&_malloc_lock);
163 }
164
165 TaskScheduler *BLI_task_scheduler_get(void)
166 {
167         if (task_scheduler == NULL) {
168                 int tot_thread = BLI_system_thread_count();
169
170                 /* Do a lazy initialization, so it happens after
171                  * command line arguments parsing
172                  */
173                 task_scheduler = BLI_task_scheduler_create(tot_thread);
174         }
175
176         return task_scheduler;
177 }
178
179 /* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
180  * problem otherwise: scene render will kill of the mutex!
181  */
182
183 void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
184 {
185         int a;
186
187         if (threadbase != NULL && tot > 0) {
188                 BLI_listbase_clear(threadbase);
189         
190                 if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
191                 else if (tot < 1) tot = 1;
192         
193                 for (a = 0; a < tot; a++) {
194                         ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
195                         BLI_addtail(threadbase, tslot);
196                         tslot->do_thread = do_thread;
197                         tslot->avail = 1;
198                 }
199         }
200         
201         if (thread_levels == 0) {
202                 MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
203
204 #ifdef USE_APPLE_OMP_FIX
205                 /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
206                  * we copy gomp thread local storage pointer to setting it again
207                  * inside the thread that we start */
208                 thread_tls_data = pthread_getspecific(gomp_tls_key);
209 #endif
210         }
211
212         thread_levels++;
213 }
214
215 /* amount of available threads */
216 int BLI_available_threads(ListBase *threadbase)
217 {
218         ThreadSlot *tslot;
219         int counter = 0;
220         
221         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
222                 if (tslot->avail)
223                         counter++;
224         }
225         return counter;
226 }
227
228 /* returns thread number, for sample patterns or threadsafe tables */
229 int BLI_available_thread_index(ListBase *threadbase)
230 {
231         ThreadSlot *tslot;
232         int counter = 0;
233         
234         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
235                 if (tslot->avail)
236                         return counter;
237         }
238         return 0;
239 }
240
241 static void *tslot_thread_start(void *tslot_p)
242 {
243         ThreadSlot *tslot = (ThreadSlot *)tslot_p;
244
245 #ifdef USE_APPLE_OMP_FIX
246         /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
247          * set gomp thread local storage pointer which was copied beforehand */
248         pthread_setspecific(gomp_tls_key, thread_tls_data);
249 #endif
250
251         return tslot->do_thread(tslot->callerdata);
252 }
253
254 int BLI_thread_is_main(void)
255 {
256         return pthread_equal(pthread_self(), mainid);
257 }
258
259 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
260 {
261         ThreadSlot *tslot;
262         
263         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
264                 if (tslot->avail) {
265                         tslot->avail = 0;
266                         tslot->callerdata = callerdata;
267                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
268                         return;
269                 }
270         }
271         printf("ERROR: could not insert thread slot\n");
272 }
273
274 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
275 {
276         ThreadSlot *tslot;
277         
278         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
279                 if (tslot->callerdata == callerdata) {
280                         pthread_join(tslot->pthread, NULL);
281                         tslot->callerdata = NULL;
282                         tslot->avail = 1;
283                 }
284         }
285 }
286
287 void BLI_remove_thread_index(ListBase *threadbase, int index)
288 {
289         ThreadSlot *tslot;
290         int counter = 0;
291         
292         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
293                 if (counter == index && tslot->avail == 0) {
294                         pthread_join(tslot->pthread, NULL);
295                         tslot->callerdata = NULL;
296                         tslot->avail = 1;
297                         break;
298                 }
299         }
300 }
301
302 void BLI_remove_threads(ListBase *threadbase)
303 {
304         ThreadSlot *tslot;
305         
306         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
307                 if (tslot->avail == 0) {
308                         pthread_join(tslot->pthread, NULL);
309                         tslot->callerdata = NULL;
310                         tslot->avail = 1;
311                 }
312         }
313 }
314
315 void BLI_end_threads(ListBase *threadbase)
316 {
317         ThreadSlot *tslot;
318         
319         /* only needed if there's actually some stuff to end
320          * this way we don't end up decrementing thread_levels on an empty threadbase 
321          * */
322         if (threadbase && (BLI_listbase_is_empty(threadbase) == false)) {
323                 for (tslot = threadbase->first; tslot; tslot = tslot->next) {
324                         if (tslot->avail == 0) {
325                                 pthread_join(tslot->pthread, NULL);
326                         }
327                 }
328                 BLI_freelistN(threadbase);
329         }
330
331         thread_levels--;
332         if (thread_levels == 0)
333                 MEM_set_lock_callback(NULL, NULL);
334 }
335
336 /* System Information */
337
338 /* how many threads are native on this system? */
339 int BLI_system_thread_count(void)
340 {
341         int t;
342 #ifdef WIN32
343         SYSTEM_INFO info;
344         GetSystemInfo(&info);
345         t = (int) info.dwNumberOfProcessors;
346 #else 
347 #   ifdef __APPLE__
348         int mib[2];
349         size_t len;
350         
351         mib[0] = CTL_HW;
352         mib[1] = HW_NCPU;
353         len = sizeof(t);
354         sysctl(mib, 2, &t, &len, NULL, 0);
355 #   else
356         t = (int)sysconf(_SC_NPROCESSORS_ONLN);
357 #   endif
358 #endif
359
360         if (num_threads_override > 0)
361                 return num_threads_override;
362         
363         if (t > RE_MAX_THREAD)
364                 return RE_MAX_THREAD;
365         if (t < 1)
366                 return 1;
367         
368         return t;
369 }
370
371 void BLI_system_num_threads_override_set(int num)
372 {
373         num_threads_override = num;
374 }
375
376 int BLI_system_num_threads_override_get(void)
377 {
378         return num_threads_override;
379 }
380
381 /* Global Mutex Locks */
382
383 void BLI_lock_thread(int type)
384 {
385         if (type == LOCK_IMAGE)
386                 pthread_mutex_lock(&_image_lock);
387         else if (type == LOCK_DRAW_IMAGE)
388                 pthread_mutex_lock(&_image_draw_lock);
389         else if (type == LOCK_VIEWER)
390                 pthread_mutex_lock(&_viewer_lock);
391         else if (type == LOCK_CUSTOM1)
392                 pthread_mutex_lock(&_custom1_lock);
393         else if (type == LOCK_RCACHE)
394                 pthread_mutex_lock(&_rcache_lock);
395         else if (type == LOCK_OPENGL)
396                 pthread_mutex_lock(&_opengl_lock);
397         else if (type == LOCK_NODES)
398                 pthread_mutex_lock(&_nodes_lock);
399         else if (type == LOCK_MOVIECLIP)
400                 pthread_mutex_lock(&_movieclip_lock);
401         else if (type == LOCK_COLORMANAGE)
402                 pthread_mutex_lock(&_colormanage_lock);
403         else if (type == LOCK_FFTW)
404                 pthread_mutex_lock(&_fftw_lock);
405 }
406
407 void BLI_unlock_thread(int type)
408 {
409         if (type == LOCK_IMAGE)
410                 pthread_mutex_unlock(&_image_lock);
411         else if (type == LOCK_DRAW_IMAGE)
412                 pthread_mutex_unlock(&_image_draw_lock);
413         else if (type == LOCK_VIEWER)
414                 pthread_mutex_unlock(&_viewer_lock);
415         else if (type == LOCK_CUSTOM1)
416                 pthread_mutex_unlock(&_custom1_lock);
417         else if (type == LOCK_RCACHE)
418                 pthread_mutex_unlock(&_rcache_lock);
419         else if (type == LOCK_OPENGL)
420                 pthread_mutex_unlock(&_opengl_lock);
421         else if (type == LOCK_NODES)
422                 pthread_mutex_unlock(&_nodes_lock);
423         else if (type == LOCK_MOVIECLIP)
424                 pthread_mutex_unlock(&_movieclip_lock);
425         else if (type == LOCK_COLORMANAGE)
426                 pthread_mutex_unlock(&_colormanage_lock);
427         else if (type == LOCK_FFTW)
428                 pthread_mutex_unlock(&_fftw_lock);
429 }
430
431 /* Mutex Locks */
432
433 void BLI_mutex_init(ThreadMutex *mutex)
434 {
435         pthread_mutex_init(mutex, NULL);
436 }
437
438 void BLI_mutex_lock(ThreadMutex *mutex)
439 {
440         pthread_mutex_lock(mutex);
441 }
442
443 void BLI_mutex_unlock(ThreadMutex *mutex)
444 {
445         pthread_mutex_unlock(mutex);
446 }
447
448 bool BLI_mutex_trylock(ThreadMutex *mutex)
449 {
450         return (pthread_mutex_trylock(mutex) == 0);
451 }
452
453 void BLI_mutex_end(ThreadMutex *mutex)
454 {
455         pthread_mutex_destroy(mutex);
456 }
457
458 ThreadMutex *BLI_mutex_alloc(void)
459 {
460         ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex");
461         BLI_mutex_init(mutex);
462         return mutex;
463 }
464
465 void BLI_mutex_free(ThreadMutex *mutex)
466 {
467         BLI_mutex_end(mutex);
468         MEM_freeN(mutex);
469 }
470
471 /* Spin Locks */
472
473 void BLI_spin_init(SpinLock *spin)
474 {
475 #ifdef __APPLE__
476         *spin = OS_SPINLOCK_INIT;
477 #else
478         pthread_spin_init(spin, 0);
479 #endif
480 }
481
482 void BLI_spin_lock(SpinLock *spin)
483 {
484 #ifdef __APPLE__
485         OSSpinLockLock(spin);
486 #else
487         pthread_spin_lock(spin);
488 #endif
489 }
490
491 void BLI_spin_unlock(SpinLock *spin)
492 {
493 #ifdef __APPLE__
494         OSSpinLockUnlock(spin);
495 #else
496         pthread_spin_unlock(spin);
497 #endif
498 }
499
500 #ifndef __APPLE__
501 void BLI_spin_end(SpinLock *spin)
502 {
503         pthread_spin_destroy(spin);
504 }
505 #else
506 void BLI_spin_end(SpinLock *UNUSED(spin))
507 {
508 }
509 #endif
510
511 /* Read/Write Mutex Lock */
512
513 void BLI_rw_mutex_init(ThreadRWMutex *mutex)
514 {
515         pthread_rwlock_init(mutex, NULL);
516 }
517
518 void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
519 {
520         if (mode == THREAD_LOCK_READ)
521                 pthread_rwlock_rdlock(mutex);
522         else
523                 pthread_rwlock_wrlock(mutex);
524 }
525
526 void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
527 {
528         pthread_rwlock_unlock(mutex);
529 }
530
531 void BLI_rw_mutex_end(ThreadRWMutex *mutex)
532 {
533         pthread_rwlock_destroy(mutex);
534 }
535
536 ThreadRWMutex *BLI_rw_mutex_alloc(void)
537 {
538         ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex");
539         BLI_rw_mutex_init(mutex);
540         return mutex;
541 }
542
543 void BLI_rw_mutex_free(ThreadRWMutex *mutex)
544 {
545         BLI_rw_mutex_end(mutex);
546         MEM_freeN(mutex);
547 }
548
549 /* Ticket Mutex Lock */
550
551 struct TicketMutex {
552         pthread_cond_t cond;
553         pthread_mutex_t mutex;
554         unsigned int queue_head, queue_tail;
555 };
556
557 TicketMutex *BLI_ticket_mutex_alloc(void)
558 {
559         TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex");
560
561         pthread_cond_init(&ticket->cond, NULL);
562         pthread_mutex_init(&ticket->mutex, NULL);
563
564         return ticket;
565 }
566
567 void BLI_ticket_mutex_free(TicketMutex *ticket)
568 {
569         pthread_mutex_destroy(&ticket->mutex);
570         pthread_cond_destroy(&ticket->cond);
571         MEM_freeN(ticket);
572 }
573
574 void BLI_ticket_mutex_lock(TicketMutex *ticket)
575 {
576         unsigned int queue_me;
577
578         pthread_mutex_lock(&ticket->mutex);
579         queue_me = ticket->queue_tail++;
580
581         while (queue_me != ticket->queue_head)
582                 pthread_cond_wait(&ticket->cond, &ticket->mutex);
583
584         pthread_mutex_unlock(&ticket->mutex);
585 }
586
587 void BLI_ticket_mutex_unlock(TicketMutex *ticket)
588 {
589         pthread_mutex_lock(&ticket->mutex);
590         ticket->queue_head++;
591         pthread_cond_broadcast(&ticket->cond);
592         pthread_mutex_unlock(&ticket->mutex);
593 }
594
595 /* ************************************************ */
596
597 /* Condition */
598
599 void BLI_condition_init(ThreadCondition *cond)
600 {
601         pthread_cond_init(cond, NULL);
602 }
603
604 void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
605 {
606         pthread_cond_wait(cond, mutex);
607 }
608
609 void BLI_condition_notify_one(ThreadCondition *cond)
610 {
611         pthread_cond_signal(cond);
612 }
613
614 void BLI_condition_notify_all(ThreadCondition *cond)
615 {
616         pthread_cond_broadcast(cond);
617 }
618
619 void BLI_condition_end(ThreadCondition *cond)
620 {
621         pthread_cond_destroy(cond);
622 }
623
624 /* ************************************************ */
625
626 struct ThreadQueue {
627         GSQueue *queue;
628         pthread_mutex_t mutex;
629         pthread_cond_t push_cond;
630         pthread_cond_t finish_cond;
631         volatile int nowait;
632         volatile int canceled;
633 };
634
635 ThreadQueue *BLI_thread_queue_init(void)
636 {
637         ThreadQueue *queue;
638
639         queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
640         queue->queue = BLI_gsqueue_new(sizeof(void *));
641
642         pthread_mutex_init(&queue->mutex, NULL);
643         pthread_cond_init(&queue->push_cond, NULL);
644         pthread_cond_init(&queue->finish_cond, NULL);
645
646         return queue;
647 }
648
649 void BLI_thread_queue_free(ThreadQueue *queue)
650 {
651         /* destroy everything, assumes no one is using queue anymore */
652         pthread_cond_destroy(&queue->finish_cond);
653         pthread_cond_destroy(&queue->push_cond);
654         pthread_mutex_destroy(&queue->mutex);
655
656         BLI_gsqueue_free(queue->queue);
657
658         MEM_freeN(queue);
659 }
660
661 void BLI_thread_queue_push(ThreadQueue *queue, void *work)
662 {
663         pthread_mutex_lock(&queue->mutex);
664
665         BLI_gsqueue_push(queue->queue, &work);
666
667         /* signal threads waiting to pop */
668         pthread_cond_signal(&queue->push_cond);
669         pthread_mutex_unlock(&queue->mutex);
670 }
671
672 void *BLI_thread_queue_pop(ThreadQueue *queue)
673 {
674         void *work = NULL;
675
676         /* wait until there is work */
677         pthread_mutex_lock(&queue->mutex);
678         while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
679                 pthread_cond_wait(&queue->push_cond, &queue->mutex);
680         
681         /* if we have something, pop it */
682         if (!BLI_gsqueue_is_empty(queue->queue)) {
683                 BLI_gsqueue_pop(queue->queue, &work);
684                 
685                 if (BLI_gsqueue_is_empty(queue->queue))
686                         pthread_cond_broadcast(&queue->finish_cond);
687         }
688
689         pthread_mutex_unlock(&queue->mutex);
690
691         return work;
692 }
693
/* Fill 'timeout' with the absolute wall-clock time 'ms' milliseconds from
 * now, in the form pthread_cond_timedwait() expects. */
static void wait_timeout(struct timespec *timeout, int ms)
{
	ldiv_t split;
	long sec, usec, frac;

#ifdef WIN32
	{
		struct _timeb now;
		_ftime(&now);
		sec = now.time;
		usec = now.millitm * 1000; /* microsecond precision would be better */
	}
#else
	{
		struct timeval now;
		gettimeofday(&now, NULL);
		sec = now.tv_sec;
		usec = now.tv_usec;
	}
#endif

	/* Split the delay into whole seconds plus leftover microseconds, then
	 * normalize so the nanosecond field stays below one second. */
	split = ldiv(ms, 1000);
	sec += split.quot;
	frac = usec + (split.rem * 1000);

	if (frac >= 1000000) {
		sec++;
		frac -= 1000000;
	}

	timeout->tv_sec = sec;
	timeout->tv_nsec = frac * 1000;
}
728
729 void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
730 {
731         double t;
732         void *work = NULL;
733         struct timespec timeout;
734
735         t = PIL_check_seconds_timer();
736         wait_timeout(&timeout, ms);
737
738         /* wait until there is work */
739         pthread_mutex_lock(&queue->mutex);
740         while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
741                 if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
742                         break;
743                 else if (PIL_check_seconds_timer() - t >= ms * 0.001)
744                         break;
745         }
746
747         /* if we have something, pop it */
748         if (!BLI_gsqueue_is_empty(queue->queue)) {
749                 BLI_gsqueue_pop(queue->queue, &work);
750                 
751                 if (BLI_gsqueue_is_empty(queue->queue))
752                         pthread_cond_broadcast(&queue->finish_cond);
753         }
754         
755         pthread_mutex_unlock(&queue->mutex);
756
757         return work;
758 }
759
760 int BLI_thread_queue_size(ThreadQueue *queue)
761 {
762         int size;
763
764         pthread_mutex_lock(&queue->mutex);
765         size = BLI_gsqueue_size(queue->queue);
766         pthread_mutex_unlock(&queue->mutex);
767
768         return size;
769 }
770
771 void BLI_thread_queue_nowait(ThreadQueue *queue)
772 {
773         pthread_mutex_lock(&queue->mutex);
774
775         queue->nowait = 1;
776
777         /* signal threads waiting to pop */
778         pthread_cond_broadcast(&queue->push_cond);
779         pthread_mutex_unlock(&queue->mutex);
780 }
781
782 void BLI_thread_queue_wait_finish(ThreadQueue *queue)
783 {
784         /* wait for finish condition */
785         pthread_mutex_lock(&queue->mutex);
786
787         while (!BLI_gsqueue_is_empty(queue->queue))
788                 pthread_cond_wait(&queue->finish_cond, &queue->mutex);
789
790         pthread_mutex_unlock(&queue->mutex);
791 }
792
793 /* ************************************************ */
794
795 void BLI_begin_threaded_malloc(void)
796 {
797         /* Used for debug only */
798         /* BLI_assert(thread_levels >= 0); */
799
800         if (thread_levels == 0) {
801                 MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
802         }
803         thread_levels++;
804 }
805
806 void BLI_end_threaded_malloc(void)
807 {
808         /* Used for debug only */
809         /* BLI_assert(thread_levels >= 0); */
810
811         thread_levels--;
812         if (thread_levels == 0)
813                 MEM_set_lock_callback(NULL, NULL);
814 }
815