OSX/scons: allow for compiling with clang-openmp-3.4
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
32 #include <stdlib.h>
33 #include <errno.h>
34 #include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38 #include "BLI_listbase.h"
39 #include "BLI_gsqueue.h"
40 #include "BLI_task.h"
41 #include "BLI_threads.h"
42
43 #include "PIL_time.h"
44
45 /* for checking system threads - BLI_system_thread_count */
46 #ifdef WIN32
47 #  include <windows.h>
48 #  include <sys/timeb.h>
49 #elif defined(__APPLE__)
50 #  include <sys/types.h>
51 #  include <sys/sysctl.h>
52 #else
53 #  include <unistd.h>
54 #  include <sys/time.h>
55 #endif
56
57 #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && !defined(__clang__)
58 #  define USE_APPLE_OMP_FIX
59 #endif
60
61 #ifdef USE_APPLE_OMP_FIX
62 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
63 extern pthread_key_t gomp_tls_key;
64 static void *thread_tls_data;
65 #endif
66
67 /* We're using one global task scheduler for all kind of tasks. */
68 static TaskScheduler *task_scheduler = NULL;
69
70 /* ********** basic thread control API ************ 
71  * 
72  * Many thread cases have an X amount of jobs, and only an Y amount of
73  * threads are useful (typically amount of cpus)
74  *
75  * This code can be used to start a maximum amount of 'thread slots', which
76  * then can be filled in a loop with an idle timer. 
77  *
78  * A sample loop can look like this (pseudo c);
79  *
80  *     ListBase lb;
81  *     int maxthreads = 2;
82  *     int cont = 1;
83  * 
84  *     BLI_init_threads(&lb, do_something_func, maxthreads);
85  * 
86  *     while (cont) {
87  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
88  *             // get new job (data pointer)
 89  *             // tag job 'processed'
90  *             BLI_insert_thread(&lb, job);
91  *         }
92  *         else PIL_sleep_ms(50);
93  *         
 94  *         // find if a job is ready; the do_something_func() should write the result into the job somewhere
95  *         cont = 0;
96  *         for (go over all jobs)
97  *             if (job is ready) {
98  *                 if (job was not removed) {
99  *                     BLI_remove_thread(&lb, job);
100  *                 }
101  *             }
102  *             else cont = 1;
103  *         }
104  *         // conditions to exit loop 
105  *         if (if escape loop event) {
106  *             if (BLI_available_threadslots(&lb) == maxthreads)
107  *                 break;
108  *         }
109  *     }
110  * 
111  *     BLI_end_threads(&lb);
112  *
113  ************************************************ */
114 static SpinLock _malloc_lock;
115 static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
116 static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
117 static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
118 static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
119 static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
120 static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
121 static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
122 static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
123 static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
124 static pthread_t mainid;
125 static int thread_levels = 0;  /* threads can be invoked inside threads */
126 static int num_threads_override = 0;
127
128 /* just a max for security reasons */
129 #define RE_MAX_THREAD BLENDER_MAX_THREADS
130
/* One slot in the thread list managed by BLI_init_threads()/BLI_end_threads().
 * A slot is reused for successive jobs: BLI_insert_thread() claims a free slot
 * and spawns a pthread; the various BLI_remove_*() functions join it and mark
 * the slot free again. */
typedef struct ThreadSlot {
        struct ThreadSlot *next, *prev;  /* listbase links (slot lives in a ListBase) */
        void *(*do_thread)(void *);      /* worker entry point, shared by all slots of one threadbase */
        void *callerdata;                /* opaque per-job data handed to do_thread */
        pthread_t pthread;               /* only valid while avail == 0 (thread running) */
        int avail;                       /* 1 if the slot is free to take a new job */
} ThreadSlot;
138
/* Callbacks installed into the guarded allocator (MEM_set_lock_callback) so
 * that MEM_* allocations become thread-safe while threads are running. */
static void BLI_lock_malloc_thread(void)
{
        BLI_spin_lock(&_malloc_lock);
}

static void BLI_unlock_malloc_thread(void)
{
        BLI_spin_unlock(&_malloc_lock);
}
148
/* One-time initialization of the thread API; must be called from the main
 * thread (it records the caller as "main" for BLI_thread_is_main()). */
void BLI_threadapi_init(void)
{
        mainid = pthread_self();

        BLI_spin_init(&_malloc_lock);
}
155
156 void BLI_threadapi_exit(void)
157 {
158         if (task_scheduler) {
159                 BLI_task_scheduler_free(task_scheduler);
160         }
161         BLI_spin_end(&_malloc_lock);
162 }
163
/* Return the global task scheduler, creating it on first use with one worker
 * per system thread.
 *
 * NOTE(review): the lazy initialization here is not guarded by any lock, so
 * the first call presumably must happen on the main thread before any
 * concurrent callers exist — TODO confirm against callers. */
TaskScheduler *BLI_task_scheduler_get(void)
{
        if (task_scheduler == NULL) {
                int tot_thread = BLI_system_thread_count();

                /* Do a lazy initialization, so it happens after
                 * command line arguments parsing
                 */
                task_scheduler = BLI_task_scheduler_create(tot_thread);
        }

        return task_scheduler;
}
177
/* Create a threadbase with `tot` reusable thread slots that will all run
 * `do_thread` (see the usage example in the comment block above).
 *
 * tot = 0 (or threadbase = NULL) only installs the malloc lock callbacks in a
 * safe way (see sequence.c); problem otherwise: scene render will kill off
 * the mutex!
 *
 * Always increments thread_levels; every call must be paired with
 * BLI_end_threads() on a non-empty threadbase so the count stays balanced. */
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
{
        int a;

        if (threadbase != NULL && tot > 0) {
                BLI_listbase_clear(threadbase);
        
                /* clamp to [1, RE_MAX_THREAD] */
                if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
                else if (tot < 1) tot = 1;
        
                for (a = 0; a < tot; a++) {
                        ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
                        BLI_addtail(threadbase, tslot);
                        tslot->do_thread = do_thread;
                        tslot->avail = 1;
                }
        }
        
        /* first (outermost) level of threading: make guarded malloc thread-safe */
        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);

#ifdef USE_APPLE_OMP_FIX
                /* workaround for Apple gcc 4.2.1 omp vs background thread bug:
                 * we copy the gomp thread-local storage pointer here so we can
                 * set it again inside each thread that we start */
                thread_tls_data = pthread_getspecific(gomp_tls_key);
#endif
        }

        thread_levels++;
}
213
214 /* amount of available threads */
215 int BLI_available_threads(ListBase *threadbase)
216 {
217         ThreadSlot *tslot;
218         int counter = 0;
219         
220         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
221                 if (tslot->avail)
222                         counter++;
223         }
224         return counter;
225 }
226
227 /* returns thread number, for sample patterns or threadsafe tables */
228 int BLI_available_thread_index(ListBase *threadbase)
229 {
230         ThreadSlot *tslot;
231         int counter = 0;
232         
233         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
234                 if (tslot->avail)
235                         return counter;
236         }
237         return 0;
238 }
239
/* pthread entry trampoline: unwraps the ThreadSlot and calls the user's
 * do_thread callback with its job data; the callback's return value becomes
 * the thread's exit value (retrievable via pthread_join). */
static void *tslot_thread_start(void *tslot_p)
{
        ThreadSlot *tslot = (ThreadSlot *)tslot_p;

#ifdef USE_APPLE_OMP_FIX
        /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
         * set gomp thread local storage pointer which was copied beforehand */
        pthread_setspecific(gomp_tls_key, thread_tls_data);
#endif

        return tslot->do_thread(tslot->callerdata);
}
252
/* True if the calling thread is the one that ran BLI_threadapi_init(). */
int BLI_thread_is_main(void)
{
        return pthread_equal(pthread_self(), mainid);
}
257
258 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
259 {
260         ThreadSlot *tslot;
261         
262         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
263                 if (tslot->avail) {
264                         tslot->avail = 0;
265                         tslot->callerdata = callerdata;
266                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
267                         return;
268                 }
269         }
270         printf("ERROR: could not insert thread slot\n");
271 }
272
273 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
274 {
275         ThreadSlot *tslot;
276         
277         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
278                 if (tslot->callerdata == callerdata) {
279                         pthread_join(tslot->pthread, NULL);
280                         tslot->callerdata = NULL;
281                         tslot->avail = 1;
282                 }
283         }
284 }
285
286 void BLI_remove_thread_index(ListBase *threadbase, int index)
287 {
288         ThreadSlot *tslot;
289         int counter = 0;
290         
291         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
292                 if (counter == index && tslot->avail == 0) {
293                         pthread_join(tslot->pthread, NULL);
294                         tslot->callerdata = NULL;
295                         tslot->avail = 1;
296                         break;
297                 }
298         }
299 }
300
301 void BLI_remove_threads(ListBase *threadbase)
302 {
303         ThreadSlot *tslot;
304         
305         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
306                 if (tslot->avail == 0) {
307                         pthread_join(tslot->pthread, NULL);
308                         tslot->callerdata = NULL;
309                         tslot->avail = 1;
310                 }
311         }
312 }
313
/* Tear down a threadbase created by BLI_init_threads(): join any still-busy
 * threads, free all slots, and drop one thread_levels reference (restoring
 * single-threaded malloc when the count reaches zero).
 *
 * NOTE(review): thread_levels is decremented unconditionally, so a call with
 * an empty/NULL threadbase still decrements — presumably this matches the
 * "tot = 0 only initializes malloc mutex" path of BLI_init_threads; confirm
 * callers keep init/end calls paired. */
void BLI_end_threads(ListBase *threadbase)
{
        ThreadSlot *tslot;
        
        /* only needed if there's actually some stuff to end
         * this way we don't end up decrementing thread_levels on an empty threadbase 
         * */
        if (threadbase && (BLI_listbase_is_empty(threadbase) == false)) {
                for (tslot = threadbase->first; tslot; tslot = tslot->next) {
                        if (tslot->avail == 0) {
                                pthread_join(tslot->pthread, NULL);
                        }
                }
                BLI_freelistN(threadbase);
        }

        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
334
335 /* System Information */
336
337 /* how many threads are native on this system? */
338 int BLI_system_thread_count(void)
339 {
340         int t;
341 #ifdef WIN32
342         SYSTEM_INFO info;
343         GetSystemInfo(&info);
344         t = (int) info.dwNumberOfProcessors;
345 #else 
346 #   ifdef __APPLE__
347         int mib[2];
348         size_t len;
349         
350         mib[0] = CTL_HW;
351         mib[1] = HW_NCPU;
352         len = sizeof(t);
353         sysctl(mib, 2, &t, &len, NULL, 0);
354 #   else
355         t = (int)sysconf(_SC_NPROCESSORS_ONLN);
356 #   endif
357 #endif
358
359         if (num_threads_override > 0)
360                 return num_threads_override;
361         
362         if (t > RE_MAX_THREAD)
363                 return RE_MAX_THREAD;
364         if (t < 1)
365                 return 1;
366         
367         return t;
368 }
369
/* Set/get a user override for the thread count; a value > 0 makes
 * BLI_system_thread_count() return it unconditionally, 0 disables the
 * override (used for the -t command line option). */
void BLI_system_num_threads_override_set(int num)
{
        num_threads_override = num;
}

int BLI_system_num_threads_override_get(void)
{
        return num_threads_override;
}
379
380 /* Global Mutex Locks */
381
382 void BLI_lock_thread(int type)
383 {
384         if (type == LOCK_IMAGE)
385                 pthread_mutex_lock(&_image_lock);
386         else if (type == LOCK_DRAW_IMAGE)
387                 pthread_mutex_lock(&_image_draw_lock);
388         else if (type == LOCK_VIEWER)
389                 pthread_mutex_lock(&_viewer_lock);
390         else if (type == LOCK_CUSTOM1)
391                 pthread_mutex_lock(&_custom1_lock);
392         else if (type == LOCK_RCACHE)
393                 pthread_mutex_lock(&_rcache_lock);
394         else if (type == LOCK_OPENGL)
395                 pthread_mutex_lock(&_opengl_lock);
396         else if (type == LOCK_NODES)
397                 pthread_mutex_lock(&_nodes_lock);
398         else if (type == LOCK_MOVIECLIP)
399                 pthread_mutex_lock(&_movieclip_lock);
400         else if (type == LOCK_COLORMANAGE)
401                 pthread_mutex_lock(&_colormanage_lock);
402 }
403
404 void BLI_unlock_thread(int type)
405 {
406         if (type == LOCK_IMAGE)
407                 pthread_mutex_unlock(&_image_lock);
408         else if (type == LOCK_DRAW_IMAGE)
409                 pthread_mutex_unlock(&_image_draw_lock);
410         else if (type == LOCK_VIEWER)
411                 pthread_mutex_unlock(&_viewer_lock);
412         else if (type == LOCK_CUSTOM1)
413                 pthread_mutex_unlock(&_custom1_lock);
414         else if (type == LOCK_RCACHE)
415                 pthread_mutex_unlock(&_rcache_lock);
416         else if (type == LOCK_OPENGL)
417                 pthread_mutex_unlock(&_opengl_lock);
418         else if (type == LOCK_NODES)
419                 pthread_mutex_unlock(&_nodes_lock);
420         else if (type == LOCK_MOVIECLIP)
421                 pthread_mutex_unlock(&_movieclip_lock);
422         else if (type == LOCK_COLORMANAGE)
423                 pthread_mutex_unlock(&_colormanage_lock);
424 }
425
/* Mutex Locks: thin wrappers around pthread mutexes with default attributes. */

void BLI_mutex_init(ThreadMutex *mutex)
{
        pthread_mutex_init(mutex, NULL);
}

void BLI_mutex_lock(ThreadMutex *mutex)
{
        pthread_mutex_lock(mutex);
}

void BLI_mutex_unlock(ThreadMutex *mutex)
{
        pthread_mutex_unlock(mutex);
}

/* Non-blocking acquire; returns true if the lock was obtained. */
bool BLI_mutex_trylock(ThreadMutex *mutex)
{
        return (pthread_mutex_trylock(mutex) == 0);
}

/* Destroy a mutex initialized with BLI_mutex_init(); must be unlocked. */
void BLI_mutex_end(ThreadMutex *mutex)
{
        pthread_mutex_destroy(mutex);
}
452
/* Heap-allocated variant; caller owns the result and must release it with
 * BLI_mutex_free(). */
ThreadMutex *BLI_mutex_alloc(void)
{
        ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex");
        BLI_mutex_init(mutex);
        return mutex;
}

void BLI_mutex_free(ThreadMutex *mutex)
{
        BLI_mutex_end(mutex);
        MEM_freeN(mutex);
}
465
/* Spin Locks
 *
 * OS X has no pthread spinlocks, so the OSSpinLock API is used there instead
 * (SpinLock is typedef'd per-platform in BLI_threads.h). */

void BLI_spin_init(SpinLock *spin)
{
#ifdef __APPLE__
        *spin = OS_SPINLOCK_INIT;
#else
        pthread_spin_init(spin, 0);
#endif
}

void BLI_spin_lock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockLock(spin);
#else
        pthread_spin_lock(spin);
#endif
}

void BLI_spin_unlock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockUnlock(spin);
#else
        pthread_spin_unlock(spin);
#endif
}

#ifndef __APPLE__
void BLI_spin_end(SpinLock *spin)
{
        pthread_spin_destroy(spin);
}
#else
/* OSSpinLock is a plain integer; nothing to destroy. */
void BLI_spin_end(SpinLock *UNUSED(spin))
{
}
#endif
505
/* Read/Write Mutex Lock: thin wrappers around pthread rwlocks. Multiple
 * readers may hold the lock concurrently; writers are exclusive. */

void BLI_rw_mutex_init(ThreadRWMutex *mutex)
{
        pthread_rwlock_init(mutex, NULL);
}

/* mode is THREAD_LOCK_READ (shared) or anything else for write (exclusive) */
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
{
        if (mode == THREAD_LOCK_READ)
                pthread_rwlock_rdlock(mutex);
        else
                pthread_rwlock_wrlock(mutex);
}

void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
{
        pthread_rwlock_unlock(mutex);
}

void BLI_rw_mutex_end(ThreadRWMutex *mutex)
{
        pthread_rwlock_destroy(mutex);
}

/* Heap-allocated variant; release with BLI_rw_mutex_free(). */
ThreadRWMutex *BLI_rw_mutex_alloc(void)
{
        ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex");
        BLI_rw_mutex_init(mutex);
        return mutex;
}

void BLI_rw_mutex_free(ThreadRWMutex *mutex)
{
        BLI_rw_mutex_end(mutex);
        MEM_freeN(mutex);
}
543
/* Ticket Mutex Lock: a FIFO (fair) mutex. Each locker takes a ticket number
 * from queue_tail and waits until queue_head reaches it, so locks are granted
 * strictly in request order. */

struct TicketMutex {
        pthread_cond_t cond;             /* signaled whenever queue_head advances */
        pthread_mutex_t mutex;           /* protects the two counters */
        unsigned int queue_head, queue_tail;  /* now-serving / next-ticket counters */
};
551
/* Allocate and initialize a ticket mutex (counters start at zero via
 * MEM_callocN); release with BLI_ticket_mutex_free(). */
TicketMutex *BLI_ticket_mutex_alloc(void)
{
        TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex");

        pthread_cond_init(&ticket->cond, NULL);
        pthread_mutex_init(&ticket->mutex, NULL);

        return ticket;
}

/* Destroy and free a ticket mutex; must not be held or waited on. */
void BLI_ticket_mutex_free(TicketMutex *ticket)
{
        pthread_mutex_destroy(&ticket->mutex);
        pthread_cond_destroy(&ticket->cond);
        MEM_freeN(ticket);
}
568
/* Acquire the ticket mutex: take the next ticket, then wait (releasing the
 * internal mutex while blocked) until it is our turn. Unsigned wrap-around of
 * the counters is harmless since only equality is tested. */
void BLI_ticket_mutex_lock(TicketMutex *ticket)
{
        unsigned int queue_me;

        pthread_mutex_lock(&ticket->mutex);
        queue_me = ticket->queue_tail++;

        while (queue_me != ticket->queue_head)
                pthread_cond_wait(&ticket->cond, &ticket->mutex);

        pthread_mutex_unlock(&ticket->mutex);
}

/* Release the ticket mutex: advance the "now serving" counter and wake all
 * waiters so the holder of the next ticket can proceed (broadcast, not
 * signal, because waiters must each re-check their own ticket). */
void BLI_ticket_mutex_unlock(TicketMutex *ticket)
{
        pthread_mutex_lock(&ticket->mutex);
        ticket->queue_head++;
        pthread_cond_broadcast(&ticket->cond);
        pthread_mutex_unlock(&ticket->mutex);
}
589
590 /* ************************************************ */
591
/* Condition: thin wrappers around pthread condition variables. The caller
 * must hold `mutex` when calling BLI_condition_wait(), per pthread rules. */

void BLI_condition_init(ThreadCondition *cond)
{
        pthread_cond_init(cond, NULL);
}

void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
{
        pthread_cond_wait(cond, mutex);
}

/* Wake a single waiter. */
void BLI_condition_notify_one(ThreadCondition *cond)
{
        pthread_cond_signal(cond);
}

/* Wake all waiters. */
void BLI_condition_notify_all(ThreadCondition *cond)
{
        pthread_cond_broadcast(cond);
}

void BLI_condition_end(ThreadCondition *cond)
{
        pthread_cond_destroy(cond);
}
618
619 /* ************************************************ */
620
/* Thread-safe FIFO work queue built on GSQueue plus a mutex and two
 * condition variables (one for "work pushed", one for "queue drained"). */
struct ThreadQueue {
        GSQueue *queue;                  /* underlying FIFO of void* work items */
        pthread_mutex_t mutex;           /* protects queue and flags below */
        pthread_cond_t push_cond;        /* signaled when work is pushed or nowait is set */
        pthread_cond_t finish_cond;      /* broadcast when the queue becomes empty */
        volatile int nowait;             /* when set, pop() no longer blocks on empty */
        volatile int canceled;           /* NOTE(review): set nowhere in this file — presumably used by callers elsewhere */
};
629
/* Allocate an empty thread queue; release with BLI_thread_queue_free().
 * Items are stored by value as void* pointers. */
ThreadQueue *BLI_thread_queue_init(void)
{
        ThreadQueue *queue;

        queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
        queue->queue = BLI_gsqueue_new(sizeof(void *));

        pthread_mutex_init(&queue->mutex, NULL);
        pthread_cond_init(&queue->push_cond, NULL);
        pthread_cond_init(&queue->finish_cond, NULL);

        return queue;
}
643
/* Free a thread queue and its synchronization primitives.
 * Any work items still queued are NOT freed — they are caller-owned. */
void BLI_thread_queue_free(ThreadQueue *queue)
{
        /* destroy everything, assumes no one is using queue anymore */
        pthread_cond_destroy(&queue->finish_cond);
        pthread_cond_destroy(&queue->push_cond);
        pthread_mutex_destroy(&queue->mutex);

        BLI_gsqueue_free(queue->queue);

        MEM_freeN(queue);
}
655
/* Append a work item (opaque pointer, caller-owned) and wake one waiting
 * consumer. */
void BLI_thread_queue_push(ThreadQueue *queue, void *work)
{
        pthread_mutex_lock(&queue->mutex);

        BLI_gsqueue_push(queue->queue, &work);

        /* signal threads waiting to pop */
        pthread_cond_signal(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}
666
/* Pop one work item, blocking until work is available. Returns NULL only
 * when the queue is empty and nowait mode has been enabled via
 * BLI_thread_queue_nowait(). Broadcasts finish_cond when the pop empties the
 * queue, waking BLI_thread_queue_wait_finish(). */
void *BLI_thread_queue_pop(ThreadQueue *queue)
{
        void *work = NULL;

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
                pthread_cond_wait(&queue->push_cond, &queue->mutex);
        
        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);
                
                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }

        pthread_mutex_unlock(&queue->mutex);

        return work;
}
688
/* Fill `timeout` with the absolute time "now + ms milliseconds", in the form
 * pthread_cond_timedwait() expects. */
static void wait_timeout(struct timespec *timeout, int ms)
{
        long sec, usec;

#ifdef WIN32
        {
                struct _timeb now;
                _ftime(&now);
                sec = now.time;
                usec = now.millitm * 1000; /* microsecond precision would be better */
        }
#else
        {
                struct timeval now;
                gettimeofday(&now, NULL);
                sec = now.tv_sec;
                usec = now.tv_usec;
        }
#endif

        /* split the millisecond offset into whole seconds and microseconds,
         * then add it to the current time */
        sec += ms / 1000;
        usec += (ms % 1000) * 1000;

        /* carry microsecond overflow into the seconds part */
        if (usec >= 1000000) {
                sec += 1;
                usec -= 1000000;
        }

        timeout->tv_sec = sec;
        timeout->tv_nsec = usec * 1000;
}
723
/* Like BLI_thread_queue_pop(), but gives up after roughly `ms` milliseconds
 * and returns NULL if no work arrived. The wall-clock re-check guards against
 * the absolute timedwait deadline being missed (e.g. after spurious wakeups
 * or system clock adjustments). */
void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
{
        double t;
        void *work = NULL;
        struct timespec timeout;

        t = PIL_check_seconds_timer();
        /* absolute deadline for pthread_cond_timedwait */
        wait_timeout(&timeout, ms);

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
                if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
                        break;
                else if (PIL_check_seconds_timer() - t >= ms * 0.001)
                        break;
        }

        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);
                
                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }
        
        pthread_mutex_unlock(&queue->mutex);

        return work;
}
754
/* Number of items currently queued; only a snapshot, may be stale by the
 * time the caller uses it. */
int BLI_thread_queue_size(ThreadQueue *queue)
{
        int size;

        pthread_mutex_lock(&queue->mutex);
        size = BLI_gsqueue_size(queue->queue);
        pthread_mutex_unlock(&queue->mutex);

        return size;
}
765
/* Switch the queue to non-blocking mode: wake all blocked consumers so
 * BLI_thread_queue_pop() drains remaining items and then returns NULL
 * instead of waiting. Used to shut down worker threads. */
void BLI_thread_queue_nowait(ThreadQueue *queue)
{
        pthread_mutex_lock(&queue->mutex);

        queue->nowait = 1;

        /* signal threads waiting to pop */
        pthread_cond_broadcast(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}
776
/* Block until the queue becomes empty, i.e. consumers have popped every item.
 * Note: "popped" does not mean the work has finished executing, only that it
 * has been taken off the queue. */
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
{
        /* wait for finish condition */
        pthread_mutex_lock(&queue->mutex);

        while (!BLI_gsqueue_is_empty(queue->queue))
                pthread_cond_wait(&queue->finish_cond, &queue->mutex);

        pthread_mutex_unlock(&queue->mutex);
}
787
788 /* ************************************************ */
789
/* Reference-counted enable/disable of thread-safe guarded malloc, for code
 * that spawns threads without going through BLI_init_threads().
 *
 * NOTE(review): thread_levels itself is a plain int with no atomic/locked
 * update, so begin/end presumably must only be called from the main thread —
 * confirm against callers. */
void BLI_begin_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
        }
        thread_levels++;
}

/* Counterpart of BLI_begin_threaded_malloc(); restores lock-free malloc when
 * the last level ends. */
void BLI_end_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
810