Sculpt/dyntopo: Make the omp threads configurable to overcome performance issues
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
32 #include <stdlib.h>
33 #include <errno.h>
34 #include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38 #include "BLI_listbase.h"
39 #include "BLI_gsqueue.h"
40 #include "BLI_task.h"
41 #include "BLI_threads.h"
42
43 #include "PIL_time.h"
44
45 /* for checking system threads - BLI_system_thread_count */
46 #ifdef WIN32
47 #  include <windows.h>
48 #  include <sys/timeb.h>
49 #elif defined(__APPLE__)
50 #  include <sys/types.h>
51 #  include <sys/sysctl.h>
52 #else
53 #  include <unistd.h>
54 #  include <sys/time.h>
55 #endif
56
57 #ifdef _OPENMP
58 #include <omp.h>
59 #endif
60
61 #if defined(__APPLE__)
62 #if defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && !defined(__clang__)
63 #  define USE_APPLE_OMP_FIX
64 #endif
65
66 /* how many cores not counting HT aka pysical cores */
67 static int system_physical_thread_count(void)
68 {
69         int ptcount;
70         size_t ptcount_len = sizeof(ptcount);
71         sysctlbyname("hw.physicalcpu", &ptcount, &ptcount_len, NULL, 0);
72         return ptcount;
73 }
74 #endif // __APPLE__
75
76 #ifdef USE_APPLE_OMP_FIX
77 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
78 extern pthread_key_t gomp_tls_key;
79 static void *thread_tls_data;
80 #endif
81
82 /* We're using one global task scheduler for all kind of tasks. */
83 static TaskScheduler *task_scheduler = NULL;
84
85 /* ********** basic thread control API ************ 
86  * 
87  * Many thread cases have an X amount of jobs, and only an Y amount of
88  * threads are useful (typically amount of cpus)
89  *
90  * This code can be used to start a maximum amount of 'thread slots', which
91  * then can be filled in a loop with an idle timer. 
92  *
93  * A sample loop can look like this (pseudo c);
94  *
95  *     ListBase lb;
96  *     int maxthreads = 2;
97  *     int cont = 1;
98  * 
99  *     BLI_init_threads(&lb, do_something_func, maxthreads);
100  * 
101  *     while (cont) {
102  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
103  *             // get new job (data pointer)
104  *             // tag job 'processed 
105  *             BLI_insert_thread(&lb, job);
106  *         }
107  *         else PIL_sleep_ms(50);
108  *         
 *         // find if a job is ready; do_something_func() is expected to have written its result into the job somewhere
110  *         cont = 0;
111  *         for (go over all jobs)
112  *             if (job is ready) {
113  *                 if (job was not removed) {
114  *                     BLI_remove_thread(&lb, job);
115  *                 }
116  *             }
117  *             else cont = 1;
118  *         }
119  *         // conditions to exit loop 
120  *         if (if escape loop event) {
121  *             if (BLI_available_threadslots(&lb) == maxthreads)
122  *                 break;
123  *         }
124  *     }
125  * 
126  *     BLI_end_threads(&lb);
127  *
128  ************************************************ */
/* Spin lock protecting guardedalloc; installed as MEM lock callback while
 * threaded code runs (see BLI_init_threads / BLI_begin_threaded_malloc). */
static SpinLock _malloc_lock;
/* One global mutex per LOCK_* type, used by BLI_lock_thread()/BLI_unlock_thread(). */
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _fftw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t mainid;  /* id of the main thread, set in BLI_threadapi_init() */
static int thread_levels = 0;  /* threads can be invoked inside threads */
static int num_threads_override = 0;  /* user override for BLI_system_thread_count(); 0 = disabled */

/* just a max for security reasons */
#define RE_MAX_THREAD BLENDER_MAX_THREADS

/* One slot per potential worker thread, kept in a ListBase managed by
 * BLI_init_threads() / BLI_insert_thread() / BLI_end_threads(). */
typedef struct ThreadSlot {
        struct ThreadSlot *next, *prev;
        void *(*do_thread)(void *);  /* worker function run by the slot's thread */
        void *callerdata;            /* argument handed to do_thread */
        pthread_t pthread;           /* only valid while avail == 0 */
        int avail;                   /* 1 when the slot is free for BLI_insert_thread() */
} ThreadSlot;
154
/* guardedalloc lock callback: serialize allocations while threads run */
static void BLI_lock_malloc_thread(void)
{
        BLI_spin_lock(&_malloc_lock);
}

/* guardedalloc unlock callback, counterpart of BLI_lock_malloc_thread() */
static void BLI_unlock_malloc_thread(void)
{
        BLI_spin_unlock(&_malloc_lock);
}
164
165 void BLI_threadapi_init(void)
166 {
167         mainid = pthread_self();
168
169         BLI_spin_init(&_malloc_lock);
170 }
171
172 void BLI_threadapi_exit(void)
173 {
174         if (task_scheduler) {
175                 BLI_task_scheduler_free(task_scheduler);
176         }
177         BLI_spin_end(&_malloc_lock);
178 }
179
/* Returns the process-wide task scheduler, creating it on first use.
 * NOTE(review): the lazy creation is not guarded by any lock — this appears
 * to rely on the first call happening before worker threads exist; confirm
 * with callers before invoking from multiple threads concurrently. */
TaskScheduler *BLI_task_scheduler_get(void)
{
        if (task_scheduler == NULL) {
                int tot_thread = BLI_system_thread_count();

                /* Do a lazy initialization, so it happens after
                 * command line arguments parsing
                 */
                task_scheduler = BLI_task_scheduler_create(tot_thread);
        }

        return task_scheduler;
}
193
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
 * problem otherwise: scene render will kill of the mutex!
 */

/* Create 'tot' idle thread slots in 'threadbase' (clamped to 1..RE_MAX_THREAD),
 * each of which will run 'do_thread' once filled by BLI_insert_thread().
 * On the outermost nesting level this also installs the guardedalloc lock
 * callbacks. Counterpart of BLI_end_threads(). */
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
{
        int a;

        if (threadbase != NULL && tot > 0) {
                BLI_listbase_clear(threadbase);

                if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
                else if (tot < 1) tot = 1;

                for (a = 0; a < tot; a++) {
                        ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
                        BLI_addtail(threadbase, tslot);
                        tslot->do_thread = do_thread;
                        tslot->avail = 1;
                }
        }

        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);

#ifdef USE_APPLE_OMP_FIX
                /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
                 * we copy gomp thread local storage pointer to setting it again
                 * inside the thread that we start */
                thread_tls_data = pthread_getspecific(gomp_tls_key);
#endif
        }

        /* balanced by the decrement in BLI_end_threads() */
        thread_levels++;
}
229
230 /* amount of available threads */
231 int BLI_available_threads(ListBase *threadbase)
232 {
233         ThreadSlot *tslot;
234         int counter = 0;
235         
236         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
237                 if (tslot->avail)
238                         counter++;
239         }
240         return counter;
241 }
242
243 /* returns thread number, for sample patterns or threadsafe tables */
244 int BLI_available_thread_index(ListBase *threadbase)
245 {
246         ThreadSlot *tslot;
247         int counter = 0;
248         
249         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
250                 if (tslot->avail)
251                         return counter;
252         }
253         return 0;
254 }
255
/* pthread entry point for a ThreadSlot: runs the slot's do_thread callback
 * on its callerdata; the return value is delivered through pthread_join(). */
static void *tslot_thread_start(void *tslot_p)
{
        ThreadSlot *tslot = (ThreadSlot *)tslot_p;

#ifdef USE_APPLE_OMP_FIX
        /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
         * set gomp thread local storage pointer which was copied beforehand */
        pthread_setspecific(gomp_tls_key, thread_tls_data);
#endif

        return tslot->do_thread(tslot->callerdata);
}
268
/* Returns non-zero when called from the thread that ran BLI_threadapi_init(). */
int BLI_thread_is_main(void)
{
        return pthread_equal(pthread_self(), mainid);
}
273
274 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
275 {
276         ThreadSlot *tslot;
277         
278         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
279                 if (tslot->avail) {
280                         tslot->avail = 0;
281                         tslot->callerdata = callerdata;
282                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
283                         return;
284                 }
285         }
286         printf("ERROR: could not insert thread slot\n");
287 }
288
289 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
290 {
291         ThreadSlot *tslot;
292         
293         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
294                 if (tslot->callerdata == callerdata) {
295                         pthread_join(tslot->pthread, NULL);
296                         tslot->callerdata = NULL;
297                         tslot->avail = 1;
298                 }
299         }
300 }
301
302 void BLI_remove_thread_index(ListBase *threadbase, int index)
303 {
304         ThreadSlot *tslot;
305         int counter = 0;
306         
307         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
308                 if (counter == index && tslot->avail == 0) {
309                         pthread_join(tslot->pthread, NULL);
310                         tslot->callerdata = NULL;
311                         tslot->avail = 1;
312                         break;
313                 }
314         }
315 }
316
317 void BLI_remove_threads(ListBase *threadbase)
318 {
319         ThreadSlot *tslot;
320         
321         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
322                 if (tslot->avail == 0) {
323                         pthread_join(tslot->pthread, NULL);
324                         tslot->callerdata = NULL;
325                         tslot->avail = 1;
326                 }
327         }
328 }
329
/* Join any still-running workers, free all slots, and decrement thread_levels,
 * removing the malloc lock callbacks at the outermost level.
 * Counterpart of BLI_init_threads(). */
void BLI_end_threads(ListBase *threadbase)
{
        ThreadSlot *tslot;

        /* only needed if there's actually some stuff to end
         * this way we don't end up decrementing thread_levels on an empty threadbase
         * */
        if (threadbase && (BLI_listbase_is_empty(threadbase) == false)) {
                for (tslot = threadbase->first; tslot; tslot = tslot->next) {
                        if (tslot->avail == 0) {
                                pthread_join(tslot->pthread, NULL);
                        }
                }
                BLI_freelistN(threadbase);
        }

        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
350
/* System Information */

/* gets the number of openmp threads the system can make use of */
int BLI_omp_thread_count(void)
{
        int t;
#ifdef _OPENMP
#ifdef __APPLE__
        /* on Apple use the physical core count only, not the logical
         * (hyper-threaded) count — see system_physical_thread_count() */
        t = system_physical_thread_count();
#else
        t = omp_get_num_procs();
#endif
#else
        /* built without OpenMP: single thread */
        t = 1;
#endif
        return t;
}
368
369 /* how many threads are native on this system? */
370 int BLI_system_thread_count(void)
371 {
372         int t;
373 #ifdef WIN32
374         SYSTEM_INFO info;
375         GetSystemInfo(&info);
376         t = (int) info.dwNumberOfProcessors;
377 #else 
378 #   ifdef __APPLE__
379         int mib[2];
380         size_t len;
381         
382         mib[0] = CTL_HW;
383         mib[1] = HW_NCPU;
384         len = sizeof(t);
385         sysctl(mib, 2, &t, &len, NULL, 0);
386 #   else
387         t = (int)sysconf(_SC_NPROCESSORS_ONLN);
388 #   endif
389 #endif
390
391         if (num_threads_override > 0)
392                 return num_threads_override;
393         
394         if (t > RE_MAX_THREAD)
395                 return RE_MAX_THREAD;
396         if (t < 1)
397                 return 1;
398         
399         return t;
400 }
401
/* Set a user-requested thread count; a value > 0 makes
 * BLI_system_thread_count() return it instead of the detected count,
 * 0 disables the override. */
void BLI_system_num_threads_override_set(int num)
{
        num_threads_override = num;
}

/* Current override value (0 when disabled). */
int BLI_system_num_threads_override_get(void)
{
        return num_threads_override;
}
411
412 /* Global Mutex Locks */
413
414 void BLI_lock_thread(int type)
415 {
416         if (type == LOCK_IMAGE)
417                 pthread_mutex_lock(&_image_lock);
418         else if (type == LOCK_DRAW_IMAGE)
419                 pthread_mutex_lock(&_image_draw_lock);
420         else if (type == LOCK_VIEWER)
421                 pthread_mutex_lock(&_viewer_lock);
422         else if (type == LOCK_CUSTOM1)
423                 pthread_mutex_lock(&_custom1_lock);
424         else if (type == LOCK_RCACHE)
425                 pthread_mutex_lock(&_rcache_lock);
426         else if (type == LOCK_OPENGL)
427                 pthread_mutex_lock(&_opengl_lock);
428         else if (type == LOCK_NODES)
429                 pthread_mutex_lock(&_nodes_lock);
430         else if (type == LOCK_MOVIECLIP)
431                 pthread_mutex_lock(&_movieclip_lock);
432         else if (type == LOCK_COLORMANAGE)
433                 pthread_mutex_lock(&_colormanage_lock);
434         else if (type == LOCK_FFTW)
435                 pthread_mutex_lock(&_fftw_lock);
436 }
437
438 void BLI_unlock_thread(int type)
439 {
440         if (type == LOCK_IMAGE)
441                 pthread_mutex_unlock(&_image_lock);
442         else if (type == LOCK_DRAW_IMAGE)
443                 pthread_mutex_unlock(&_image_draw_lock);
444         else if (type == LOCK_VIEWER)
445                 pthread_mutex_unlock(&_viewer_lock);
446         else if (type == LOCK_CUSTOM1)
447                 pthread_mutex_unlock(&_custom1_lock);
448         else if (type == LOCK_RCACHE)
449                 pthread_mutex_unlock(&_rcache_lock);
450         else if (type == LOCK_OPENGL)
451                 pthread_mutex_unlock(&_opengl_lock);
452         else if (type == LOCK_NODES)
453                 pthread_mutex_unlock(&_nodes_lock);
454         else if (type == LOCK_MOVIECLIP)
455                 pthread_mutex_unlock(&_movieclip_lock);
456         else if (type == LOCK_COLORMANAGE)
457                 pthread_mutex_unlock(&_colormanage_lock);
458         else if (type == LOCK_FFTW)
459                 pthread_mutex_unlock(&_fftw_lock);
460 }
461
/* Mutex Locks */

/* Initialize a caller-owned mutex (wrapper over pthread_mutex_init). */
void BLI_mutex_init(ThreadMutex *mutex)
{
        pthread_mutex_init(mutex, NULL);
}

/* Block until the mutex is acquired. */
void BLI_mutex_lock(ThreadMutex *mutex)
{
        pthread_mutex_lock(mutex);
}

/* Release a mutex previously acquired by this thread. */
void BLI_mutex_unlock(ThreadMutex *mutex)
{
        pthread_mutex_unlock(mutex);
}

/* Try to acquire the mutex without blocking; returns true on success. */
bool BLI_mutex_trylock(ThreadMutex *mutex)
{
        return (pthread_mutex_trylock(mutex) == 0);
}

/* Destroy a mutex initialized with BLI_mutex_init (must be unlocked). */
void BLI_mutex_end(ThreadMutex *mutex)
{
        pthread_mutex_destroy(mutex);
}

/* Allocate and initialize a mutex; pair with BLI_mutex_free. */
ThreadMutex *BLI_mutex_alloc(void)
{
        ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex");
        BLI_mutex_init(mutex);
        return mutex;
}

/* Destroy and free a mutex created with BLI_mutex_alloc. */
void BLI_mutex_free(ThreadMutex *mutex)
{
        BLI_mutex_end(mutex);
        MEM_freeN(mutex);
}
501
/* Spin Locks */

/* Initialize a spin lock: OSSpinLock on macOS, pthread spinlock elsewhere. */
void BLI_spin_init(SpinLock *spin)
{
#ifdef __APPLE__
        *spin = OS_SPINLOCK_INIT;
#else
        pthread_spin_init(spin, 0);
#endif
}

/* Busy-wait until the lock is acquired; keep protected sections short. */
void BLI_spin_lock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockLock(spin);
#else
        pthread_spin_lock(spin);
#endif
}

void BLI_spin_unlock(SpinLock *spin)
{
#ifdef __APPLE__
        OSSpinLockUnlock(spin);
#else
        pthread_spin_unlock(spin);
#endif
}

/* Destroy the lock; a no-op on macOS where OSSpinLock needs no cleanup. */
#ifndef __APPLE__
void BLI_spin_end(SpinLock *spin)
{
        pthread_spin_destroy(spin);
}
#else
void BLI_spin_end(SpinLock *UNUSED(spin))
{
}
#endif
541
/* Read/Write Mutex Lock */

/* Initialize a caller-owned read/write lock. */
void BLI_rw_mutex_init(ThreadRWMutex *mutex)
{
        pthread_rwlock_init(mutex, NULL);
}

/* Acquire for shared reading (THREAD_LOCK_READ) or exclusive writing
 * (any other mode). */
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
{
        if (mode == THREAD_LOCK_READ)
                pthread_rwlock_rdlock(mutex);
        else
                pthread_rwlock_wrlock(mutex);
}

/* Release either a read or a write hold. */
void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
{
        pthread_rwlock_unlock(mutex);
}

/* Destroy a lock initialized with BLI_rw_mutex_init. */
void BLI_rw_mutex_end(ThreadRWMutex *mutex)
{
        pthread_rwlock_destroy(mutex);
}

/* Allocate and initialize a rwlock; pair with BLI_rw_mutex_free. */
ThreadRWMutex *BLI_rw_mutex_alloc(void)
{
        ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex");
        BLI_rw_mutex_init(mutex);
        return mutex;
}

/* Destroy and free a rwlock created with BLI_rw_mutex_alloc. */
void BLI_rw_mutex_free(ThreadRWMutex *mutex)
{
        BLI_rw_mutex_end(mutex);
        MEM_freeN(mutex);
}
579
/* Ticket Mutex Lock */

/* A "ticket" mutex: each locker takes an increasing ticket (queue_tail) and
 * the lock is granted strictly in ticket order (queue_head), making
 * acquisition FIFO-fair. */
struct TicketMutex {
        pthread_cond_t cond;
        pthread_mutex_t mutex;          /* protects the counters below */
        unsigned int queue_head, queue_tail;
};

/* Allocate and initialize a ticket mutex; pair with BLI_ticket_mutex_free. */
TicketMutex *BLI_ticket_mutex_alloc(void)
{
        TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex");

        pthread_cond_init(&ticket->cond, NULL);
        pthread_mutex_init(&ticket->mutex, NULL);

        return ticket;
}

/* Destroy and free a ticket mutex; must not be held by any thread. */
void BLI_ticket_mutex_free(TicketMutex *ticket)
{
        pthread_mutex_destroy(&ticket->mutex);
        pthread_cond_destroy(&ticket->cond);
        MEM_freeN(ticket);
}

/* Take the next ticket and wait until it becomes the current one. */
void BLI_ticket_mutex_lock(TicketMutex *ticket)
{
        unsigned int queue_me;

        pthread_mutex_lock(&ticket->mutex);
        queue_me = ticket->queue_tail++;

        /* sleep until every earlier ticket holder has unlocked */
        while (queue_me != ticket->queue_head)
                pthread_cond_wait(&ticket->cond, &ticket->mutex);

        pthread_mutex_unlock(&ticket->mutex);
}

/* Advance to the next ticket and wake all waiters; each waiter re-checks
 * whether its own ticket is now current. */
void BLI_ticket_mutex_unlock(TicketMutex *ticket)
{
        pthread_mutex_lock(&ticket->mutex);
        ticket->queue_head++;
        pthread_cond_broadcast(&ticket->cond);
        pthread_mutex_unlock(&ticket->mutex);
}
625
/* ************************************************ */

/* Condition */

/* Thin wrappers over pthread condition variables; 'mutex' must be held
 * around BLI_condition_wait and the predicate re-checked by the caller. */
void BLI_condition_init(ThreadCondition *cond)
{
        pthread_cond_init(cond, NULL);
}

/* Atomically release 'mutex' and sleep until notified; reacquires on wake. */
void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
{
        pthread_cond_wait(cond, mutex);
}

/* Wake a single waiter. */
void BLI_condition_notify_one(ThreadCondition *cond)
{
        pthread_cond_signal(cond);
}

/* Wake all waiters. */
void BLI_condition_notify_all(ThreadCondition *cond)
{
        pthread_cond_broadcast(cond);
}

/* Destroy a condition initialized with BLI_condition_init. */
void BLI_condition_end(ThreadCondition *cond)
{
        pthread_cond_destroy(cond);
}
654
/* ************************************************ */

/* Producer/consumer work queue: a GSQueue of void* items guarded by a mutex,
 * with one condition signaled on push and one broadcast when the queue drains. */
struct ThreadQueue {
        GSQueue *queue;              /* FIFO of void* work pointers */
        pthread_mutex_t mutex;       /* protects all fields below */
        pthread_cond_t push_cond;    /* signaled on push and when nowait is set */
        pthread_cond_t finish_cond;  /* broadcast when the queue becomes empty */
        volatile int nowait;         /* when set, pop() no longer blocks on empty */
        volatile int canceled;       /* NOTE(review): never written in this file — confirm external use */
};

/* Create an empty queue; free with BLI_thread_queue_free once unused. */
ThreadQueue *BLI_thread_queue_init(void)
{
        ThreadQueue *queue;

        queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
        queue->queue = BLI_gsqueue_new(sizeof(void *));

        pthread_mutex_init(&queue->mutex, NULL);
        pthread_cond_init(&queue->push_cond, NULL);
        pthread_cond_init(&queue->finish_cond, NULL);

        return queue;
}

void BLI_thread_queue_free(ThreadQueue *queue)
{
        /* destroy everything, assumes no one is using queue anymore */
        pthread_cond_destroy(&queue->finish_cond);
        pthread_cond_destroy(&queue->push_cond);
        pthread_mutex_destroy(&queue->mutex);

        BLI_gsqueue_free(queue->queue);

        MEM_freeN(queue);
}
691
/* Append one work pointer and wake a single thread blocked in pop(). */
void BLI_thread_queue_push(ThreadQueue *queue, void *work)
{
        pthread_mutex_lock(&queue->mutex);

        BLI_gsqueue_push(queue->queue, &work);

        /* signal threads waiting to pop */
        pthread_cond_signal(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}
702
/* Pop one work pointer; blocks while the queue is empty unless nowait was
 * set via BLI_thread_queue_nowait(), in which case NULL is returned for an
 * empty queue. Broadcasts finish_cond when it takes the last item. */
void *BLI_thread_queue_pop(ThreadQueue *queue)
{
        void *work = NULL;

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
                pthread_cond_wait(&queue->push_cond, &queue->mutex);

        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);

                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }

        pthread_mutex_unlock(&queue->mutex);

        return work;
}
724
/* Fill 'timeout' with the absolute wall-clock time 'ms' milliseconds from
 * now, in the form pthread_cond_timedwait() expects. */
static void wait_timeout(struct timespec *timeout, int ms)
{
        long sec, usec, total_usec;

#ifdef WIN32
        {
                struct _timeb now;
                _ftime(&now);
                sec = now.time;
                usec = now.millitm * 1000; /* microsecond precision would be better */
        }
#else
        {
                struct timeval now;
                gettimeofday(&now, NULL);
                sec = now.tv_sec;
                usec = now.tv_usec;
        }
#endif

        /* current time plus the millisecond offset, split into whole seconds
         * and a sub-second remainder */
        timeout->tv_sec = sec + (ms / 1000);
        total_usec = usec + (long)(ms % 1000) * 1000;

        /* carry overflow of the microsecond part into the seconds */
        if (total_usec >= 1000000) {
                timeout->tv_sec++;
                total_usec -= 1000000;
        }

        timeout->tv_nsec = total_usec * 1000;
}
759
/* Like BLI_thread_queue_pop() but gives up after 'ms' milliseconds,
 * returning NULL on timeout. The wall clock is re-checked after each wake
 * so spurious wakeups cannot extend the wait past the deadline. */
void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
{
        double t;
        void *work = NULL;
        struct timespec timeout;

        t = PIL_check_seconds_timer();
        wait_timeout(&timeout, ms);

        /* wait until there is work */
        pthread_mutex_lock(&queue->mutex);
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
                if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
                        break;
                else if (PIL_check_seconds_timer() - t >= ms * 0.001)
                        break;
        }

        /* if we have something, pop it */
        if (!BLI_gsqueue_is_empty(queue->queue)) {
                BLI_gsqueue_pop(queue->queue, &work);

                if (BLI_gsqueue_is_empty(queue->queue))
                        pthread_cond_broadcast(&queue->finish_cond);
        }

        pthread_mutex_unlock(&queue->mutex);

        return work;
}
790
791 int BLI_thread_queue_size(ThreadQueue *queue)
792 {
793         int size;
794
795         pthread_mutex_lock(&queue->mutex);
796         size = BLI_gsqueue_size(queue->queue);
797         pthread_mutex_unlock(&queue->mutex);
798
799         return size;
800 }
801
/* Put the queue in non-blocking mode: current and future pop() callers
 * return NULL instead of sleeping once the queue is empty. */
void BLI_thread_queue_nowait(ThreadQueue *queue)
{
        pthread_mutex_lock(&queue->mutex);

        queue->nowait = 1;

        /* signal threads waiting to pop */
        pthread_cond_broadcast(&queue->push_cond);
        pthread_mutex_unlock(&queue->mutex);
}

/* Block until consumers have popped every queued item. Note this only
 * means the queue is empty, not that work on the popped items is done. */
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
{
        /* wait for finish condition */
        pthread_mutex_lock(&queue->mutex);

        while (!BLI_gsqueue_is_empty(queue->queue))
                pthread_cond_wait(&queue->finish_cond, &queue->mutex);

        pthread_mutex_unlock(&queue->mutex);
}
823
/* ************************************************ */

/* Install guardedalloc's lock callbacks while threaded code may allocate.
 * Nestable: thread_levels counts the nesting, callbacks are installed at the
 * outermost level only. Pair every call with BLI_end_threaded_malloc(). */
void BLI_begin_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        if (thread_levels == 0) {
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
        }
        thread_levels++;
}

/* Counterpart of BLI_begin_threaded_malloc(); removes the lock callbacks
 * when the outermost nesting level ends. */
void BLI_end_threaded_malloc(void)
{
        /* Used for debug only */
        /* BLI_assert(thread_levels >= 0); */

        thread_levels--;
        if (thread_levels == 0)
                MEM_set_lock_callback(NULL, NULL);
}
846