Merging r58073 through r58111 from trunk into soc-2013-depsgraph_mt
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
32 #include <stdlib.h>
33 #include <errno.h>
34 #include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38 #include "BLI_listbase.h"
39 #include "BLI_gsqueue.h"
40 #include "BLI_threads.h"
41
42 #include "PIL_time.h"
43
44 /* for checking system threads - BLI_system_thread_count */
45 #ifdef WIN32
46 #  include <windows.h>
47 #  include <sys/timeb.h>
48 #elif defined(__APPLE__)
49 #  include <sys/types.h>
50 #  include <sys/sysctl.h>
51 #else
52 #  include <unistd.h>
53 #  include <sys/time.h>
54 #endif
55
56 #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
57 #  define USE_APPLE_OMP_FIX
58 #endif
59
60 #ifdef USE_APPLE_OMP_FIX
61 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
62 extern pthread_key_t gomp_tls_key;
63 static void *thread_tls_data;
64 #endif
65
66 /* ********** basic thread control API ************ 
67  * 
68  * Many thread cases have an X amount of jobs, and only an Y amount of
69  * threads are useful (typically amount of cpus)
70  *
71  * This code can be used to start a maximum amount of 'thread slots', which
72  * then can be filled in a loop with an idle timer. 
73  *
74  * A sample loop can look like this (pseudo c);
75  *
76  *     ListBase lb;
77  *     int maxthreads = 2;
78  *     int cont = 1;
79  * 
80  *     BLI_init_threads(&lb, do_something_func, maxthreads);
81  * 
82  *     while (cont) {
83  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
84  *             // get new job (data pointer)
 *             // tag job 'processed'
86  *             BLI_insert_thread(&lb, job);
87  *         }
88  *         else PIL_sleep_ms(50);
89  *         
 *         // find if a job is ready; the do_something_func() should write its result into the job somewhere
91  *         cont = 0;
92  *         for (go over all jobs)
93  *             if (job is ready) {
94  *                 if (job was not removed) {
95  *                     BLI_remove_thread(&lb, job);
96  *                 }
97  *             }
98  *             else cont = 1;
99  *         }
100  *         // conditions to exit loop 
101  *         if (if escape loop event) {
102  *             if (BLI_available_threadslots(&lb) == maxthreads)
103  *                 break;
104  *         }
105  *     }
106  * 
107  *     BLI_end_threads(&lb);
108  *
109  ************************************************ */
110 static pthread_mutex_t _malloc_lock = PTHREAD_MUTEX_INITIALIZER;
111 static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
112 static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
113 static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
114 static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
115 static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
116 static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
117 static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
118 static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
119 static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
120 static pthread_t mainid;
121 static int thread_levels = 0;  /* threads can be invoked inside threads */
122 static int num_threads_override = 0;
123
124 /* just a max for security reasons */
125 #define RE_MAX_THREAD BLENDER_MAX_THREADS
126
127 typedef struct ThreadSlot {
128         struct ThreadSlot *next, *prev;
129         void *(*do_thread)(void *);
130         void *callerdata;
131         pthread_t pthread;
132         int avail;
133 } ThreadSlot;
134
135 static void BLI_lock_malloc_thread(void)
136 {
137         pthread_mutex_lock(&_malloc_lock);
138 }
139
140 static void BLI_unlock_malloc_thread(void)
141 {
142         pthread_mutex_unlock(&_malloc_lock);
143 }
144
145 void BLI_threadapi_init(void)
146 {
147         mainid = pthread_self();
148 }
149
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c);
 * problem otherwise: scene render will kill off the mutex!
 */
153
154 void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
155 {
156         int a;
157
158         if (threadbase != NULL && tot > 0) {
159                 threadbase->first = threadbase->last = NULL;
160         
161                 if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
162                 else if (tot < 1) tot = 1;
163         
164                 for (a = 0; a < tot; a++) {
165                         ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
166                         BLI_addtail(threadbase, tslot);
167                         tslot->do_thread = do_thread;
168                         tslot->avail = 1;
169                 }
170         }
171         
172         if (thread_levels == 0) {
173                 MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
174
175 #ifdef USE_APPLE_OMP_FIX
176                 /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
177                  * we copy gomp thread local storage pointer to setting it again
178                  * inside the thread that we start */
179                 thread_tls_data = pthread_getspecific(gomp_tls_key);
180 #endif
181         }
182
183         thread_levels++;
184 }
185
186 /* amount of available threads */
187 int BLI_available_threads(ListBase *threadbase)
188 {
189         ThreadSlot *tslot;
190         int counter = 0;
191         
192         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
193                 if (tslot->avail)
194                         counter++;
195         }
196         return counter;
197 }
198
199 /* returns thread number, for sample patterns or threadsafe tables */
200 int BLI_available_thread_index(ListBase *threadbase)
201 {
202         ThreadSlot *tslot;
203         int counter = 0;
204         
205         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
206                 if (tslot->avail)
207                         return counter;
208         }
209         return 0;
210 }
211
212 static void *tslot_thread_start(void *tslot_p)
213 {
214         ThreadSlot *tslot = (ThreadSlot *)tslot_p;
215
216 #ifdef USE_APPLE_OMP_FIX
217         /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
218          * set gomp thread local storage pointer which was copied beforehand */
219         pthread_setspecific(gomp_tls_key, thread_tls_data);
220 #endif
221
222         return tslot->do_thread(tslot->callerdata);
223 }
224
225 int BLI_thread_is_main(void)
226 {
227         return pthread_equal(pthread_self(), mainid);
228 }
229
230 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
231 {
232         ThreadSlot *tslot;
233         
234         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
235                 if (tslot->avail) {
236                         tslot->avail = 0;
237                         tslot->callerdata = callerdata;
238                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
239                         return;
240                 }
241         }
242         printf("ERROR: could not insert thread slot\n");
243 }
244
245 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
246 {
247         ThreadSlot *tslot;
248         
249         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
250                 if (tslot->callerdata == callerdata) {
251                         pthread_join(tslot->pthread, NULL);
252                         tslot->callerdata = NULL;
253                         tslot->avail = 1;
254                 }
255         }
256 }
257
258 void BLI_remove_thread_index(ListBase *threadbase, int index)
259 {
260         ThreadSlot *tslot;
261         int counter = 0;
262         
263         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
264                 if (counter == index && tslot->avail == 0) {
265                         pthread_join(tslot->pthread, NULL);
266                         tslot->callerdata = NULL;
267                         tslot->avail = 1;
268                         break;
269                 }
270         }
271 }
272
273 void BLI_remove_threads(ListBase *threadbase)
274 {
275         ThreadSlot *tslot;
276         
277         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
278                 if (tslot->avail == 0) {
279                         pthread_join(tslot->pthread, NULL);
280                         tslot->callerdata = NULL;
281                         tslot->avail = 1;
282                 }
283         }
284 }
285
286 void BLI_end_threads(ListBase *threadbase)
287 {
288         ThreadSlot *tslot;
289         
290         /* only needed if there's actually some stuff to end
291          * this way we don't end up decrementing thread_levels on an empty threadbase 
292          * */
293         if (threadbase && threadbase->first != NULL) {
294                 for (tslot = threadbase->first; tslot; tslot = tslot->next) {
295                         if (tslot->avail == 0) {
296                                 pthread_join(tslot->pthread, NULL);
297                         }
298                 }
299                 BLI_freelistN(threadbase);
300         }
301
302         thread_levels--;
303         if (thread_levels == 0)
304                 MEM_set_lock_callback(NULL, NULL);
305 }
306
307 /* System Information */
308
309 /* how many threads are native on this system? */
310 int BLI_system_thread_count(void)
311 {
312         int t;
313 #ifdef WIN32
314         SYSTEM_INFO info;
315         GetSystemInfo(&info);
316         t = (int) info.dwNumberOfProcessors;
317 #else 
318 #   ifdef __APPLE__
319         int mib[2];
320         size_t len;
321         
322         mib[0] = CTL_HW;
323         mib[1] = HW_NCPU;
324         len = sizeof(t);
325         sysctl(mib, 2, &t, &len, NULL, 0);
326 #   else
327         t = (int)sysconf(_SC_NPROCESSORS_ONLN);
328 #   endif
329 #endif
330
331         if (num_threads_override > 0)
332                 return num_threads_override;
333         
334         if (t > RE_MAX_THREAD)
335                 return RE_MAX_THREAD;
336         if (t < 1)
337                 return 1;
338         
339         return t;
340 }
341
342 void BLI_system_num_threads_override_set(int num)
343 {
344         num_threads_override = num;
345 }
346
347 int BLI_system_num_threads_override_get(void)
348 {
349         return num_threads_override;
350 }
351
352 /* Global Mutex Locks */
353
354 void BLI_lock_thread(int type)
355 {
356         if (type == LOCK_IMAGE)
357                 pthread_mutex_lock(&_image_lock);
358         else if (type == LOCK_DRAW_IMAGE)
359                 pthread_mutex_lock(&_image_draw_lock);
360         else if (type == LOCK_VIEWER)
361                 pthread_mutex_lock(&_viewer_lock);
362         else if (type == LOCK_CUSTOM1)
363                 pthread_mutex_lock(&_custom1_lock);
364         else if (type == LOCK_RCACHE)
365                 pthread_mutex_lock(&_rcache_lock);
366         else if (type == LOCK_OPENGL)
367                 pthread_mutex_lock(&_opengl_lock);
368         else if (type == LOCK_NODES)
369                 pthread_mutex_lock(&_nodes_lock);
370         else if (type == LOCK_MOVIECLIP)
371                 pthread_mutex_lock(&_movieclip_lock);
372         else if (type == LOCK_COLORMANAGE)
373                 pthread_mutex_lock(&_colormanage_lock);
374 }
375
376 void BLI_unlock_thread(int type)
377 {
378         if (type == LOCK_IMAGE)
379                 pthread_mutex_unlock(&_image_lock);
380         else if (type == LOCK_DRAW_IMAGE)
381                 pthread_mutex_unlock(&_image_draw_lock);
382         else if (type == LOCK_VIEWER)
383                 pthread_mutex_unlock(&_viewer_lock);
384         else if (type == LOCK_CUSTOM1)
385                 pthread_mutex_unlock(&_custom1_lock);
386         else if (type == LOCK_RCACHE)
387                 pthread_mutex_unlock(&_rcache_lock);
388         else if (type == LOCK_OPENGL)
389                 pthread_mutex_unlock(&_opengl_lock);
390         else if (type == LOCK_NODES)
391                 pthread_mutex_unlock(&_nodes_lock);
392         else if (type == LOCK_MOVIECLIP)
393                 pthread_mutex_unlock(&_movieclip_lock);
394         else if (type == LOCK_COLORMANAGE)
395                 pthread_mutex_unlock(&_colormanage_lock);
396 }
397
398 /* Mutex Locks */
399
400 void BLI_mutex_init(ThreadMutex *mutex)
401 {
402         pthread_mutex_init(mutex, NULL);
403 }
404
405 void BLI_mutex_lock(ThreadMutex *mutex)
406 {
407         pthread_mutex_lock(mutex);
408 }
409
410 void BLI_mutex_unlock(ThreadMutex *mutex)
411 {
412         pthread_mutex_unlock(mutex);
413 }
414
415 bool BLI_mutex_trylock(ThreadMutex *mutex)
416 {
417         return (pthread_mutex_trylock(mutex) == 0);
418 }
419
420 void BLI_mutex_end(ThreadMutex *mutex)
421 {
422         pthread_mutex_destroy(mutex);
423 }
424
425 ThreadMutex *BLI_mutex_alloc(void)
426 {
427         ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex");
428         BLI_mutex_init(mutex);
429         return mutex;
430 }
431
432 void BLI_mutex_free(ThreadMutex *mutex)
433 {
434         BLI_mutex_end(mutex);
435         MEM_freeN(mutex);
436 }
437
438 /* Spin Locks */
439
440 void BLI_spin_init(SpinLock *spin)
441 {
442 #ifdef __APPLE__
443         *spin = OS_SPINLOCK_INIT;
444 #else
445         pthread_spin_init(spin, 0);
446 #endif
447 }
448
449 void BLI_spin_lock(SpinLock *spin)
450 {
451 #ifdef __APPLE__
452         OSSpinLockLock(spin);
453 #else
454         pthread_spin_lock(spin);
455 #endif
456 }
457
458 void BLI_spin_unlock(SpinLock *spin)
459 {
460 #ifdef __APPLE__
461         OSSpinLockUnlock(spin);
462 #else
463         pthread_spin_unlock(spin);
464 #endif
465 }
466
467 #ifndef __APPLE__
468 void BLI_spin_end(SpinLock *spin)
469 {
470         pthread_spin_destroy(spin);
471 }
472 #else
473 void BLI_spin_end(SpinLock *UNUSED(spin))
474 {
475 }
476 #endif
477
478 /* Read/Write Mutex Lock */
479
480 void BLI_rw_mutex_init(ThreadRWMutex *mutex)
481 {
482         pthread_rwlock_init(mutex, NULL);
483 }
484
485 void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
486 {
487         if (mode == THREAD_LOCK_READ)
488                 pthread_rwlock_rdlock(mutex);
489         else
490                 pthread_rwlock_wrlock(mutex);
491 }
492
493 void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
494 {
495         pthread_rwlock_unlock(mutex);
496 }
497
498 void BLI_rw_mutex_end(ThreadRWMutex *mutex)
499 {
500         pthread_rwlock_destroy(mutex);
501 }
502
503 ThreadRWMutex *BLI_rw_mutex_alloc(void)
504 {
505         ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex");
506         BLI_rw_mutex_init(mutex);
507         return mutex;
508 }
509
510 void BLI_rw_mutex_free(ThreadRWMutex *mutex)
511 {
512         BLI_rw_mutex_end(mutex);
513         MEM_freeN(mutex);
514 }
515
516 /* Ticket Mutex Lock */
517
518 struct TicketMutex {
519         pthread_cond_t cond;
520         pthread_mutex_t mutex;
521         unsigned int queue_head, queue_tail;
522 };
523
524 TicketMutex *BLI_ticket_mutex_alloc(void)
525 {
526         TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex");
527
528         pthread_cond_init(&ticket->cond, NULL);
529         pthread_mutex_init(&ticket->mutex, NULL);
530
531         return ticket;
532 }
533
534 void BLI_ticket_mutex_free(TicketMutex *ticket)
535 {
536         pthread_mutex_destroy(&ticket->mutex);
537         pthread_cond_destroy(&ticket->cond);
538         MEM_freeN(ticket);
539 }
540
541 void BLI_ticket_mutex_lock(TicketMutex *ticket)
542 {
543         unsigned int queue_me;
544
545         pthread_mutex_lock(&ticket->mutex);
546         queue_me = ticket->queue_tail++;
547
548         while (queue_me != ticket->queue_head)
549                 pthread_cond_wait(&ticket->cond, &ticket->mutex);
550
551         pthread_mutex_unlock(&ticket->mutex);
552 }
553
554 void BLI_ticket_mutex_unlock(TicketMutex *ticket)
555 {
556         pthread_mutex_lock(&ticket->mutex);
557         ticket->queue_head++;
558         pthread_cond_broadcast(&ticket->cond);
559         pthread_mutex_unlock(&ticket->mutex);
560 }
561
562 /* ************************************************ */
563
564 typedef struct ThreadedWorker {
565         ListBase threadbase;
566         void *(*work_fnct)(void *);
567         char busy[RE_MAX_THREAD];
568         int total;
569         int sleep_time;
570 } ThreadedWorker;
571
572 typedef struct WorkParam {
573         ThreadedWorker *worker;
574         void *param;
575         int index;
576 } WorkParam;
577
578 static void *exec_work_fnct(void *v_param)
579 {
580         WorkParam *p = (WorkParam *)v_param;
581         void *value;
582         
583         value = p->worker->work_fnct(p->param);
584         
585         p->worker->busy[p->index] = 0;
586         MEM_freeN(p);
587         
588         return value;
589 }
590
591 ThreadedWorker *BLI_create_worker(void *(*do_thread)(void *), int tot, int sleep_time)
592 {
593         ThreadedWorker *worker;
594         
595         (void)sleep_time; /* unused */
596         
597         worker = MEM_callocN(sizeof(ThreadedWorker), "threadedworker");
598         
599         if (tot > RE_MAX_THREAD) {
600                 tot = RE_MAX_THREAD;
601         }
602         else if (tot < 1) {
603                 tot = 1;
604         }
605         
606         worker->total = tot;
607         worker->work_fnct = do_thread;
608         
609         BLI_init_threads(&worker->threadbase, exec_work_fnct, tot);
610         
611         return worker;
612 }
613
614 void BLI_end_worker(ThreadedWorker *worker)
615 {
616         BLI_remove_threads(&worker->threadbase);
617 }
618
619 void BLI_destroy_worker(ThreadedWorker *worker)
620 {
621         BLI_end_worker(worker);
622         BLI_freelistN(&worker->threadbase);
623         MEM_freeN(worker);
624 }
625
626 void BLI_insert_work(ThreadedWorker *worker, void *param)
627 {
628         WorkParam *p = MEM_callocN(sizeof(WorkParam), "workparam");
629         int index;
630         
631         if (BLI_available_threads(&worker->threadbase) == 0) {
632                 index = worker->total;
633                 while (index == worker->total) {
634                         PIL_sleep_ms(worker->sleep_time);
635                         
636                         for (index = 0; index < worker->total; index++) {
637                                 if (worker->busy[index] == 0) {
638                                         BLI_remove_thread_index(&worker->threadbase, index);
639                                         break;
640                                 }
641                         }
642                 }
643         }
644         else {
645                 index = BLI_available_thread_index(&worker->threadbase);
646         }
647         
648         worker->busy[index] = 1;
649         
650         p->param = param;
651         p->index = index;
652         p->worker = worker;
653         
654         BLI_insert_thread(&worker->threadbase, p);
655 }
656
657 /* ************************************************ */
658
659 struct ThreadQueue {
660         GSQueue *queue;
661         pthread_mutex_t mutex;
662         pthread_cond_t push_cond;
663         pthread_cond_t finish_cond;
664         volatile int nowait;
665         volatile int cancelled;
666 };
667
668 ThreadQueue *BLI_thread_queue_init(void)
669 {
670         ThreadQueue *queue;
671
672         queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
673         queue->queue = BLI_gsqueue_new(sizeof(void *));
674
675         pthread_mutex_init(&queue->mutex, NULL);
676         pthread_cond_init(&queue->push_cond, NULL);
677         pthread_cond_init(&queue->finish_cond, NULL);
678
679         return queue;
680 }
681
682 void BLI_thread_queue_free(ThreadQueue *queue)
683 {
684         /* destroy everything, assumes no one is using queue anymore */
685         pthread_cond_destroy(&queue->finish_cond);
686         pthread_cond_destroy(&queue->push_cond);
687         pthread_mutex_destroy(&queue->mutex);
688
689         BLI_gsqueue_free(queue->queue);
690
691         MEM_freeN(queue);
692 }
693
694 void BLI_thread_queue_push(ThreadQueue *queue, void *work)
695 {
696         pthread_mutex_lock(&queue->mutex);
697
698         BLI_gsqueue_push(queue->queue, &work);
699
700         /* signal threads waiting to pop */
701         pthread_cond_signal(&queue->push_cond);
702         pthread_mutex_unlock(&queue->mutex);
703 }
704
705 void *BLI_thread_queue_pop(ThreadQueue *queue)
706 {
707         void *work = NULL;
708
709         /* wait until there is work */
710         pthread_mutex_lock(&queue->mutex);
711         while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
712                 pthread_cond_wait(&queue->push_cond, &queue->mutex);
713         
714         /* if we have something, pop it */
715         if (!BLI_gsqueue_is_empty(queue->queue)) {
716                 BLI_gsqueue_pop(queue->queue, &work);
717                 
718                 if (BLI_gsqueue_is_empty(queue->queue))
719                         pthread_cond_broadcast(&queue->finish_cond);
720         }
721
722         pthread_mutex_unlock(&queue->mutex);
723
724         return work;
725 }
726
727 static void wait_timeout(struct timespec *timeout, int ms)
728 {
729         ldiv_t div_result;
730         long sec, usec, x;
731
732 #ifdef WIN32
733         {
734                 struct _timeb now;
735                 _ftime(&now);
736                 sec = now.time;
737                 usec = now.millitm * 1000; /* microsecond precision would be better */
738         }
739 #else
740         {
741                 struct timeval now;
742                 gettimeofday(&now, NULL);
743                 sec = now.tv_sec;
744                 usec = now.tv_usec;
745         }
746 #endif
747
748         /* add current time + millisecond offset */
749         div_result = ldiv(ms, 1000);
750         timeout->tv_sec = sec + div_result.quot;
751
752         x = usec + (div_result.rem * 1000);
753
754         if (x >= 1000000) {
755                 timeout->tv_sec++;
756                 x -= 1000000;
757         }
758
759         timeout->tv_nsec = x * 1000;
760 }
761
762 void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
763 {
764         double t;
765         void *work = NULL;
766         struct timespec timeout;
767
768         t = PIL_check_seconds_timer();
769         wait_timeout(&timeout, ms);
770
771         /* wait until there is work */
772         pthread_mutex_lock(&queue->mutex);
773         while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
774                 if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
775                         break;
776                 else if (PIL_check_seconds_timer() - t >= ms * 0.001)
777                         break;
778         }
779
780         /* if we have something, pop it */
781         if (!BLI_gsqueue_is_empty(queue->queue)) {
782                 BLI_gsqueue_pop(queue->queue, &work);
783                 
784                 if (BLI_gsqueue_is_empty(queue->queue))
785                         pthread_cond_broadcast(&queue->finish_cond);
786         }
787         
788         pthread_mutex_unlock(&queue->mutex);
789
790         return work;
791 }
792
793 int BLI_thread_queue_size(ThreadQueue *queue)
794 {
795         int size;
796
797         pthread_mutex_lock(&queue->mutex);
798         size = BLI_gsqueue_size(queue->queue);
799         pthread_mutex_unlock(&queue->mutex);
800
801         return size;
802 }
803
804 void BLI_thread_queue_nowait(ThreadQueue *queue)
805 {
806         pthread_mutex_lock(&queue->mutex);
807
808         queue->nowait = 1;
809
810         /* signal threads waiting to pop */
811         pthread_cond_broadcast(&queue->push_cond);
812         pthread_mutex_unlock(&queue->mutex);
813 }
814
815 void BLI_thread_queue_wait_finish(ThreadQueue *queue)
816 {
817         /* wait for finish condition */
818         pthread_mutex_lock(&queue->mutex);
819
820         while (!BLI_gsqueue_is_empty(queue->queue))
821                 pthread_cond_wait(&queue->finish_cond, &queue->mutex);
822
823         pthread_mutex_unlock(&queue->mutex);
824 }
825
826 /* Condition */
827 void BLI_condition_init(ThreadCondition *cond)
828 {
829         pthread_cond_init(cond, NULL);
830 }
831
832 void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex)
833 {
834         pthread_cond_wait(cond, mutex);
835 }
836
837 void BLI_condition_notify_one(ThreadCondition *cond)
838 {
839         pthread_cond_signal(cond);
840 }
841
842 void BLI_condition_notify_all(ThreadCondition *cond)
843 {
844         pthread_cond_broadcast(cond);
845 }
846
847 void BLI_condition_end(ThreadCondition *cond)
848 {
849         pthread_cond_destroy(cond);
850 }
851
852 /* ************************************************ */
853
854 void BLI_begin_threaded_malloc(void)
855 {
856         /* Used for debug only */
857         /* BLI_assert(thread_levels >= 0); */
858
859         if (thread_levels == 0) {
860                 MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
861         }
862         thread_levels++;
863 }
864
865 void BLI_end_threaded_malloc(void)
866 {
867         /* Used for debug only */
868         /* BLI_assert(thread_levels >= 0); */
869
870         thread_levels--;
871         if (thread_levels == 0)
872                 MEM_set_lock_callback(NULL, NULL);
873 }
874