Speedup for guarded allocator
[blender.git] / source / blender / blenlib / intern / threads.c
1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version. 
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * The Original Code is Copyright (C) 2006 Blender Foundation
19  * All rights reserved.
20  *
21  * The Original Code is: all of this file.
22  *
23  * Contributor(s): none yet.
24  *
25  * ***** END GPL LICENSE BLOCK *****
26  */
27
28 /** \file blender/blenlib/intern/threads.c
29  *  \ingroup bli
30  */
31
32 #include <stdlib.h>
33 #include <errno.h>
34 #include <string.h>
35
36 #include "MEM_guardedalloc.h"
37
38 #include "BLI_listbase.h"
39 #include "BLI_gsqueue.h"
40 #include "BLI_threads.h"
41
42 #include "PIL_time.h"
43
44 /* for checking system threads - BLI_system_thread_count */
45 #ifdef WIN32
46 #  include <windows.h>
47 #  include <sys/timeb.h>
48 #elif defined(__APPLE__)
49 #  include <sys/types.h>
50 #  include <sys/sysctl.h>
51 #else
52 #  include <unistd.h>
53 #  include <sys/time.h>
54 #endif
55
56 #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
57 #  define USE_APPLE_OMP_FIX
58 #endif
59
60 #ifdef USE_APPLE_OMP_FIX
61 /* ************** libgomp (Apple gcc 4.2.1) TLS bug workaround *************** */
62 extern pthread_key_t gomp_tls_key;
63 static void *thread_tls_data;
64 #endif
65
66 /* ********** basic thread control API ************ 
67  * 
68  * Many thread cases have an X amount of jobs, and only an Y amount of
69  * threads are useful (typically amount of cpus)
70  *
71  * This code can be used to start a maximum amount of 'thread slots', which
72  * then can be filled in a loop with an idle timer. 
73  *
74  * A sample loop can look like this (pseudo c);
75  *
76  *     ListBase lb;
77  *     int maxthreads = 2;
78  *     int cont = 1;
79  * 
80  *     BLI_init_threads(&lb, do_something_func, maxthreads);
81  * 
82  *     while (cont) {
83  *         if (BLI_available_threads(&lb) && !(escape loop event)) {
84  *             // get new job (data pointer)
85  *             // tag job 'processed 
86  *             BLI_insert_thread(&lb, job);
87  *         }
88  *         else PIL_sleep_ms(50);
89  *         
90  *         // find if a job is ready, this the do_something_func() should write in job somewhere
91  *         cont = 0;
92  *         for (go over all jobs)
93  *             if (job is ready) {
94  *                 if (job was not removed) {
95  *                     BLI_remove_thread(&lb, job);
96  *                 }
97  *             }
98  *             else cont = 1;
99  *         }
100  *         // conditions to exit loop 
101  *         if (if escape loop event) {
102  *             if (BLI_available_threadslots(&lb) == maxthreads)
103  *                 break;
104  *         }
105  *     }
106  * 
107  *     BLI_end_threads(&lb);
108  *
109  ************************************************ */
/* spin lock protecting the guarded allocator while threads are active */
static SpinLock _malloc_lock;
/* global mutexes, selected by the LOCK_* type passed to BLI_lock_thread() */
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _colormanage_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t mainid;  /* id of the thread that called BLI_threadapi_init() */
static int thread_levels = 0;  /* threads can be invoked inside threads */
static int num_threads_override = 0;  /* 0 = disabled, see BLI_system_num_threads_override_set() */

/* just a max for security reasons */
#define RE_MAX_THREAD BLENDER_MAX_THREADS

/* one slot in the list built by BLI_init_threads() */
typedef struct ThreadSlot {
	struct ThreadSlot *next, *prev;  /* ListBase links */
	void *(*do_thread)(void *);      /* callback run by the slot's thread */
	void *callerdata;                /* argument handed to do_thread */
	pthread_t pthread;               /* only meaningful while avail == 0 */
	int avail;                       /* 1 = slot free, 0 = thread running in it */
} ThreadSlot;
134
/* MEM_guardedalloc lock callback, serializes allocations between threads */
static void BLI_lock_malloc_thread(void)
{
	BLI_spin_lock(&_malloc_lock);
}

/* MEM_guardedalloc unlock callback, pairs with BLI_lock_malloc_thread() */
static void BLI_unlock_malloc_thread(void)
{
	BLI_spin_unlock(&_malloc_lock);
}
144
/* one-time setup: record the main thread id and create the allocator
 * spin lock; call from the main thread before any threading is used */
void BLI_threadapi_init(void)
{
	mainid = pthread_self();

	BLI_spin_init(&_malloc_lock);
}

/* counterpart of BLI_threadapi_init(), called once at shutdown */
void BLI_threadapi_exit(void)
{
	BLI_spin_end(&_malloc_lock);
}
156
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
 * problem otherwise: scene render will kill of the mutex!
 */

/* create tot thread slots in threadbase (clamped to 1..RE_MAX_THREAD) and,
 * on the first nesting level, install the guarded-allocator lock callbacks */
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
{
	int a;

	if (threadbase != NULL && tot > 0) {
		threadbase->first = threadbase->last = NULL;

		if (tot > RE_MAX_THREAD) tot = RE_MAX_THREAD;
		else if (tot < 1) tot = 1;

		for (a = 0; a < tot; a++) {
			ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot");
			BLI_addtail(threadbase, tslot);
			tslot->do_thread = do_thread;
			tslot->avail = 1;
		}
	}

	if (thread_levels == 0) {
		MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);

#ifdef USE_APPLE_OMP_FIX
		/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
		 * we copy gomp thread local storage pointer to setting it again
		 * inside the thread that we start */
		thread_tls_data = pthread_getspecific(gomp_tls_key);
#endif
	}

	/* incremented unconditionally; BLI_end_threads() decrements to match */
	thread_levels++;
}
192
193 /* amount of available threads */
194 int BLI_available_threads(ListBase *threadbase)
195 {
196         ThreadSlot *tslot;
197         int counter = 0;
198         
199         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
200                 if (tslot->avail)
201                         counter++;
202         }
203         return counter;
204 }
205
206 /* returns thread number, for sample patterns or threadsafe tables */
207 int BLI_available_thread_index(ListBase *threadbase)
208 {
209         ThreadSlot *tslot;
210         int counter = 0;
211         
212         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
213                 if (tslot->avail)
214                         return counter;
215         }
216         return 0;
217 }
218
/* pthread entry point for a ThreadSlot: runs the callback registered
 * in BLI_init_threads() with the slot's callerdata */
static void *tslot_thread_start(void *tslot_p)
{
	ThreadSlot *tslot = (ThreadSlot *)tslot_p;

#ifdef USE_APPLE_OMP_FIX
	/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
	 * set gomp thread local storage pointer which was copied beforehand */
	pthread_setspecific(gomp_tls_key, thread_tls_data);
#endif

	return tslot->do_thread(tslot->callerdata);
}
231
/* true when the caller is the thread that ran BLI_threadapi_init() */
int BLI_thread_is_main(void)
{
	return pthread_equal(pthread_self(), mainid);
}
236
237 void BLI_insert_thread(ListBase *threadbase, void *callerdata)
238 {
239         ThreadSlot *tslot;
240         
241         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
242                 if (tslot->avail) {
243                         tslot->avail = 0;
244                         tslot->callerdata = callerdata;
245                         pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
246                         return;
247                 }
248         }
249         printf("ERROR: could not insert thread slot\n");
250 }
251
252 void BLI_remove_thread(ListBase *threadbase, void *callerdata)
253 {
254         ThreadSlot *tslot;
255         
256         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
257                 if (tslot->callerdata == callerdata) {
258                         pthread_join(tslot->pthread, NULL);
259                         tslot->callerdata = NULL;
260                         tslot->avail = 1;
261                 }
262         }
263 }
264
265 void BLI_remove_thread_index(ListBase *threadbase, int index)
266 {
267         ThreadSlot *tslot;
268         int counter = 0;
269         
270         for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
271                 if (counter == index && tslot->avail == 0) {
272                         pthread_join(tslot->pthread, NULL);
273                         tslot->callerdata = NULL;
274                         tslot->avail = 1;
275                         break;
276                 }
277         }
278 }
279
280 void BLI_remove_threads(ListBase *threadbase)
281 {
282         ThreadSlot *tslot;
283         
284         for (tslot = threadbase->first; tslot; tslot = tslot->next) {
285                 if (tslot->avail == 0) {
286                         pthread_join(tslot->pthread, NULL);
287                         tslot->callerdata = NULL;
288                         tslot->avail = 1;
289                 }
290         }
291 }
292
/* join all running threads and free the slot list, then balance the
 * thread_levels increment that BLI_init_threads() did */
void BLI_end_threads(ListBase *threadbase)
{
	ThreadSlot *tslot;
	
	/* joining and freeing only apply when the threadbase actually has slots;
	 * thread_levels is still decremented below in all cases, because
	 * BLI_init_threads() increments it unconditionally */
	if (threadbase && threadbase->first != NULL) {
		for (tslot = threadbase->first; tslot; tslot = tslot->next) {
			if (tslot->avail == 0) {
				pthread_join(tslot->pthread, NULL);
			}
		}
		BLI_freelistN(threadbase);
	}

	thread_levels--;
	if (thread_levels == 0)
		MEM_set_lock_callback(NULL, NULL);
}
313
314 /* System Information */
315
316 /* how many threads are native on this system? */
317 int BLI_system_thread_count(void)
318 {
319         int t;
320 #ifdef WIN32
321         SYSTEM_INFO info;
322         GetSystemInfo(&info);
323         t = (int) info.dwNumberOfProcessors;
324 #else 
325 #   ifdef __APPLE__
326         int mib[2];
327         size_t len;
328         
329         mib[0] = CTL_HW;
330         mib[1] = HW_NCPU;
331         len = sizeof(t);
332         sysctl(mib, 2, &t, &len, NULL, 0);
333 #   else
334         t = (int)sysconf(_SC_NPROCESSORS_ONLN);
335 #   endif
336 #endif
337
338         if (num_threads_override > 0)
339                 return num_threads_override;
340         
341         if (t > RE_MAX_THREAD)
342                 return RE_MAX_THREAD;
343         if (t < 1)
344                 return 1;
345         
346         return t;
347 }
348
/* force BLI_system_thread_count() to return a fixed number (0 disables) */
void BLI_system_num_threads_override_set(int num)
{
	num_threads_override = num;
}

/* current override value, 0 when no override is active */
int BLI_system_num_threads_override_get(void)
{
	return num_threads_override;
}
358
359 /* Global Mutex Locks */
360
361 void BLI_lock_thread(int type)
362 {
363         if (type == LOCK_IMAGE)
364                 pthread_mutex_lock(&_image_lock);
365         else if (type == LOCK_DRAW_IMAGE)
366                 pthread_mutex_lock(&_image_draw_lock);
367         else if (type == LOCK_VIEWER)
368                 pthread_mutex_lock(&_viewer_lock);
369         else if (type == LOCK_CUSTOM1)
370                 pthread_mutex_lock(&_custom1_lock);
371         else if (type == LOCK_RCACHE)
372                 pthread_mutex_lock(&_rcache_lock);
373         else if (type == LOCK_OPENGL)
374                 pthread_mutex_lock(&_opengl_lock);
375         else if (type == LOCK_NODES)
376                 pthread_mutex_lock(&_nodes_lock);
377         else if (type == LOCK_MOVIECLIP)
378                 pthread_mutex_lock(&_movieclip_lock);
379         else if (type == LOCK_COLORMANAGE)
380                 pthread_mutex_lock(&_colormanage_lock);
381 }
382
383 void BLI_unlock_thread(int type)
384 {
385         if (type == LOCK_IMAGE)
386                 pthread_mutex_unlock(&_image_lock);
387         else if (type == LOCK_DRAW_IMAGE)
388                 pthread_mutex_unlock(&_image_draw_lock);
389         else if (type == LOCK_VIEWER)
390                 pthread_mutex_unlock(&_viewer_lock);
391         else if (type == LOCK_CUSTOM1)
392                 pthread_mutex_unlock(&_custom1_lock);
393         else if (type == LOCK_RCACHE)
394                 pthread_mutex_unlock(&_rcache_lock);
395         else if (type == LOCK_OPENGL)
396                 pthread_mutex_unlock(&_opengl_lock);
397         else if (type == LOCK_NODES)
398                 pthread_mutex_unlock(&_nodes_lock);
399         else if (type == LOCK_MOVIECLIP)
400                 pthread_mutex_unlock(&_movieclip_lock);
401         else if (type == LOCK_COLORMANAGE)
402                 pthread_mutex_unlock(&_colormanage_lock);
403 }
404
/* Mutex Locks */

/* thin wrappers over pthread mutexes, for callers that hold a ThreadMutex */

void BLI_mutex_init(ThreadMutex *mutex)
{
	pthread_mutex_init(mutex, NULL);
}

void BLI_mutex_lock(ThreadMutex *mutex)
{
	pthread_mutex_lock(mutex);
}

void BLI_mutex_unlock(ThreadMutex *mutex)
{
	pthread_mutex_unlock(mutex);
}

void BLI_mutex_end(ThreadMutex *mutex)
{
	pthread_mutex_destroy(mutex);
}

/* allocate and initialize a mutex, release with BLI_mutex_free() */
ThreadMutex *BLI_mutex_alloc(void)
{
	ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex");
	BLI_mutex_init(mutex);
	return mutex;
}

void BLI_mutex_free(ThreadMutex *mutex)
{
	BLI_mutex_end(mutex);
	MEM_freeN(mutex);
}
439
/* Spin Locks */

/* NOTE(review): the __APPLE__ branches use OSSpinLock, presumably because
 * pthread spin locks are not available there — confirm against BLI_threads.h */

void BLI_spin_init(SpinLock *spin)
{
#ifdef __APPLE__
	*spin = OS_SPINLOCK_INIT;
#else
	pthread_spin_init(spin, 0);
#endif
}

void BLI_spin_lock(SpinLock *spin)
{
#ifdef __APPLE__
	OSSpinLockLock(spin);
#else
	pthread_spin_lock(spin);
#endif
}

void BLI_spin_unlock(SpinLock *spin)
{
#ifdef __APPLE__
	OSSpinLockUnlock(spin);
#else
	pthread_spin_unlock(spin);
#endif
}

#ifndef __APPLE__
void BLI_spin_end(SpinLock *spin)
{
	pthread_spin_destroy(spin);
}
#else
/* OSSpinLock requires no cleanup */
void BLI_spin_end(SpinLock *UNUSED(spin))
{
}
#endif
479
480 /* Read/Write Mutex Lock */
481
482 void BLI_rw_mutex_init(ThreadRWMutex *mutex)
483 {
484         pthread_rwlock_init(mutex, NULL);
485 }
486
487 void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
488 {
489         if (mode == THREAD_LOCK_READ)
490                 pthread_rwlock_rdlock(mutex);
491         else
492                 pthread_rwlock_wrlock(mutex);
493 }
494
495 void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
496 {
497         pthread_rwlock_unlock(mutex);
498 }
499
500 void BLI_rw_mutex_end(ThreadRWMutex *mutex)
501 {
502         pthread_rwlock_destroy(mutex);
503 }
504
505 ThreadRWMutex *BLI_rw_mutex_alloc(void)
506 {
507         ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex");
508         BLI_rw_mutex_init(mutex);
509         return mutex;
510 }
511
512 void BLI_rw_mutex_free(ThreadRWMutex *mutex)
513 {
514         BLI_rw_mutex_end(mutex);
515         MEM_freeN(mutex);
516 }
517
/* Ticket Mutex Lock */

/* fair FIFO mutex: each locker takes a ticket from queue_tail and
 * waits until queue_head reaches that ticket */
struct TicketMutex {
	pthread_cond_t cond;
	pthread_mutex_t mutex;
	unsigned int queue_head, queue_tail;
};
525
/* allocate a ticket mutex with both tickets at 0 (calloc), i.e. unlocked;
 * release with BLI_ticket_mutex_free() */
TicketMutex *BLI_ticket_mutex_alloc(void)
{
	TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex");

	pthread_cond_init(&ticket->cond, NULL);
	pthread_mutex_init(&ticket->mutex, NULL);

	return ticket;
}

/* destroy the primitives and free the ticket mutex; no thread may hold it */
void BLI_ticket_mutex_free(TicketMutex *ticket)
{
	pthread_mutex_destroy(&ticket->mutex);
	pthread_cond_destroy(&ticket->cond);
	MEM_freeN(ticket);
}
542
/* take a ticket and block until it is this caller's turn (FIFO order) */
void BLI_ticket_mutex_lock(TicketMutex *ticket)
{
	unsigned int queue_me;

	pthread_mutex_lock(&ticket->mutex);
	queue_me = ticket->queue_tail++;

	/* wait until every earlier ticket has been served */
	while (queue_me != ticket->queue_head)
		pthread_cond_wait(&ticket->cond, &ticket->mutex);

	pthread_mutex_unlock(&ticket->mutex);
}

/* serve the next ticket; broadcast so every waiter re-checks its turn */
void BLI_ticket_mutex_unlock(TicketMutex *ticket)
{
	pthread_mutex_lock(&ticket->mutex);
	ticket->queue_head++;
	pthread_cond_broadcast(&ticket->cond);
	pthread_mutex_unlock(&ticket->mutex);
}
563
/* ************************************************ */

typedef struct ThreadedWorker {
	ListBase threadbase;         /* thread slots, see BLI_init_threads() */
	void *(*work_fnct)(void *);  /* user work callback */
	char busy[RE_MAX_THREAD];    /* per-slot busy flag, indexed by slot */
	int total;                   /* number of slots in use (1..RE_MAX_THREAD) */
	int sleep_time;              /* ms slept per poll in BLI_insert_work() */
} ThreadedWorker;

/* per-job bookkeeping handed to exec_work_fnct(), which frees it */
typedef struct WorkParam {
	ThreadedWorker *worker;
	void *param;                 /* user data forwarded to work_fnct */
	int index;                   /* slot index this job occupies */
} WorkParam;
579
/* thread entry wrapper: run the user callback, then clear the slot's
 * busy flag and free the WorkParam allocated by BLI_insert_work() */
static void *exec_work_fnct(void *v_param)
{
	WorkParam *p = (WorkParam *)v_param;
	void *value;
	
	value = p->worker->work_fnct(p->param);
	
	/* clear busy only after the work is fully done */
	p->worker->busy[p->index] = 0;
	MEM_freeN(p);
	
	return value;
}
592
593 ThreadedWorker *BLI_create_worker(void *(*do_thread)(void *), int tot, int sleep_time)
594 {
595         ThreadedWorker *worker;
596         
597         (void)sleep_time; /* unused */
598         
599         worker = MEM_callocN(sizeof(ThreadedWorker), "threadedworker");
600         
601         if (tot > RE_MAX_THREAD) {
602                 tot = RE_MAX_THREAD;
603         }
604         else if (tot < 1) {
605                 tot = 1;
606         }
607         
608         worker->total = tot;
609         worker->work_fnct = do_thread;
610         
611         BLI_init_threads(&worker->threadbase, exec_work_fnct, tot);
612         
613         return worker;
614 }
615
/* wait for all of the worker's threads to finish (slots stay allocated) */
void BLI_end_worker(ThreadedWorker *worker)
{
	BLI_remove_threads(&worker->threadbase);
}

/* wait for all threads, then free the slot list and the worker itself */
void BLI_destroy_worker(ThreadedWorker *worker)
{
	BLI_end_worker(worker);
	BLI_freelistN(&worker->threadbase);
	MEM_freeN(worker);
}
627
/* queue a job on the worker; blocks (polling every sleep_time ms)
 * until a thread slot becomes available */
void BLI_insert_work(ThreadedWorker *worker, void *param)
{
	WorkParam *p = MEM_callocN(sizeof(WorkParam), "workparam");
	int index;
	
	if (BLI_available_threads(&worker->threadbase) == 0) {
		/* all slots busy: poll until a thread clears its busy flag,
		 * then join it so its slot becomes available again */
		index = worker->total;
		while (index == worker->total) {
			PIL_sleep_ms(worker->sleep_time);
			
			for (index = 0; index < worker->total; index++) {
				if (worker->busy[index] == 0) {
					BLI_remove_thread_index(&worker->threadbase, index);
					break;
				}
			}
		}
	}
	else {
		index = BLI_available_thread_index(&worker->threadbase);
	}
	
	worker->busy[index] = 1;
	
	p->param = param;
	p->index = index;
	p->worker = worker;
	
	BLI_insert_thread(&worker->threadbase, p);
}
658
/* ************************************************ */

struct ThreadQueue {
	GSQueue *queue;              /* FIFO of void* work pointers */
	pthread_mutex_t mutex;       /* protects queue and the flags below */
	pthread_cond_t push_cond;    /* signaled on push and when nowait is set */
	pthread_cond_t finish_cond;  /* broadcast when the queue becomes empty */
	volatile int nowait;         /* when set, pop no longer blocks on empty */
	volatile int cancelled;      /* NOTE(review): not referenced in this file — confirm usage elsewhere */
};
669
/* allocate an empty thread-safe queue; release with BLI_thread_queue_free() */
ThreadQueue *BLI_thread_queue_init(void)
{
	ThreadQueue *queue;

	queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue");
	queue->queue = BLI_gsqueue_new(sizeof(void *));

	pthread_mutex_init(&queue->mutex, NULL);
	pthread_cond_init(&queue->push_cond, NULL);
	pthread_cond_init(&queue->finish_cond, NULL);

	return queue;
}

void BLI_thread_queue_free(ThreadQueue *queue)
{
	/* destroy everything, assumes no one is using queue anymore */
	pthread_cond_destroy(&queue->finish_cond);
	pthread_cond_destroy(&queue->push_cond);
	pthread_mutex_destroy(&queue->mutex);

	BLI_gsqueue_free(queue->queue);

	MEM_freeN(queue);
}
695
/* append a work pointer and wake one thread blocked in pop */
void BLI_thread_queue_push(ThreadQueue *queue, void *work)
{
	pthread_mutex_lock(&queue->mutex);

	BLI_gsqueue_push(queue->queue, &work);

	/* signal threads waiting to pop */
	pthread_cond_signal(&queue->push_cond);
	pthread_mutex_unlock(&queue->mutex);
}
706
/* pop one work pointer, blocking until work arrives; returns NULL only
 * when the queue is empty and nowait has been set */
void *BLI_thread_queue_pop(ThreadQueue *queue)
{
	void *work = NULL;

	/* wait until there is work */
	pthread_mutex_lock(&queue->mutex);
	while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
		pthread_cond_wait(&queue->push_cond, &queue->mutex);
	
	/* if we have something, pop it */
	if (!BLI_gsqueue_is_empty(queue->queue)) {
		BLI_gsqueue_pop(queue->queue, &work);
		
		/* let BLI_thread_queue_wait_finish() know the queue drained */
		if (BLI_gsqueue_is_empty(queue->queue))
			pthread_cond_broadcast(&queue->finish_cond);
	}

	pthread_mutex_unlock(&queue->mutex);

	return work;
}
728
/* fill *timeout with the absolute time "ms" milliseconds from now,
 * suitable for pthread_cond_timedwait() */
static void wait_timeout(struct timespec *timeout, int ms)
{
	ldiv_t split;
	long sec, usec, total_usec;

#ifdef WIN32
	{
		struct _timeb now;
		_ftime(&now);
		sec = now.time;
		usec = now.millitm * 1000; /* microsecond precision would be better */
	}
#else
	{
		struct timeval now;
		gettimeofday(&now, NULL);
		sec = now.tv_sec;
		usec = now.tv_usec;
	}
#endif

	/* split the millisecond offset into whole seconds and microseconds,
	 * then add it to the current time */
	split = ldiv(ms, 1000);
	sec += split.quot;
	total_usec = usec + (split.rem * 1000);

	/* normalize so the sub-second part stays below one second */
	if (total_usec >= 1000000) {
		sec++;
		total_usec -= 1000000;
	}

	timeout->tv_sec = sec;
	timeout->tv_nsec = total_usec * 1000;
}
763
/* like BLI_thread_queue_pop(), but gives up after ms milliseconds and
 * returns NULL when no work arrived in time */
void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms)
{
	double t;
	void *work = NULL;
	struct timespec timeout;

	t = PIL_check_seconds_timer();
	wait_timeout(&timeout, ms);

	/* wait until there is work */
	pthread_mutex_lock(&queue->mutex);
	while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
		if (pthread_cond_timedwait(&queue->push_cond, &queue->mutex, &timeout) == ETIMEDOUT)
			break;
		/* extra wall-clock check, so spurious wakeups cannot extend the wait */
		else if (PIL_check_seconds_timer() - t >= ms * 0.001)
			break;
	}

	/* if we have something, pop it */
	if (!BLI_gsqueue_is_empty(queue->queue)) {
		BLI_gsqueue_pop(queue->queue, &work);
		
		/* let BLI_thread_queue_wait_finish() know the queue drained */
		if (BLI_gsqueue_is_empty(queue->queue))
			pthread_cond_broadcast(&queue->finish_cond);
	}
	
	pthread_mutex_unlock(&queue->mutex);

	return work;
}
794
/* current number of queued work pointers (a snapshot, may change at once) */
int BLI_thread_queue_size(ThreadQueue *queue)
{
	int size;

	pthread_mutex_lock(&queue->mutex);
	size = BLI_gsqueue_size(queue->queue);
	pthread_mutex_unlock(&queue->mutex);

	return size;
}
805
/* switch the queue to non-blocking mode: all current and future pop
 * calls return immediately once the queue is empty */
void BLI_thread_queue_nowait(ThreadQueue *queue)
{
	pthread_mutex_lock(&queue->mutex);

	queue->nowait = 1;

	/* signal threads waiting to pop */
	pthread_cond_broadcast(&queue->push_cond);
	pthread_mutex_unlock(&queue->mutex);
}
816
/* block until the queue has been drained by the consumer threads */
void BLI_thread_queue_wait_finish(ThreadQueue *queue)
{
	/* wait for finish condition */
	pthread_mutex_lock(&queue->mutex);

	while (!BLI_gsqueue_is_empty(queue->queue))
		pthread_cond_wait(&queue->finish_cond, &queue->mutex);

	pthread_mutex_unlock(&queue->mutex);
}
827
/* ************************************************ */

/* enable the guarded-allocator lock for a section that spawns threads;
 * nests via thread_levels and pairs with BLI_end_threaded_malloc() */
void BLI_begin_threaded_malloc(void)
{
	/* Used for debug only */
	/* BLI_assert(thread_levels >= 0); */

	if (thread_levels == 0) {
		MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
	}
	thread_levels++;
}

/* counterpart of BLI_begin_threaded_malloc(); removes the lock callbacks
 * again once the outermost level ends */
void BLI_end_threaded_malloc(void)
{
	/* Used for debug only */
	/* BLI_assert(thread_levels >= 0); */

	thread_levels--;
	if (thread_levels == 0)
		MEM_set_lock_callback(NULL, NULL);
}
850