1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/blenlib/intern/task.c
22  *  \ingroup bli
23  *
24  * A generic task system which can be used for any task based subsystem.
25  */
26
27 #include <stdlib.h>
#include <stdio.h>
#include <string.h>
28
29 #include "MEM_guardedalloc.h"
30
31 #include "DNA_listBase.h"
32
33 #include "BLI_listbase.h"
34 #include "BLI_math.h"
35 #include "BLI_task.h"
36 #include "BLI_threads.h"
37
38 #include "atomic_ops.h"
39
40 /* Define this to enable some detailed statistic print. */
41 #undef DEBUG_STATS
42
43 /* Types */
44
45 /* Number of per-thread pre-allocated tasks.
46  *
47  * For more details see the description of TaskMemPool.
48  */
49 #define MEMPOOL_SIZE 256
50
51 /* Number of tasks which are pushed directly to the local thread queue.
52  *
53  * This allows a thread to fetch the next task without locking the whole queue.
54  */
55 #define LOCALQUEUE_SIZE 1
56
57 #ifndef NDEBUG
58 #  define ASSERT_THREAD_ID(scheduler, thread_id)                              \
59         do {                                                                      \
60                 if (!BLI_thread_is_main()) {                                          \
61                         TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);  \
62                         if (thread == NULL) {                                             \
63                                 BLI_assert(thread_id == 0);                                   \
64                         }                                                                 \
65                         else {                                                            \
66                                 BLI_assert(thread_id == thread->id);                          \
67                         }                                                                 \
68                 }                                                                     \
69                 else {                                                                \
70                         BLI_assert(thread_id == 0);                                       \
71                 }                                                                     \
72         } while (false)
73 #else
74 #  define ASSERT_THREAD_ID(scheduler, thread_id)
75 #endif
76
77 typedef struct Task {
78         struct Task *next, *prev;
79
80         TaskRunFunction run;
81         void *taskdata;
82         bool free_taskdata;
83         TaskFreeFunction freedata;
84         TaskPool *pool;
85 } Task;
86
87 /* This is a per-thread storage of pre-allocated tasks.
88  *
89  * The idea behind this is simple: reduce the amount of malloc() calls when
90  * pushing a new task to the pool. This is done by keeping memory from tasks
91  * which have already finished, so instead of freeing that memory we put it
92  * into the pool for later re-use.
93  *
94  * The tricky part here is to avoid any inter-thread synchronization, hence no
95  * lock must exist around this pool. The pool becomes the owner of the pointer
96  * from the freed task, and only the corresponding thread is able to use this
97  * pool (no memory stealing and such).
98  *
99  * This leads to the following use of the pool:
100  *
101  * - task_push() should provide the proper ID of the thread from which the
102  *   task is being pushed.
103  *
104  * - The task allocation function checks the corresponding memory pool, and if
105  *   there is any memory in there it marks that memory as re-used, removes it
106  *   from the pool and uses it for the new task.
107  *
108  *   At this moment the task queue owns the memory.
109  *
110  * - When the task is done and task_free() is called, the memory is put back
111  *   into the pool which corresponds to the thread which handled the task.
112  */
113 typedef struct TaskMemPool {
114         /* Number of pre-allocated tasks in the pool. */
115         int num_tasks;
116         /* Pre-allocated task memory pointers. */
117         Task *tasks[MEMPOOL_SIZE];
118 } TaskMemPool;
119
120 #ifdef DEBUG_STATS
121 typedef struct TaskMemPoolStats {
122         /* Number of allocations. */
123         int num_alloc;
124         /* Number of avoided allocations (pointer was re-used from the pool). */
125         int num_reuse;
126         /* Number of tasks discarded due to pool saturation. */
127         int num_discard;
128 } TaskMemPoolStats;
129 #endif
130
131 typedef struct TaskThreadLocalStorage {
132         TaskMemPool task_mempool;
133         int num_local_queue;
134         Task *local_queue[LOCALQUEUE_SIZE];
135 } TaskThreadLocalStorage;
136
137 struct TaskPool {
138         TaskScheduler *scheduler;
139
140         volatile size_t num;
141         ThreadMutex num_mutex;
142         ThreadCondition num_cond;
143
144         void *userdata;
145         ThreadMutex user_mutex;
146
147         volatile bool do_cancel;
148         volatile bool do_work;
149
150         volatile bool is_suspended;
151         ListBase suspended_queue;
152         size_t num_suspended;
153
154         /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
155          * has to use its special background fallback thread in case we are in
156          * a single-threaded situation.
157          */
158         bool run_in_background;
159
160         /* This is the task scheduler's ID of the thread on which the pool was constructed.
161          * It will be used to access task TLS.
162          */
163         int thread_id;
164
165         /* For pools which are created from a non-main thread which is not a
166          * scheduler worker thread we can't re-use any of the scheduler threads' TLS
167          * and have to use our own.
168          */
169         bool use_local_tls;
170         TaskThreadLocalStorage local_tls;
171
172 #ifdef DEBUG_STATS
173         TaskMemPoolStats *mempool_stats;
174 #endif
175 };
176
177 struct TaskScheduler {
178         pthread_t *threads;
179         struct TaskThread *task_threads;
180         int num_threads;
181         bool background_thread_only;
182
183         ListBase queue;
184         ThreadMutex queue_mutex;
185         ThreadCondition queue_cond;
186
187         volatile bool do_exit;
188
189         /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
190         pthread_key_t tls_id_key;
191 };
192
193 typedef struct TaskThread {
194         TaskScheduler *scheduler;
195         int id;
196         TaskThreadLocalStorage tls;
197 } TaskThread;
198
199 /* Helper */
200 BLI_INLINE void task_data_free(Task *task, const int thread_id)
201 {
202         if (task->free_taskdata) {
203                 if (task->freedata) {
204                         task->freedata(task->pool, task->taskdata, thread_id);
205                 }
206                 else {
207                         MEM_freeN(task->taskdata);
208                 }
209         }
210 }
211
212 BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
213 {
214         memset(tls, 0, sizeof(TaskThreadLocalStorage));
215 }
216
217 BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool,
218                                                 const int thread_id)
219 {
220         TaskScheduler *scheduler = pool->scheduler;
221         BLI_assert(thread_id >= 0);
222         BLI_assert(thread_id <= scheduler->num_threads);
223         if (pool->use_local_tls) {
224                 BLI_assert(pool->thread_id == 0);
225                 return &pool->local_tls;
226         }
227         if (thread_id == 0) {
228                 return &scheduler->task_threads[pool->thread_id].tls;
229         }
230         return &scheduler->task_threads[thread_id].tls;
231 }
232
233 BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
234 {
235         TaskMemPool *task_mempool = &tls->task_mempool;
236         for (int i = 0; i < task_mempool->num_tasks; ++i) {
237                 MEM_freeN(task_mempool->tasks[i]);
238         }
239 }
240
241 static Task *task_alloc(TaskPool *pool, const int thread_id)
242 {
243         BLI_assert(thread_id <= pool->scheduler->num_threads);
244         if (thread_id != -1) {
245                 BLI_assert(thread_id >= 0);
246                 BLI_assert(thread_id <= pool->scheduler->num_threads);
247                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
248                 TaskMemPool *task_mempool = &tls->task_mempool;
249                 /* Try to re-use task memory from a thread local storage. */
250                 if (task_mempool->num_tasks > 0) {
251                         --task_mempool->num_tasks;
252                         /* Success! We've just avoided task allocation. */
253 #ifdef DEBUG_STATS
254                         pool->mempool_stats[thread_id].num_reuse++;
255 #endif
256                         return task_mempool->tasks[task_mempool->num_tasks];
257                 }
258                 /* We are doomed to allocate new task data. */
259 #ifdef DEBUG_STATS
260                 pool->mempool_stats[thread_id].num_alloc++;
261 #endif
262         }
263         return MEM_mallocN(sizeof(Task), "New task");
264 }
265
266 static void task_free(TaskPool *pool, Task *task, const int thread_id)
267 {
268         task_data_free(task, thread_id);
269         BLI_assert(thread_id >= 0);
270         BLI_assert(thread_id <= pool->scheduler->num_threads);
271         TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
272         TaskMemPool *task_mempool = &tls->task_mempool;
273         if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
274                 /* Successfully allowed the task to be re-used later. */
275                 task_mempool->tasks[task_mempool->num_tasks] = task;
276                 ++task_mempool->num_tasks;
277         }
278         else {
279                 /* Local storage saturated, no other way than to just discard
280                  * the memory.
281                  *
282                  * TODO(sergey): We can perhaps store such pointer in a global
283                  * scheduler pool, maybe it'll be faster than discarding and
284                  * allocating again.
285                  */
286                 MEM_freeN(task);
287 #ifdef DEBUG_STATS
288                 pool->mempool_stats[thread_id].num_discard++;
289 #endif
290         }
291 }
292
293 /* Task Scheduler */
294
295 static void task_pool_num_decrease(TaskPool *pool, size_t done)
296 {
297         BLI_mutex_lock(&pool->num_mutex);
298
299         BLI_assert(pool->num >= done);
300
301         pool->num -= done;
302
303         if (pool->num == 0)
304                 BLI_condition_notify_all(&pool->num_cond);
305
306         BLI_mutex_unlock(&pool->num_mutex);
307 }
308
309 static void task_pool_num_increase(TaskPool *pool, size_t new)
310 {
311         BLI_mutex_lock(&pool->num_mutex);
312
313         pool->num += new;
314         BLI_condition_notify_all(&pool->num_cond);
315
316         BLI_mutex_unlock(&pool->num_mutex);
317 }
318
319 static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
320 {
321         bool found_task = false;
322         BLI_mutex_lock(&scheduler->queue_mutex);
323
324         while (!scheduler->queue.first && !scheduler->do_exit)
325                 BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
326
327         do {
328                 Task *current_task;
329
330                 /* Assuming we can only have an empty queue in the 'exit' case here seems logical (we should only be here
331                  * after our worker thread has been woken up from a condition_wait(), which only happens after a new task
332                  * was added to the queue), but it is wrong.
333                  * Waiting on a condition may wake up the thread even if the condition is not signaled (spurious wake-ups),
334                  * and some race condition may also empty the queue **after** the condition has been signaled, but **before**
335                  * the awoken thread reaches this point...
336                  * See http://stackoverflow.com/questions/8594591
337                  *
338                  * So we only abort here if do_exit is set.
339                  */
340                 if (scheduler->do_exit) {
341                         BLI_mutex_unlock(&scheduler->queue_mutex);
342                         return false;
343                 }
344
345                 for (current_task = scheduler->queue.first;
346                      current_task != NULL;
347                      current_task = current_task->next)
348                 {
349                         TaskPool *pool = current_task->pool;
350
351                         if (scheduler->background_thread_only && !pool->run_in_background) {
352                                 continue;
353                         }
354
355                         *task = current_task;
356                         found_task = true;
357                         BLI_remlink(&scheduler->queue, *task);
358                         break;
359                 }
360                 if (!found_task)
361                         BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
362         } while (!found_task);
363
364         BLI_mutex_unlock(&scheduler->queue_mutex);
365
366         return true;
367 }
368
369 BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls,
370                                    const int thread_id)
371 {
372         while (tls->num_local_queue > 0) {
373                 /* We pop the task from the queue before handling it, so the handler of the
374                  * task can push the next job to the local queue.
375                  */
376                 tls->num_local_queue--;
377                 Task *local_task = tls->local_queue[tls->num_local_queue];
378                 /* TODO(sergey): Double-check that work_and_wait() doesn't handle other
379                  * pools' tasks.
380                  */
381                 TaskPool *local_pool = local_task->pool;
382                 local_task->run(local_pool, local_task->taskdata, thread_id);
383                 task_free(local_pool, local_task, thread_id);
384         }
385 }
386
387 static void *task_scheduler_thread_run(void *thread_p)
388 {
389         TaskThread *thread = (TaskThread *) thread_p;
390         TaskThreadLocalStorage *tls = &thread->tls;
391         TaskScheduler *scheduler = thread->scheduler;
392         int thread_id = thread->id;
393         Task *task;
394
395         pthread_setspecific(scheduler->tls_id_key, thread);
396
397         /* keep popping off tasks */
398         while (task_scheduler_thread_wait_pop(scheduler, &task)) {
399                 TaskPool *pool = task->pool;
400
401                 /* run task */
402                 task->run(pool, task->taskdata, thread_id);
403
404                 /* delete task */
405                 task_free(pool, task, thread_id);
406
407                 /* Handle all tasks from local queue. */
408                 handle_local_queue(tls, thread_id);
409
410                 /* notify pool task was done */
411                 task_pool_num_decrease(pool, 1);
412         }
413
414         return NULL;
415 }
416
417 TaskScheduler *BLI_task_scheduler_create(int num_threads)
418 {
419         TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
420
421         /* multiple places can use this task scheduler, sharing the same
422          * threads, so we keep track of the number of users. */
423         scheduler->do_exit = false;
424
425         BLI_listbase_clear(&scheduler->queue);
426         BLI_mutex_init(&scheduler->queue_mutex);
427         BLI_condition_init(&scheduler->queue_cond);
428
429         if (num_threads == 0) {
430                 /* automatic number of threads will be main thread + num cores */
431                 num_threads = BLI_system_thread_count();
432         }
433
434         /* main thread will also work, so we count it too */
435         num_threads -= 1;
436
437         /* Add background-only thread if needed. */
438         if (num_threads == 0) {
439                 scheduler->background_thread_only = true;
440                 num_threads = 1;
441         }
442
443         scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
444                                               "TaskScheduler task threads");
445
446         /* Initialize TLS for main thread. */
447         initialize_task_tls(&scheduler->task_threads[0].tls);
448
449         pthread_key_create(&scheduler->tls_id_key, NULL);
450
451         /* launch threads that will be waiting for work */
452         if (num_threads > 0) {
453                 int i;
454
455                 scheduler->num_threads = num_threads;
456                 scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
457
458                 for (i = 0; i < num_threads; i++) {
459                         TaskThread *thread = &scheduler->task_threads[i + 1];
460                         thread->scheduler = scheduler;
461                         thread->id = i + 1;
462                         initialize_task_tls(&thread->tls);
463
464                         if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
465                                 fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
466                         }
467                 }
468         }
469
470         return scheduler;
471 }
472
473 void BLI_task_scheduler_free(TaskScheduler *scheduler)
474 {
475         Task *task;
476
477         /* stop all waiting threads */
478         BLI_mutex_lock(&scheduler->queue_mutex);
479         scheduler->do_exit = true;
480         BLI_condition_notify_all(&scheduler->queue_cond);
481         BLI_mutex_unlock(&scheduler->queue_mutex);
482
483         pthread_key_delete(scheduler->tls_id_key);
484
485         /* delete threads */
486         if (scheduler->threads) {
487                 int i;
488
489                 for (i = 0; i < scheduler->num_threads; i++) {
490                         if (pthread_join(scheduler->threads[i], NULL) != 0)
491                                 fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
492                 }
493
494                 MEM_freeN(scheduler->threads);
495         }
496
497         /* Delete task thread data */
498         if (scheduler->task_threads) {
499                 for (int i = 0; i < scheduler->num_threads + 1; ++i) {
500                         TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
501                         free_task_tls(tls);
502                 }
503
504                 MEM_freeN(scheduler->task_threads);
505         }
506
507         /* delete leftover tasks */
508         for (task = scheduler->queue.first; task; task = task->next) {
509                 task_data_free(task, 0);
510         }
511         BLI_freelistN(&scheduler->queue);
512
513         /* delete mutex/condition */
514         BLI_mutex_end(&scheduler->queue_mutex);
515         BLI_condition_end(&scheduler->queue_cond);
516
517         MEM_freeN(scheduler);
518 }
519
520 int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
521 {
522         return scheduler->num_threads + 1;
523 }
524
525 static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
526 {
527         task_pool_num_increase(task->pool, 1);
528
529         /* add task to queue */
530         BLI_mutex_lock(&scheduler->queue_mutex);
531
532         if (priority == TASK_PRIORITY_HIGH)
533                 BLI_addhead(&scheduler->queue, task);
534         else
535                 BLI_addtail(&scheduler->queue, task);
536
537         BLI_condition_notify_one(&scheduler->queue_cond);
538         BLI_mutex_unlock(&scheduler->queue_mutex);
539 }
540
541 static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
542 {
543         Task *task, *nexttask;
544         size_t done = 0;
545
546         BLI_mutex_lock(&scheduler->queue_mutex);
547
548         /* free all tasks from this pool from the queue */
549         for (task = scheduler->queue.first; task; task = nexttask) {
550                 nexttask = task->next;
551
552                 if (task->pool == pool) {
553                         task_data_free(task, pool->thread_id);
554                         BLI_freelinkN(&scheduler->queue, task);
555
556                         done++;
557                 }
558         }
559
560         BLI_mutex_unlock(&scheduler->queue_mutex);
561
562         /* notify done */
563         task_pool_num_decrease(pool, done);
564 }
565
566 /* Task Pool */
567
568 static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
569                                      void *userdata,
570                                      const bool is_background,
571                                      const bool is_suspended)
572 {
573         TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
574
575 #ifndef NDEBUG
576         /* Assert we do not try to create a background pool from some parent task - those only work OK from the main thread. */
577         if (is_background) {
578                 const pthread_t thread_id = pthread_self();
579                 int i = scheduler->num_threads;
580
581                 while (i--) {
582                         BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
583                 }
584         }
585 #endif
586
587         pool->scheduler = scheduler;
588         pool->num = 0;
589         pool->do_cancel = false;
590         pool->do_work = false;
591         pool->is_suspended = is_suspended;
592         pool->num_suspended = 0;
593         pool->suspended_queue.first = pool->suspended_queue.last = NULL;
594         pool->run_in_background = is_background;
595         pool->use_local_tls = false;
596
597         BLI_mutex_init(&pool->num_mutex);
598         BLI_condition_init(&pool->num_cond);
599
600         pool->userdata = userdata;
601         BLI_mutex_init(&pool->user_mutex);
602
603         if (BLI_thread_is_main()) {
604                 pool->thread_id = 0;
605         }
606         else {
607                 TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);
608                 if (thread == NULL) {
609                         /* NOTE: Task pool is created from non-main thread which is not
610                          * managed by the task scheduler. We identify ourselves as thread ID
611                          * 0 but we do not use scheduler's TLS storage and use our own
612                          * instead to avoid any possible threading conflicts.
613                          */
614                         pool->thread_id = 0;
615                         pool->use_local_tls = true;
616                         initialize_task_tls(&pool->local_tls);
617                 }
618                 else {
619                         pool->thread_id = thread->id;
620                 }
621         }
622
623 #ifdef DEBUG_STATS
624         pool->mempool_stats =
625                 MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1),
626                             "per-taskpool mempool stats");
627 #endif
628
629         /* Ensure malloc will go fine from threads.
630          *
631          * This is needed because we could be in the main thread here
632          * and malloc could be non-thread-safe at this point because
633          * no other jobs are running.
634          */
635         BLI_begin_threaded_malloc();
636
637         return pool;
638 }
639
640 /**
641  * Create a normal task pool.
642  * This means that in a single-threaded context, it will not be executed at all until you call
643  * \a BLI_task_pool_work_and_wait() on it.
644  */
645 TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
646 {
647         return task_pool_create_ex(scheduler, userdata, false, false);
648 }
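
/* A minimal usage sketch of a regular pool, kept here for reference only.
 * The callback, data type and counters below (my_run_cb, MyJobData, jobs,
 * num_jobs) are hypothetical and not part of this API:
 *
 *   static void my_run_cb(TaskPool *pool, void *taskdata, int threadid)
 *   {
 *           MyJobData *job = taskdata;    (MyJobData is a hypothetical payload type)
 *           ... crunch the job ...
 *   }
 *
 *   TaskScheduler *scheduler = BLI_task_scheduler_get();
 *   TaskPool *pool = BLI_task_pool_create(scheduler, NULL);
 *   for (int i = 0; i < num_jobs; i++) {
 *           BLI_task_pool_push(pool, my_run_cb, &jobs[i], false, TASK_PRIORITY_HIGH);
 *   }
 *   BLI_task_pool_work_and_wait(pool);
 *   BLI_task_pool_free(pool);
 */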
649
650 /**
651  * Create a background task pool.
652  * In a multi-threaded context, there is no difference from \a BLI_task_pool_create(), but in the single-threaded case
653  * it is ensured to have at least one worker thread to run on (i.e. you do not have to call
654  * \a BLI_task_pool_work_and_wait() on it to be sure it will be processed).
655  *
656  * \note Background pools are non-recursive (that is, you should not create other background pools in tasks assigned
657  *       to a background pool, as they could end up never being executed, since the 'fallback' background thread is
658  *       already busy with the parent task in a single-threaded context).
659  */
660 TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
661 {
662         return task_pool_create_ex(scheduler, userdata, true, false);
663 }
664
665 /**
666  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
667  * until BLI_task_pool_work_and_wait() is called. This helps to reduce threading
668  * overhead when pushing a huge amount of small initial tasks from the main thread.
669  */
670 TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
671 {
672         return task_pool_create_ex(scheduler, userdata, false, true);
673 }
674
675 void BLI_task_pool_free(TaskPool *pool)
676 {
677         BLI_task_pool_cancel(pool);
678
679         BLI_mutex_end(&pool->num_mutex);
680         BLI_condition_end(&pool->num_cond);
681
682         BLI_mutex_end(&pool->user_mutex);
683
684 #ifdef DEBUG_STATS
685         printf("Thread ID    Allocated   Reused   Discarded\n");
686         for (int i = 0; i < pool->scheduler->num_threads + 1; ++i) {
687                 printf("%02d           %05d       %05d    %05d\n",
688                        i,
689                        pool->mempool_stats[i].num_alloc,
690                        pool->mempool_stats[i].num_reuse,
691                        pool->mempool_stats[i].num_discard);
692         }
693         MEM_freeN(pool->mempool_stats);
694 #endif
695
696         if (pool->use_local_tls) {
697                 free_task_tls(&pool->local_tls);
698         }
699
700         MEM_freeN(pool);
701
702         BLI_end_threaded_malloc();
703 }
704
705 static void task_pool_push(
706         TaskPool *pool, TaskRunFunction run, void *taskdata,
707         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority,
708         int thread_id)
709 {
710         Task *task = task_alloc(pool, thread_id);
711
712         task->run = run;
713         task->taskdata = taskdata;
714         task->free_taskdata = free_taskdata;
715         task->freedata = freedata;
716         task->pool = pool;
717
718         if (pool->is_suspended) {
719                 BLI_addhead(&pool->suspended_queue, task);
720                 atomic_fetch_and_add_z(&pool->num_suspended, 1);
721                 return;
722         }
723
724         if (thread_id != -1 &&
725             (thread_id != pool->thread_id || pool->do_work))
726         {
727                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
728
729                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
730                 if (tls->num_local_queue < LOCALQUEUE_SIZE) {
731                         tls->local_queue[tls->num_local_queue] = task;
732                         tls->num_local_queue++;
733                         return;
734                 }
735         }
736
737         task_scheduler_push(pool->scheduler, task, priority);
738 }
739
740 void BLI_task_pool_push_ex(
741         TaskPool *pool, TaskRunFunction run, void *taskdata,
742         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority)
743 {
744         task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
745 }
746
747 void BLI_task_pool_push(
748         TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
749 {
750         BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
751 }
752
753 void BLI_task_pool_push_from_thread(TaskPool *pool, TaskRunFunction run,
754         void *taskdata, bool free_taskdata, TaskPriority priority, int thread_id)
755 {
756         task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
757 }
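
/* A sketch of how a running task can push follow-up work from its own thread,
 * so the new task may end up in that thread's local queue (see LOCALQUEUE_SIZE)
 * instead of the locked scheduler queue. The names my_run_cb, my_followup_cb and
 * followup_data are hypothetical:
 *
 *   static void my_run_cb(TaskPool *pool, void *taskdata, int threadid)
 *   {
 *           ... do some work, producing followup_data ...
 *           BLI_task_pool_push_from_thread(pool, my_followup_cb, followup_data, false,
 *                                          TASK_PRIORITY_HIGH, threadid);
 *   }
 */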
758
759 void BLI_task_pool_work_and_wait(TaskPool *pool)
760 {
761         TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
762         TaskScheduler *scheduler = pool->scheduler;
763
764         if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
765                 if (pool->num_suspended) {
766                         task_pool_num_increase(pool, pool->num_suspended);
767                         BLI_mutex_lock(&scheduler->queue_mutex);
768
769                         BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue);
770
771                         BLI_condition_notify_all(&scheduler->queue_cond);
772                         BLI_mutex_unlock(&scheduler->queue_mutex);
773                 }
774         }
775
776         pool->do_work = true;
777
778         ASSERT_THREAD_ID(pool->scheduler, pool->thread_id);
779
780         BLI_mutex_lock(&pool->num_mutex);
781
782         while (pool->num != 0) {
783                 Task *task, *work_task = NULL;
784                 bool found_task = false;
785
786                 BLI_mutex_unlock(&pool->num_mutex);
787
788                 BLI_mutex_lock(&scheduler->queue_mutex);
789
790                 /* find task from this pool. if we get a task from another pool,
791                  * we can get into deadlock */
792
793                 for (task = scheduler->queue.first; task; task = task->next) {
794                         if (task->pool == pool) {
795                                 work_task = task;
796                                 found_task = true;
797                                 BLI_remlink(&scheduler->queue, task);
798                                 break;
799                         }
800                 }
801
802                 BLI_mutex_unlock(&scheduler->queue_mutex);
803
804                 /* if found task, do it, otherwise wait until other tasks are done */
805                 if (found_task) {
806                         /* run task */
807                         work_task->run(pool, work_task->taskdata, pool->thread_id);
808
809                         /* delete task */
810                         task_free(pool, task, pool->thread_id);
811
812                         /* Handle all tasks from local queue. */
813                         handle_local_queue(tls, pool->thread_id);
814
815                         /* notify pool task was done */
816                         task_pool_num_decrease(pool, 1);
817                 }
818
819                 BLI_mutex_lock(&pool->num_mutex);
820                 if (pool->num == 0)
821                         break;
822
823                 if (!found_task)
824                         BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
825         }
826
827         BLI_mutex_unlock(&pool->num_mutex);
828
829         handle_local_queue(tls, pool->thread_id);
830 }
831
832 void BLI_task_pool_cancel(TaskPool *pool)
833 {
834         pool->do_cancel = true;
835
836         task_scheduler_clear(pool->scheduler, pool);
837
838         /* wait until all entries are cleared */
839         BLI_mutex_lock(&pool->num_mutex);
840         while (pool->num)
841                 BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
842         BLI_mutex_unlock(&pool->num_mutex);
843
844         pool->do_cancel = false;
845 }
846
847 bool BLI_task_pool_canceled(TaskPool *pool)
848 {
849         return pool->do_cancel;
850 }
851
852 void *BLI_task_pool_userdata(TaskPool *pool)
853 {
854         return pool->userdata;
855 }
856
857 ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
858 {
859         return &pool->user_mutex;
860 }
861
862 /* Parallel range routines */
863
864 /**
865  *
866  * Main functions:
867  * - #BLI_task_parallel_range
868  * - #BLI_task_parallel_listbase (#ListBase - double linked list)
869  *
870  * TODO:
871  * - #BLI_task_parallel_foreach_link (#Link - single linked list)
872  * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
873  * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
874  *
875  */
876
877 /* Allows avoiding the use of malloc for userdata_chunk in tasks, when it is small enough. */
878 #define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
879 #define MALLOCA_FREE(_mem, _size) if (((_mem) != NULL) && ((_size) > 8192)) MEM_freeN((_mem))
880
881 typedef struct ParallelRangeState {
882         int start, stop;
883         void *userdata;
884
885         TaskParallelRangeFunc func;
886         TaskParallelRangeFuncEx func_ex;
887
888         int iter;
889         int chunk_size;
890 } ParallelRangeState;
891
892 BLI_INLINE bool parallel_range_next_iter_get(
893         ParallelRangeState * __restrict state,
894         int * __restrict iter, int * __restrict count)
895 {
896         uint32_t uval = atomic_fetch_and_add_uint32((uint32_t *)(&state->iter), state->chunk_size);
897         int previter = *(int32_t *)&uval;
898
899         *iter = previter;
900         *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
901
902         return (previter < state->stop);
903 }
904
905 static void parallel_range_func(
906         TaskPool * __restrict pool,
907         void *userdata_chunk,
908         int threadid)
909 {
910         ParallelRangeState * __restrict state = BLI_task_pool_userdata(pool);
911         int iter, count;
912
913         while (parallel_range_next_iter_get(state, &iter, &count)) {
914                 int i;
915
916                 if (state->func_ex) {
917                         for (i = 0; i < count; ++i) {
918                                 state->func_ex(state->userdata, userdata_chunk, iter + i, threadid);
919                         }
920                 }
921                 else {
922                         for (i = 0; i < count; ++i) {
923                                 state->func(state->userdata, iter + i);
924                         }
925                 }
926         }
927 }
928
929 /**
930  * This function allows parallelizing for loops in a similar way to OpenMP's 'parallel for' statement.
931  *
932  * See public API doc for description of parameters.
933  */
934 static void task_parallel_range_ex(
935         int start, int stop,
936         void *userdata,
937         void *userdata_chunk,
938         const size_t userdata_chunk_size,
939         TaskParallelRangeFunc func,
940         TaskParallelRangeFuncEx func_ex,
941         TaskParallelRangeFuncFinalize func_finalize,
942         const bool use_threading,
943         const bool use_dynamic_scheduling)
944 {
945         TaskScheduler *task_scheduler;
946         TaskPool *task_pool;
947         ParallelRangeState state;
948         int i, num_threads, num_tasks;
949
950         void *userdata_chunk_local = NULL;
951         void *userdata_chunk_array = NULL;
952         const bool use_userdata_chunk = (func_ex != NULL) && (userdata_chunk_size != 0) && (userdata_chunk != NULL);
953
954         if (start == stop) {
955                 return;
956         }
957
958         BLI_assert(start < stop);
959         if (userdata_chunk_size != 0) {
960                 BLI_assert(func_ex != NULL && func == NULL);
961                 BLI_assert(userdata_chunk != NULL);
962         }
963
964         /* If there is not enough data to be crunched, don't bother with tasks at all
965          * and do everything from the main thread.
966          */
967         if (!use_threading) {
968                 if (func_ex) {
969                         if (use_userdata_chunk) {
970                                 userdata_chunk_local = MALLOCA(userdata_chunk_size);
971                                 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
972                         }
973
974                         for (i = start; i < stop; ++i) {
975                                 func_ex(userdata, userdata_chunk_local, i, 0);
976                         }
977
978                         if (func_finalize) {
979                                 func_finalize(userdata, userdata_chunk_local);
980                         }
981
982                         MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
983                 }
984                 else {
985                         for (i = start; i < stop; ++i) {
986                                 func(userdata, i);
987                         }
988                 }
989
990                 return;
991         }
992
993         task_scheduler = BLI_task_scheduler_get();
994         task_pool = BLI_task_pool_create(task_scheduler, &state);
995         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
996
997         /* The idea here is to prevent creating a task for each of the loop iterations
998          * and instead have tasks which are evenly distributed across CPU cores and
999          * pull the next iter to be crunched using the queue.
1000          */
1001         num_tasks = num_threads * 2;
1002
1003         state.start = start;
1004         state.stop = stop;
1005         state.userdata = userdata;
1006         state.func = func;
1007         state.func_ex = func_ex;
1008         state.iter = start;
1009         if (use_dynamic_scheduling) {
1010                 state.chunk_size = 32;
1011         }
1012         else {
1013                 state.chunk_size = max_ii(1, (stop - start) / (num_tasks));
1014         }
1015
1016         num_tasks = min_ii(num_tasks, (stop - start) / state.chunk_size);
1017         atomic_fetch_and_add_uint32((uint32_t *)(&state.iter), 0);
1018
1019         if (use_userdata_chunk) {
1020                 userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
1021         }
1022
1023         for (i = 0; i < num_tasks; i++) {
1024                 if (use_userdata_chunk) {
1025                         userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1026                         memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1027                 }
1028                 /* Use this pool's pre-allocated tasks. */
1029                 BLI_task_pool_push_from_thread(task_pool,
1030                                                parallel_range_func,
1031                                                userdata_chunk_local, false,
1032                                                TASK_PRIORITY_HIGH,
1033                                                task_pool->thread_id);
1034         }
1035
1036         BLI_task_pool_work_and_wait(task_pool);
1037         BLI_task_pool_free(task_pool);
1038
1039         if (use_userdata_chunk) {
1040                 if (func_finalize) {
1041                         for (i = 0; i < num_tasks; i++) {
1042                                 userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1043                                 func_finalize(userdata, userdata_chunk_local);
1044                         }
1045                 }
1046                 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
1047         }
1048 }
1049
1050 /**
1051  * This function allows parallelizing for loops in a similar way to OpenMP's 'parallel for' statement.
1052  *
1053  * \param start First index to process.
1054  * \param stop Index to stop looping (excluded).
1055  * \param userdata Common userdata passed to all instances of \a func.
1056  * \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
1057  *                       (similar to OpenMP's firstprivate).
1058  * \param userdata_chunk_size Memory size of \a userdata_chunk.
1059  * \param func_ex Callback function (advanced version).
1060  * \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential for-loop
1061  *                      (allows caller to use any kind of test to switch on parallelization or not).
1062  * \param use_dynamic_scheduling If \a true, the whole range is divided in a lot of small chunks (of size 32 currently),
1063  *                               otherwise whole range is split in a few big chunks (num_threads * 2 chunks currently).
1064  */
1065 void BLI_task_parallel_range_ex(
1066         int start, int stop,
1067         void *userdata,
1068         void *userdata_chunk,
1069         const size_t userdata_chunk_size,
1070         TaskParallelRangeFuncEx func_ex,
1071         const bool use_threading,
1072         const bool use_dynamic_scheduling)
1073 {
1074         task_parallel_range_ex(
1075                     start, stop, userdata, userdata_chunk, userdata_chunk_size, NULL, func_ex, NULL,
1076                     use_threading, use_dynamic_scheduling);
1077 }
1078
1079 /**
1080  * A simpler version of \a BLI_task_parallel_range_ex, which does not use \a use_dynamic_scheduling,
1081  * and does not handle 'firstprivate'-like \a userdata_chunk.
1082  *
1083  * \param start First index to process.
1084  * \param stop Index to stop looping (excluded).
1085  * \param userdata Common userdata passed to all instances of \a func.
1086  * \param func Callback function (simple version).
1087  * \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential for-loop
1088  *                      (allows caller to use any kind of test to switch on parallelization or not).
1089  */
1090 void BLI_task_parallel_range(
1091         int start, int stop,
1092         void *userdata,
1093         TaskParallelRangeFunc func,
1094         const bool use_threading)
1095 {
1096         task_parallel_range_ex(start, stop, userdata, NULL, 0, func, NULL, NULL, use_threading, false);
1097 }
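
/* A minimal usage sketch of the simple range variant; the callback and the
 * verts/num_verts names are hypothetical:
 *
 *   static void scale_vert_cb(void *userdata, const int index)
 *   {
 *           float (*verts)[3] = userdata;
 *           mul_v3_fl(verts[index], 2.0f);
 *   }
 *
 *   BLI_task_parallel_range(0, num_verts, verts, scale_vert_cb, (num_verts > 1000));
 */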
1098
1099 /**
1100  * This function allows parallelizing for loops in a similar way to OpenMP's 'parallel for' statement,
1101  * with an additional 'finalize' func called from the calling thread once the whole range has been processed.
1102  *
1103  * \param start First index to process.
1104  * \param stop Index to stop looping (excluded).
1105  * \param userdata Common userdata passed to all instances of \a func.
1106  * \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
1107  *                       (similar to OpenMP's firstprivate).
1108  * \param userdata_chunk_size Memory size of \a userdata_chunk.
1109  * \param func_ex Callback function (advanced version).
1110  * \param func_finalize Callback function, called after all workers have finished,
1111  * useful to finalize accumulative tasks.
1112  * \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential for-loop
1113  *                      (allows caller to use any kind of test to switch on parallelization or not).
1114  * \param use_dynamic_scheduling If \a true, the whole range is divided in a lot of small chunks (of size 32 currently),
1115  *                               otherwise whole range is split in a few big chunks (num_threads * 2 chunks currently).
1116  */
1117 void BLI_task_parallel_range_finalize(
1118         int start, int stop,
1119         void *userdata,
1120         void *userdata_chunk,
1121         const size_t userdata_chunk_size,
1122         TaskParallelRangeFuncEx func_ex,
1123         TaskParallelRangeFuncFinalize func_finalize,
1124         const bool use_threading,
1125         const bool use_dynamic_scheduling)
1126 {
1127         task_parallel_range_ex(
1128                     start, stop, userdata, userdata_chunk, userdata_chunk_size, NULL, func_ex, func_finalize,
1129                     use_threading, use_dynamic_scheduling);
1130 }
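
/* A sketch of an accumulative use of the finalize variant: every worker gets its
 * own copy of a partial sum through the userdata_chunk mechanism, and the partial
 * sums are merged from the calling thread once all workers are done. All names
 * here (SumData, sum_cb_ex, sum_finalize_cb, values, num_values) are hypothetical:
 *
 *   typedef struct SumData { const float *values; float total; } SumData;
 *
 *   static void sum_cb_ex(void *userdata, void *userdata_chunk, const int iter, const int threadid)
 *   {
 *           SumData *data = userdata;
 *           float *partial = userdata_chunk;
 *           *partial += data->values[iter];
 *   }
 *
 *   static void sum_finalize_cb(void *userdata, void *userdata_chunk)
 *   {
 *           SumData *data = userdata;
 *           data->total += *(float *)userdata_chunk;
 *   }
 *
 *   SumData data = {values, 0.0f};
 *   float partial_init = 0.0f;
 *   BLI_task_parallel_range_finalize(0, num_values, &data, &partial_init, sizeof(partial_init),
 *                                    sum_cb_ex, sum_finalize_cb, true, false);
 */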
1131
1132 #undef MALLOCA
1133 #undef MALLOCA_FREE
1134
1135 typedef struct ParallelListbaseState {
1136         void *userdata;
1137         TaskParallelListbaseFunc func;
1138
1139         int chunk_size;
1140         int index;
1141         Link *link;
1142         SpinLock lock;
1143 } ParallelListState;
1144
1145 BLI_INLINE Link *parallel_listbase_next_iter_get(
1146         ParallelListState * __restrict state,
1147         int * __restrict index,
1148         int * __restrict count)
1149 {
1150         int task_count = 0;
1151         BLI_spin_lock(&state->lock);
1152         Link *result = state->link;
1153         if (LIKELY(result != NULL)) {
1154                 *index = state->index;
1155                 while (state->link != NULL && task_count < state->chunk_size) {
1156                         ++task_count;
1157                         state->link = state->link->next;
1158                 }
1159                 state->index += task_count;
1160         }
1161         BLI_spin_unlock(&state->lock);
1162         *count = task_count;
1163         return result;
1164 }
1165
1166 static void parallel_listbase_func(
1167         TaskPool * __restrict pool,
1168         void *UNUSED(taskdata),
1169         int UNUSED(threadid))
1170 {
1171         ParallelListState * __restrict state = BLI_task_pool_userdata(pool);
1172         Link *link;
1173         int index, count;
1174
1175         while ((link = parallel_listbase_next_iter_get(state, &index, &count)) != NULL) {
1176                 for (int i = 0; i < count; ++i) {
1177                         state->func(state->userdata, link, index + i);
1178                         link = link->next;
1179                 }
1180         }
1181 }
1182
1183 /**
1184  * This function allows parallelizing for loops over ListBase items.
1185  *
1186  * \param listbase The double linked list to loop over.
1187  * \param userdata Common userdata passed to all instances of \a func.
1188  * \param func Callback function.
1189  * \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential for-loop
1190  *                      (allows caller to use any kind of test to switch on parallelization or not).
1191  *
1192  * \note There is no static scheduling here, since it would need another full loop over items to count them...
1193  */
1194 void BLI_task_parallel_listbase(
1195         struct ListBase *listbase,
1196         void *userdata,
1197         TaskParallelListbaseFunc func,
1198         const bool use_threading)
1199 {
1200         TaskScheduler *task_scheduler;
1201         TaskPool *task_pool;
1202         ParallelListState state;
1203         int i, num_threads, num_tasks;
1204
1205         if (BLI_listbase_is_empty(listbase)) {
1206                 return;
1207         }
1208
1209         if (!use_threading) {
1210                 i = 0;
1211                 for (Link *link = listbase->first; link != NULL; link = link->next, ++i) {
1212                         func(userdata, link, i);
1213                 }
1214                 return;
1215         }
1216
1217         task_scheduler = BLI_task_scheduler_get();
1218         task_pool = BLI_task_pool_create(task_scheduler, &state);
1219         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1220
1221         /* The idea here is to prevent creating a task for each of the loop iterations
1222          * and instead have tasks which are evenly distributed across CPU cores and
1223          * pull the next iter to be crunched using the queue.
1224          */
1225         num_tasks = num_threads * 2;
1226
1227         state.index = 0;
1228         state.link = listbase->first;
1229         state.userdata = userdata;
1230         state.func = func;
1231         state.chunk_size = 32;
1232         BLI_spin_init(&state.lock);
1233
1234         for (i = 0; i < num_tasks; i++) {
1235                 /* Use this pool's pre-allocated tasks. */
1236                 BLI_task_pool_push_from_thread(task_pool,
1237                                                parallel_listbase_func,
1238                                                NULL, false,
1239                                                TASK_PRIORITY_HIGH,
1240                                                task_pool->thread_id);
1241         }
1242
1243         BLI_task_pool_work_and_wait(task_pool);
1244         BLI_task_pool_free(task_pool);
1245
1246         BLI_spin_end(&state.lock);
1247 }
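
/* A minimal usage sketch; the callback, item type and list below (tag_item_cb,
 * MyItem, my_list) are hypothetical:
 *
 *   static void tag_item_cb(void *UNUSED(userdata), Link *iter, int index)
 *   {
 *           MyItem *item = (MyItem *)iter;    (MyItem is a hypothetical ListBase item type)
 *           item->index = index;
 *   }
 *
 *   BLI_task_parallel_listbase(&my_list, NULL, tag_item_cb, true);
 */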