1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/blenlib/intern/task.c
22  *  \ingroup bli
23  *
24  * A generic task system which can be used for any task-based subsystem.
25  */
26
27 #include <stdlib.h>
28
29 #include "MEM_guardedalloc.h"
30
31 #include "DNA_listBase.h"
32
33 #include "BLI_listbase.h"
34 #include "BLI_math.h"
35 #include "BLI_task.h"
36 #include "BLI_threads.h"
37
38 #include "atomic_ops.h"
39
40 /* Define this to enable some detailed statistics printing. */
41 #undef DEBUG_STATS
42
43 /* Types */
44
45 /* Number of per-thread pre-allocated tasks.
46  *
47  * For more details see description of TaskMemPool.
48  */
49 #define MEMPOOL_SIZE 256
50
51 /* Number of tasks which are pushed directly to local thread queue.
52  *
53  * This allows a thread to fetch the next task without locking the whole queue.
54  */
55 #define LOCALQUEUE_SIZE 1
56
57 #ifndef NDEBUG
58 #  define ASSERT_THREAD_ID(scheduler, thread_id)                              \
59         do {                                                                      \
60                 if (!BLI_thread_is_main()) {                                          \
61                         TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);  \
62                         if (thread == NULL) {                                             \
63                                 BLI_assert(thread_id == 0);                                   \
64                         }                                                                 \
65                         else {                                                            \
66                                 BLI_assert(thread_id == thread->id);                          \
67                         }                                                                 \
68                 }                                                                     \
69                 else {                                                                \
70                         BLI_assert(thread_id == 0);                                       \
71                 }                                                                     \
72         } while (false)
73 #else
74 #  define ASSERT_THREAD_ID(scheduler, thread_id)
75 #endif
76
77 typedef struct Task {
78         struct Task *next, *prev;
79
80         TaskRunFunction run;
81         void *taskdata;
82         bool free_taskdata;
83         TaskFreeFunction freedata;
84         TaskPool *pool;
85 } Task;
86
87 /* This is per-thread storage of pre-allocated tasks.
88  *
89  * The idea behind this is simple: reduce the number of malloc() calls when
90  * pushing a new task to the pool. This is done by keeping the memory from
91  * tasks which have already finished, so instead of freeing that memory we
92  * put it into the pool for later re-use.
93  *
94  * The tricky part here is to avoid any inter-thread synchronization, hence no
95  * lock may exist around this pool. The pool becomes the owner of the pointer
96  * from the freed task, and only the corresponding thread is able to use this
97  * pool (no memory stealing and such).
98  *
99  * This leads to the following use of the pool:
100  *
101  * - task_push() should provide the proper ID of the thread from which the task
102  *   is being pushed.
103  *
104  * - The task allocation function checks the corresponding memory pool and, if
105  *   there is any memory in there, marks it as re-used, removes it from the pool
106  *   and uses that memory for the new task.
107  *
108  *   At this moment the task queue owns the memory.
109  *
110  * - When a task is done and task_free() is called, the memory is put back into
111  *   the pool which corresponds to the thread which handled the task.
112  */
113 typedef struct TaskMemPool {
114         /* Number of pre-allocated tasks in the pool. */
115         int num_tasks;
116         /* Pre-allocated task memory pointers. */
117         Task *tasks[MEMPOOL_SIZE];
118 } TaskMemPool;
119
120 #ifdef DEBUG_STATS
121 typedef struct TaskMemPoolStats {
122         /* Number of allocations. */
123         int num_alloc;
124         /* Number of avoided allocations (pointer was re-used from the pool). */
125         int num_reuse;
126         /* Number of task memory blocks discarded due to pool saturation. */
127         int num_discard;
128 } TaskMemPoolStats;
129 #endif
130
131 typedef struct TaskThreadLocalStorage {
132         TaskMemPool task_mempool;
133         int num_local_queue;
134         Task *local_queue[LOCALQUEUE_SIZE];
135 } TaskThreadLocalStorage;
136
137 struct TaskPool {
138         TaskScheduler *scheduler;
139
140         volatile size_t num;
141         ThreadMutex num_mutex;
142         ThreadCondition num_cond;
143
144         void *userdata;
145         ThreadMutex user_mutex;
146
147         volatile bool do_cancel;
148         volatile bool do_work;
149
150         volatile bool is_suspended;
151         ListBase suspended_queue;
152         size_t num_suspended;
153
154         /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
155          * has to use its special background fallback thread in case we are in a
156          * single-threaded situation.
157          */
158         bool run_in_background;
159
160         /* This is the task scheduler's ID of the thread on which the pool was constructed.
161          * It is used to access the task TLS.
162          */
163         int thread_id;
164
165         /* For pools which are created from a non-main thread which is not a
166          * scheduler worker thread, we can't re-use any of the scheduler's thread TLS
167          * and have to use our own.
168          */
169         bool use_local_tls;
170         TaskThreadLocalStorage local_tls;
171 #ifndef NDEBUG
172         pthread_t creator_thread_id;
173 #endif
174
175 #ifdef DEBUG_STATS
176         TaskMemPoolStats *mempool_stats;
177 #endif
178 };
179
180 struct TaskScheduler {
181         pthread_t *threads;
182         struct TaskThread *task_threads;
183         int num_threads;
184         bool background_thread_only;
185
186         ListBase queue;
187         ThreadMutex queue_mutex;
188         ThreadCondition queue_cond;
189
190         volatile bool do_exit;
191
192         /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
193         pthread_key_t tls_id_key;
194 };
195
196 typedef struct TaskThread {
197         TaskScheduler *scheduler;
198         int id;
199         TaskThreadLocalStorage tls;
200 } TaskThread;
201
202 /* Helper */
203 BLI_INLINE void task_data_free(Task *task, const int thread_id)
204 {
205         if (task->free_taskdata) {
206                 if (task->freedata) {
207                         task->freedata(task->pool, task->taskdata, thread_id);
208                 }
209                 else {
210                         MEM_freeN(task->taskdata);
211                 }
212         }
213 }
214
215 BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
216 {
217         memset(tls, 0, sizeof(TaskThreadLocalStorage));
218 }
219
220 BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool,
221                                                 const int thread_id)
222 {
223         TaskScheduler *scheduler = pool->scheduler;
224         BLI_assert(thread_id >= 0);
225         BLI_assert(thread_id <= scheduler->num_threads);
226         if (pool->use_local_tls && thread_id == 0) {
227                 BLI_assert(pool->thread_id == 0);
228                 BLI_assert(!BLI_thread_is_main());
229                 BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
230                 return &pool->local_tls;
231         }
232         if (thread_id == 0) {
233                 BLI_assert(BLI_thread_is_main());
234                 return &scheduler->task_threads[pool->thread_id].tls;
235         }
236         return &scheduler->task_threads[thread_id].tls;
237 }
238
239 BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
240 {
241         TaskMemPool *task_mempool = &tls->task_mempool;
242         for (int i = 0; i < task_mempool->num_tasks; ++i) {
243                 MEM_freeN(task_mempool->tasks[i]);
244         }
245 }
246
247 static Task *task_alloc(TaskPool *pool, const int thread_id)
248 {
249         BLI_assert(thread_id <= pool->scheduler->num_threads);
250         if (thread_id != -1) {
251                 BLI_assert(thread_id >= 0);
252                 BLI_assert(thread_id <= pool->scheduler->num_threads);
253                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
254                 TaskMemPool *task_mempool = &tls->task_mempool;
255                 /* Try to re-use task memory from a thread local storage. */
256                 if (task_mempool->num_tasks > 0) {
257                         --task_mempool->num_tasks;
258                         /* Success! We've just avoided task allocation. */
259 #ifdef DEBUG_STATS
260                         pool->mempool_stats[thread_id].num_reuse++;
261 #endif
262                         return task_mempool->tasks[task_mempool->num_tasks];
263                 }
264                 /* We are doomed to allocate new task data. */
265 #ifdef DEBUG_STATS
266                 pool->mempool_stats[thread_id].num_alloc++;
267 #endif
268         }
269         return MEM_mallocN(sizeof(Task), "New task");
270 }
271
272 static void task_free(TaskPool *pool, Task *task, const int thread_id)
273 {
274         task_data_free(task, thread_id);
275         BLI_assert(thread_id >= 0);
276         BLI_assert(thread_id <= pool->scheduler->num_threads);
277         if (thread_id == 0) {
278                 BLI_assert(pool->use_local_tls || BLI_thread_is_main());
279         }
280         TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
281         TaskMemPool *task_mempool = &tls->task_mempool;
282         if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
283                 /* Successfully allowed the task to be re-used later. */
284                 task_mempool->tasks[task_mempool->num_tasks] = task;
285                 ++task_mempool->num_tasks;
286         }
287         else {
288                 /* Local storage saturated, no other way than to just discard
289                  * the memory.
290                  *
291                  * TODO(sergey): We can perhaps store such pointer in a global
292                  * scheduler pool, maybe it'll be faster than discarding and
293                  * allocating again.
294                  */
295                 MEM_freeN(task);
296 #ifdef DEBUG_STATS
297                 pool->mempool_stats[thread_id].num_discard++;
298 #endif
299         }
300 }
301
302 /* Task Scheduler */
303
304 static void task_pool_num_decrease(TaskPool *pool, size_t done)
305 {
306         BLI_mutex_lock(&pool->num_mutex);
307
308         BLI_assert(pool->num >= done);
309
310         pool->num -= done;
311
312         if (pool->num == 0)
313                 BLI_condition_notify_all(&pool->num_cond);
314
315         BLI_mutex_unlock(&pool->num_mutex);
316 }
317
318 static void task_pool_num_increase(TaskPool *pool, size_t new)
319 {
320         BLI_mutex_lock(&pool->num_mutex);
321
322         pool->num += new;
323         BLI_condition_notify_all(&pool->num_cond);
324
325         BLI_mutex_unlock(&pool->num_mutex);
326 }
327
328 static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
329 {
330         bool found_task = false;
331         BLI_mutex_lock(&scheduler->queue_mutex);
332
333         while (!scheduler->queue.first && !scheduler->do_exit)
334                 BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
335
336         do {
337                 Task *current_task;
338
339                 /* Assuming we can only have an empty queue in the 'exit' case here seems logical (we should only be here after
340                  * our worker thread has been woken up from a condition_wait(), which only happens after a new task was
341                  * added to the queue), but it is wrong.
342                  * Waiting on a condition may wake up the thread even if the condition is not signaled (spurious wake-ups), and some
343                  * race condition may also empty the queue **after** the condition has been signaled, but **before** the awoken thread
344                  * reaches this point...
345                  * See http://stackoverflow.com/questions/8594591
346                  *
347                  * So we only abort here if do_exit is set.
348                  */
349                 if (scheduler->do_exit) {
350                         BLI_mutex_unlock(&scheduler->queue_mutex);
351                         return false;
352                 }
353
354                 for (current_task = scheduler->queue.first;
355                      current_task != NULL;
356                      current_task = current_task->next)
357                 {
358                         TaskPool *pool = current_task->pool;
359
360                         if (scheduler->background_thread_only && !pool->run_in_background) {
361                                 continue;
362                         }
363
364                         *task = current_task;
365                         found_task = true;
366                         BLI_remlink(&scheduler->queue, *task);
367                         break;
368                 }
369                 if (!found_task)
370                         BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
371         } while (!found_task);
372
373         BLI_mutex_unlock(&scheduler->queue_mutex);
374
375         return true;
376 }
377
378 BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls,
379                                    const int thread_id)
380 {
381         while (tls->num_local_queue > 0) {
382                 /* We pop the task from the queue before handling it, so the task's
383                  * handler can push the next job to the local queue.
384                  */
385                 tls->num_local_queue--;
386                 Task *local_task = tls->local_queue[tls->num_local_queue];
387                 /* TODO(sergey): Double-check work_and_wait() doesn't handle other
388                  * pools' tasks.
389                  */
390                 TaskPool *local_pool = local_task->pool;
391                 local_task->run(local_pool, local_task->taskdata, thread_id);
392                 task_free(local_pool, local_task, thread_id);
393         }
394 }
395
396 static void *task_scheduler_thread_run(void *thread_p)
397 {
398         TaskThread *thread = (TaskThread *) thread_p;
399         TaskThreadLocalStorage *tls = &thread->tls;
400         TaskScheduler *scheduler = thread->scheduler;
401         int thread_id = thread->id;
402         Task *task;
403
404         pthread_setspecific(scheduler->tls_id_key, thread);
405
406         /* keep popping off tasks */
407         while (task_scheduler_thread_wait_pop(scheduler, &task)) {
408                 TaskPool *pool = task->pool;
409
410                 /* run task */
411                 task->run(pool, task->taskdata, thread_id);
412
413                 /* delete task */
414                 task_free(pool, task, thread_id);
415
416                 /* Handle all tasks from local queue. */
417                 handle_local_queue(tls, thread_id);
418
419                 /* notify pool task was done */
420                 task_pool_num_decrease(pool, 1);
421         }
422
423         return NULL;
424 }
425
426 TaskScheduler *BLI_task_scheduler_create(int num_threads)
427 {
428         TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
429
430         /* multiple places can use this task scheduler, sharing the same
431          * threads, so we keep track of the number of users. */
432         scheduler->do_exit = false;
433
434         BLI_listbase_clear(&scheduler->queue);
435         BLI_mutex_init(&scheduler->queue_mutex);
436         BLI_condition_init(&scheduler->queue_cond);
437
438         if (num_threads == 0) {
439                 /* automatic number of threads will be main thread + num cores */
440                 num_threads = BLI_system_thread_count();
441         }
442
443         /* main thread will also work, so we count it too */
444         num_threads -= 1;
445
446         /* Add background-only thread if needed. */
447         if (num_threads == 0) {
448                 scheduler->background_thread_only = true;
449                 num_threads = 1;
450         }
451
452         scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
453                                               "TaskScheduler task threads");
454
455         /* Initialize TLS for main thread. */
456         initialize_task_tls(&scheduler->task_threads[0].tls);
457
458         pthread_key_create(&scheduler->tls_id_key, NULL);
459
460         /* launch threads that will be waiting for work */
461         if (num_threads > 0) {
462                 int i;
463
464                 scheduler->num_threads = num_threads;
465                 scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
466
467                 for (i = 0; i < num_threads; i++) {
468                         TaskThread *thread = &scheduler->task_threads[i + 1];
469                         thread->scheduler = scheduler;
470                         thread->id = i + 1;
471                         initialize_task_tls(&thread->tls);
472
473                         if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
474                                 fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
475                         }
476                 }
477         }
478
479         return scheduler;
480 }
481
482 void BLI_task_scheduler_free(TaskScheduler *scheduler)
483 {
484         Task *task;
485
486         /* stop all waiting threads */
487         BLI_mutex_lock(&scheduler->queue_mutex);
488         scheduler->do_exit = true;
489         BLI_condition_notify_all(&scheduler->queue_cond);
490         BLI_mutex_unlock(&scheduler->queue_mutex);
491
492         pthread_key_delete(scheduler->tls_id_key);
493
494         /* delete threads */
495         if (scheduler->threads) {
496                 int i;
497
498                 for (i = 0; i < scheduler->num_threads; i++) {
499                         if (pthread_join(scheduler->threads[i], NULL) != 0)
500                                 fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
501                 }
502
503                 MEM_freeN(scheduler->threads);
504         }
505
506         /* Delete task thread data */
507         if (scheduler->task_threads) {
508                 for (int i = 0; i < scheduler->num_threads + 1; ++i) {
509                         TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
510                         free_task_tls(tls);
511                 }
512
513                 MEM_freeN(scheduler->task_threads);
514         }
515
516         /* delete leftover tasks */
517         for (task = scheduler->queue.first; task; task = task->next) {
518                 task_data_free(task, 0);
519         }
520         BLI_freelistN(&scheduler->queue);
521
522         /* delete mutex/condition */
523         BLI_mutex_end(&scheduler->queue_mutex);
524         BLI_condition_end(&scheduler->queue_cond);
525
526         MEM_freeN(scheduler);
527 }
528
529 int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
530 {
531         return scheduler->num_threads + 1;
532 }
533
534 static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
535 {
536         task_pool_num_increase(task->pool, 1);
537
538         /* add task to queue */
539         BLI_mutex_lock(&scheduler->queue_mutex);
540
541         if (priority == TASK_PRIORITY_HIGH)
542                 BLI_addhead(&scheduler->queue, task);
543         else
544                 BLI_addtail(&scheduler->queue, task);
545
546         BLI_condition_notify_one(&scheduler->queue_cond);
547         BLI_mutex_unlock(&scheduler->queue_mutex);
548 }
549
550 static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
551 {
552         Task *task, *nexttask;
553         size_t done = 0;
554
555         BLI_mutex_lock(&scheduler->queue_mutex);
556
557         /* free all tasks from this pool from the queue */
558         for (task = scheduler->queue.first; task; task = nexttask) {
559                 nexttask = task->next;
560
561                 if (task->pool == pool) {
562                         task_data_free(task, pool->thread_id);
563                         BLI_freelinkN(&scheduler->queue, task);
564
565                         done++;
566                 }
567         }
568
569         BLI_mutex_unlock(&scheduler->queue_mutex);
570
571         /* notify done */
572         task_pool_num_decrease(pool, done);
573 }
574
575 /* Task Pool */
576
577 static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
578                                      void *userdata,
579                                      const bool is_background,
580                                      const bool is_suspended)
581 {
582         TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
583
584 #ifndef NDEBUG
585         /* Assert we do not try to create a background pool from some parent task - those only work OK from main thread. */
586         if (is_background) {
587                 const pthread_t thread_id = pthread_self();
588                 int i = scheduler->num_threads;
589
590                 while (i--) {
591                         BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
592                 }
593         }
594 #endif
595
596         pool->scheduler = scheduler;
597         pool->num = 0;
598         pool->do_cancel = false;
599         pool->do_work = false;
600         pool->is_suspended = is_suspended;
601         pool->num_suspended = 0;
602         pool->suspended_queue.first = pool->suspended_queue.last = NULL;
603         pool->run_in_background = is_background;
604         pool->use_local_tls = false;
605
606         BLI_mutex_init(&pool->num_mutex);
607         BLI_condition_init(&pool->num_cond);
608
609         pool->userdata = userdata;
610         BLI_mutex_init(&pool->user_mutex);
611
612         if (BLI_thread_is_main()) {
613                 pool->thread_id = 0;
614         }
615         else {
616                 TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);
617                 if (thread == NULL) {
618                         /* NOTE: Task pool is created from non-main thread which is not
619                          * managed by the task scheduler. We identify ourselves as thread ID
620                          * 0 but we do not use scheduler's TLS storage and use our own
621                          * instead to avoid any possible threading conflicts.
622                          */
623                         pool->thread_id = 0;
624                         pool->use_local_tls = true;
625 #ifndef NDEBUG
626                         pool->creator_thread_id = pthread_self();
627 #endif
628                         initialize_task_tls(&pool->local_tls);
629                 }
630                 else {
631                         pool->thread_id = thread->id;
632                 }
633         }
634
635 #ifdef DEBUG_STATS
636         pool->mempool_stats =
637                 MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1),
638                             "per-taskpool mempool stats");
639 #endif
640
641         /* Ensure malloc will go fine from threads.
642          *
643          * This is needed because we could be in the main thread here
644          * and malloc could be non-thread-safe at this point because
645          * no other jobs are running.
646          */
647         BLI_begin_threaded_malloc();
648
649         return pool;
650 }
651
652 /**
653  * Create a normal task pool.
654  * This means that in single-threaded context, it will not be executed at all until you call
655  * \a BLI_task_pool_work_and_wait() on it.
656  */
657 TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
658 {
659         return task_pool_create_ex(scheduler, userdata, false, false);
660 }
661
662 /**
663  * Create a background task pool.
664  * In multi-threaded context, there is no difference from \a BLI_task_pool_create(), but in single-threaded case
665  * it is ensured to have at least one worker thread to run on (i.e. you do not have to call
666  * \a BLI_task_pool_work_and_wait() on it to be sure it will be processed).
667  *
668  * \note Background pools are non-recursive (that is, you should not create other background pools in tasks assigned
669  *       to a background pool, they could end up never being executed, since the 'fallback' background thread is already
670  *       busy with the parent task in single-threaded context).
671  */
672 TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
673 {
674         return task_pool_create_ex(scheduler, userdata, true, false);
675 }
676
677 /**
678  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
679  * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
680  * overhead when pushing a huge number of small initial tasks from the main thread.
681  */
682 TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
683 {
684         return task_pool_create_ex(scheduler, userdata, false, true);
685 }
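
/* Illustrative sketch (not part of the original sources, not compiled): a
 * typical life cycle of a task pool. The callback, its data and the helper
 * function names are hypothetical; the BLI_task_* calls are the ones declared
 * in BLI_task.h and implemented in this file.
 */
#if 0
static void example_task_run(TaskPool * __restrict UNUSED(pool), void *taskdata, int UNUSED(threadid))
{
        /* Each pushed task receives its own taskdata pointer. */
        int *value = taskdata;
        *value *= 2;
}

static void example_run_pool(void)
{
        TaskScheduler *scheduler = BLI_task_scheduler_get();
        TaskPool *pool = BLI_task_pool_create(scheduler, NULL);
        static int values[4] = {0, 1, 2, 3};

        for (int i = 0; i < 4; i++) {
                /* free_taskdata is false: values[] is static, nothing to free. */
                BLI_task_pool_push(pool, example_task_run, &values[i], false, TASK_PRIORITY_LOW);
        }

        /* The calling thread also works on this pool's tasks until they are all done. */
        BLI_task_pool_work_and_wait(pool);
        BLI_task_pool_free(pool);
}
#endif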
686
687 void BLI_task_pool_free(TaskPool *pool)
688 {
689         BLI_task_pool_cancel(pool);
690
691         BLI_mutex_end(&pool->num_mutex);
692         BLI_condition_end(&pool->num_cond);
693
694         BLI_mutex_end(&pool->user_mutex);
695
696 #ifdef DEBUG_STATS
697         printf("Thread ID    Allocated   Reused   Discarded\n");
698         for (int i = 0; i < pool->scheduler->num_threads + 1; ++i) {
699                 printf("%02d           %05d       %05d    %05d\n",
700                        i,
701                        pool->mempool_stats[i].num_alloc,
702                        pool->mempool_stats[i].num_reuse,
703                        pool->mempool_stats[i].num_discard);
704         }
705         MEM_freeN(pool->mempool_stats);
706 #endif
707
708         if (pool->use_local_tls) {
709                 free_task_tls(&pool->local_tls);
710         }
711
712         MEM_freeN(pool);
713
714         BLI_end_threaded_malloc();
715 }
716
717 static void task_pool_push(
718         TaskPool *pool, TaskRunFunction run, void *taskdata,
719         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority,
720         int thread_id)
721 {
722         Task *task = task_alloc(pool, thread_id);
723
724         task->run = run;
725         task->taskdata = taskdata;
726         task->free_taskdata = free_taskdata;
727         task->freedata = freedata;
728         task->pool = pool;
729
730         if (pool->is_suspended) {
731                 BLI_addhead(&pool->suspended_queue, task);
732                 atomic_fetch_and_add_z(&pool->num_suspended, 1);
733                 return;
734         }
735
736         if (thread_id != -1 &&
737             (thread_id != pool->thread_id || pool->do_work))
738         {
739                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
740
741                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
742                 if (tls->num_local_queue < LOCALQUEUE_SIZE) {
743                         tls->local_queue[tls->num_local_queue] = task;
744                         tls->num_local_queue++;
745                         return;
746                 }
747         }
748
749         task_scheduler_push(pool->scheduler, task, priority);
750 }
751
752 void BLI_task_pool_push_ex(
753         TaskPool *pool, TaskRunFunction run, void *taskdata,
754         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority)
755 {
756         task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
757 }
758
759 void BLI_task_pool_push(
760         TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
761 {
762         BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
763 }
764
765 void BLI_task_pool_push_from_thread(TaskPool *pool, TaskRunFunction run,
766         void *taskdata, bool free_taskdata, TaskPriority priority, int thread_id)
767 {
768         task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
769 }
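
/* Illustrative sketch (not part of the original sources, not compiled): pushing
 * follow-up work from inside a running task. Passing the worker's thread_id lets
 * the new task land in that thread's local queue (see LOCALQUEUE_SIZE) instead of
 * going through the scheduler's locked global queue. The callback and data names
 * are hypothetical.
 */
#if 0
typedef struct ExampleJob {
        float *buffer;
        int size;
} ExampleJob;

static void example_stage_two(TaskPool * __restrict UNUSED(pool), void *taskdata, int UNUSED(threadid))
{
        ExampleJob *job = taskdata;
        /* ... second half of the work on job->buffer ... */
        (void)job;
}

static void example_stage_one(TaskPool * __restrict pool, void *taskdata, int threadid)
{
        ExampleJob *job = taskdata;
        /* ... first half of the work on job->buffer ... */
        /* Queue the second stage, preferring this worker's local queue. */
        BLI_task_pool_push_from_thread(pool, example_stage_two, job, false, TASK_PRIORITY_HIGH, threadid);
}
#endif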
770
771 void BLI_task_pool_work_and_wait(TaskPool *pool)
772 {
773         TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
774         TaskScheduler *scheduler = pool->scheduler;
775
776         if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
777                 if (pool->num_suspended) {
778                         task_pool_num_increase(pool, pool->num_suspended);
779                         BLI_mutex_lock(&scheduler->queue_mutex);
780
781                         BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue);
782
783                         BLI_condition_notify_all(&scheduler->queue_cond);
784                         BLI_mutex_unlock(&scheduler->queue_mutex);
785                 }
786         }
787
788         pool->do_work = true;
789
790         ASSERT_THREAD_ID(pool->scheduler, pool->thread_id);
791
792         BLI_mutex_lock(&pool->num_mutex);
793
794         while (pool->num != 0) {
795                 Task *task, *work_task = NULL;
796                 bool found_task = false;
797
798                 BLI_mutex_unlock(&pool->num_mutex);
799
800                 BLI_mutex_lock(&scheduler->queue_mutex);
801
802                 /* find task from this pool. if we get a task from another pool,
803                  * we can get into deadlock */
804
805                 for (task = scheduler->queue.first; task; task = task->next) {
806                         if (task->pool == pool) {
807                                 work_task = task;
808                                 found_task = true;
809                                 BLI_remlink(&scheduler->queue, task);
810                                 break;
811                         }
812                 }
813
814                 BLI_mutex_unlock(&scheduler->queue_mutex);
815
816                 /* if found task, do it, otherwise wait until other tasks are done */
817                 if (found_task) {
818                         /* run task */
819                         work_task->run(pool, work_task->taskdata, pool->thread_id);
820
821                         /* delete task */
822                         task_free(pool, task, pool->thread_id);
823
824                         /* Handle all tasks from local queue. */
825                         handle_local_queue(tls, pool->thread_id);
826
827                         /* notify pool task was done */
828                         task_pool_num_decrease(pool, 1);
829                 }
830
831                 BLI_mutex_lock(&pool->num_mutex);
832                 if (pool->num == 0)
833                         break;
834
835                 if (!found_task)
836                         BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
837         }
838
839         BLI_mutex_unlock(&pool->num_mutex);
840
841         handle_local_queue(tls, pool->thread_id);
842 }
843
844 void BLI_task_pool_cancel(TaskPool *pool)
845 {
846         pool->do_cancel = true;
847
848         task_scheduler_clear(pool->scheduler, pool);
849
850         /* wait until all entries are cleared */
851         BLI_mutex_lock(&pool->num_mutex);
852         while (pool->num)
853                 BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
854         BLI_mutex_unlock(&pool->num_mutex);
855
856         pool->do_cancel = false;
857 }
858
859 bool BLI_task_pool_canceled(TaskPool *pool)
860 {
861         return pool->do_cancel;
862 }
863
864 void *BLI_task_pool_userdata(TaskPool *pool)
865 {
866         return pool->userdata;
867 }
868
869 ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
870 {
871         return &pool->user_mutex;
872 }
873
874 /* Parallel range routines */
875
876 /**
877  *
878  * Main functions:
879  * - #BLI_task_parallel_range
880  * - #BLI_task_parallel_listbase (#ListBase - double linked list)
881  *
882  * TODO:
883  * - #BLI_task_parallel_foreach_link (#Link - single linked list)
884  * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
885  * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
886  *
887  */
888
889 /* Allows avoiding the use of malloc for userdata_chunk in tasks, when it is small enough. */
890 #define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
891 #define MALLOCA_FREE(_mem, _size) if (((_mem) != NULL) && ((_size) > 8192)) MEM_freeN((_mem))
892
893 typedef struct ParallelRangeState {
894         int start, stop;
895         void *userdata;
896
897         TaskParallelRangeFunc func;
898         TaskParallelRangeFuncEx func_ex;
899
900         int iter;
901         int chunk_size;
902 } ParallelRangeState;
903
904 BLI_INLINE bool parallel_range_next_iter_get(
905         ParallelRangeState * __restrict state,
906         int * __restrict iter, int * __restrict count)
907 {
908         uint32_t uval = atomic_fetch_and_add_uint32((uint32_t *)(&state->iter), state->chunk_size);
909         int previter = *(int32_t *)&uval;
910
911         *iter = previter;
912         *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
913
914         return (previter < state->stop);
915 }
916
917 static void parallel_range_func(
918         TaskPool * __restrict pool,
919         void *userdata_chunk,
920         int threadid)
921 {
922         ParallelRangeState * __restrict state = BLI_task_pool_userdata(pool);
923         int iter, count;
924
925         while (parallel_range_next_iter_get(state, &iter, &count)) {
926                 int i;
927
928                 if (state->func_ex) {
929                         for (i = 0; i < count; ++i) {
930                                 state->func_ex(state->userdata, userdata_chunk, iter + i, threadid);
931                         }
932                 }
933                 else {
934                         for (i = 0; i < count; ++i) {
935                                 state->func(state->userdata, iter + i);
936                         }
937                 }
938         }
939 }
940
941 /**
942  * This function allows parallelizing for-loops in a similar way to OpenMP's 'parallel for' statement.
943  *
944  * See public API doc for description of parameters.
945  */
946 static void task_parallel_range_ex(
947         int start, int stop,
948         void *userdata,
949         void *userdata_chunk,
950         const size_t userdata_chunk_size,
951         TaskParallelRangeFunc func,
952         TaskParallelRangeFuncEx func_ex,
953         TaskParallelRangeFuncFinalize func_finalize,
954         const bool use_threading,
955         const bool use_dynamic_scheduling)
956 {
957         TaskScheduler *task_scheduler;
958         TaskPool *task_pool;
959         ParallelRangeState state;
960         int i, num_threads, num_tasks;
961
962         void *userdata_chunk_local = NULL;
963         void *userdata_chunk_array = NULL;
964         const bool use_userdata_chunk = (func_ex != NULL) && (userdata_chunk_size != 0) && (userdata_chunk != NULL);
965
966         if (start == stop) {
967                 return;
968         }
969
970         BLI_assert(start < stop);
971         if (userdata_chunk_size != 0) {
972                 BLI_assert(func_ex != NULL && func == NULL);
973                 BLI_assert(userdata_chunk != NULL);
974         }
975
976         /* If there is not enough data to be crunched, don't bother with tasks at all,
977          * do everything from the main thread.
978          */
979         if (!use_threading) {
980                 if (func_ex) {
981                         if (use_userdata_chunk) {
982                                 userdata_chunk_local = MALLOCA(userdata_chunk_size);
983                                 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
984                         }
985
986                         for (i = start; i < stop; ++i) {
987                                 func_ex(userdata, userdata_chunk_local, i, 0);
988                         }
989
990                         if (func_finalize) {
991                                 func_finalize(userdata, userdata_chunk_local);
992                         }
993
994                         MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
995                 }
996                 else {
997                         for (i = start; i < stop; ++i) {
998                                 func(userdata, i);
999                         }
1000                 }
1001
1002                 return;
1003         }
1004
1005         task_scheduler = BLI_task_scheduler_get();
1006         task_pool = BLI_task_pool_create(task_scheduler, &state);
1007         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1008
1009         /* The idea here is to prevent creating a task for each of the loop iterations
1010          * and instead have tasks which are evenly distributed across CPU cores and
1011          * pull the next iter to be crunched using the queue.
1012          */
1013         num_tasks = num_threads * 2;
1014
1015         state.start = start;
1016         state.stop = stop;
1017         state.userdata = userdata;
1018         state.func = func;
1019         state.func_ex = func_ex;
1020         state.iter = start;
1021         if (use_dynamic_scheduling) {
1022                 state.chunk_size = 32;
1023         }
1024         else {
1025                 state.chunk_size = max_ii(1, (stop - start) / (num_tasks));
1026         }
1027
1028         num_tasks = max_ii(1, min_ii(num_tasks, (stop - start) / state.chunk_size));
1029         atomic_fetch_and_add_uint32((uint32_t *)(&state.iter), 0);
1030
1031         if (use_userdata_chunk) {
1032                 userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
1033         }
1034
1035         for (i = 0; i < num_tasks; i++) {
1036                 if (use_userdata_chunk) {
1037                         userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1038                         memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1039                 }
1040                 /* Use this pool's pre-allocated tasks. */
1041                 BLI_task_pool_push_from_thread(task_pool,
1042                                                parallel_range_func,
1043                                                userdata_chunk_local, false,
1044                                                TASK_PRIORITY_HIGH,
1045                                                task_pool->thread_id);
1046         }
1047
1048         BLI_task_pool_work_and_wait(task_pool);
1049         BLI_task_pool_free(task_pool);
1050
1051         if (use_userdata_chunk) {
1052                 if (func_finalize) {
1053                         for (i = 0; i < num_tasks; i++) {
1054                                 userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1055                                 func_finalize(userdata, userdata_chunk_local);
1056                         }
1057                 }
1058                 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
1059         }
1060 }
1061
1062 /**
1063  * This function allows parallelizing for-loops in a similar way to OpenMP's 'parallel for' statement.
1064  *
1065  * \param start First index to process.
1066  * \param stop Index to stop looping (excluded).
1067  * \param userdata Common userdata passed to all instances of \a func.
1068  * \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
1069  *                       (similar to OpenMP's firstprivate).
1070  * \param userdata_chunk_size Memory size of \a userdata_chunk.
1071  * \param func_ex Callback function (advanced version).
1072  * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for-loop
1073  *                      (allows caller to use any kind of test to switch on parallelization or not).
1074  * \param use_dynamic_scheduling If \a true, the whole range is divided into a lot of small chunks (of size 32 currently),
1075  *                               otherwise the whole range is split into a few big chunks (num_threads * 2 chunks currently).
1076  */
1077 void BLI_task_parallel_range_ex(
1078         int start, int stop,
1079         void *userdata,
1080         void *userdata_chunk,
1081         const size_t userdata_chunk_size,
1082         TaskParallelRangeFuncEx func_ex,
1083         const bool use_threading,
1084         const bool use_dynamic_scheduling)
1085 {
1086         task_parallel_range_ex(
1087                     start, stop, userdata, userdata_chunk, userdata_chunk_size, NULL, func_ex, NULL,
1088                     use_threading, use_dynamic_scheduling);
1089 }
1090
1091 /**
1092  * A simpler version of \a BLI_task_parallel_range_ex, which does not use \a use_dynamic_scheduling,
1093  * and does not handle 'firstprivate'-like \a userdata_chunk.
1094  *
1095  * \param start First index to process.
1096  * \param stop Index to stop looping (excluded).
1097  * \param userdata Common userdata passed to all instances of \a func.
1098  * \param func Callback function (simple version).
1099  * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for-loop
1100  *                      (allows caller to use any kind of test to switch on parallelization or not).
1101  */
1102 void BLI_task_parallel_range(
1103         int start, int stop,
1104         void *userdata,
1105         TaskParallelRangeFunc func,
1106         const bool use_threading)
1107 {
1108         task_parallel_range_ex(start, stop, userdata, NULL, 0, func, NULL, NULL, use_threading, false);
1109 }
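
/* Illustrative sketch (not part of the original sources, not compiled): squaring
 * an array in parallel with the simple range API above. The callback, data struct
 * and threading threshold are hypothetical.
 */
#if 0
typedef struct SquareData {
        float *values;
} SquareData;

static void example_square_func(void *userdata, const int index)
{
        SquareData *data = userdata;
        data->values[index] *= data->values[index];
}

static void example_square_all(float *values, int num_values)
{
        SquareData data = {values};
        /* Only bother with threads when there is enough work to amortize their cost. */
        BLI_task_parallel_range(0, num_values, &data, example_square_func, num_values > 1024);
}
#endif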
1110
1111 /**
1112  * This function allows parallelizing for-loops in a similar way to OpenMP's 'parallel for' statement,
1113  * with an additional 'finalize' func called from the calling thread once the whole range has been processed.
1114  *
1115  * \param start First index to process.
1116  * \param stop Index to stop looping (excluded).
1117  * \param userdata Common userdata passed to all instances of \a func.
1118  * \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
1119  *                       (similar to OpenMP's firstprivate).
1120  * \param userdata_chunk_size Memory size of \a userdata_chunk.
1121  * \param func_ex Callback function (advanced version).
1122  * \param func_finalize Callback function, called after all workers have finished,
1123  * useful to finalize accumulative tasks.
1124  * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for-loop
1125  *                      (allows caller to use any kind of test to switch on parallelization or not).
1126  * \param use_dynamic_scheduling If \a true, the whole range is divided into a lot of small chunks (of size 32 currently),
1127  *                               otherwise the whole range is split into a few big chunks (num_threads * 2 chunks currently).
1128  */
1129 void BLI_task_parallel_range_finalize(
1130         int start, int stop,
1131         void *userdata,
1132         void *userdata_chunk,
1133         const size_t userdata_chunk_size,
1134         TaskParallelRangeFuncEx func_ex,
1135         TaskParallelRangeFuncFinalize func_finalize,
1136         const bool use_threading,
1137         const bool use_dynamic_scheduling)
1138 {
1139         task_parallel_range_ex(
1140                     start, stop, userdata, userdata_chunk, userdata_chunk_size, NULL, func_ex, func_finalize,
1141                     use_threading, use_dynamic_scheduling);
1142 }
1143
1144 #undef MALLOCA
1145 #undef MALLOCA_FREE
1146
1147 typedef struct ParallelListbaseState {
1148         void *userdata;
1149         TaskParallelListbaseFunc func;
1150
1151         int chunk_size;
1152         int index;
1153         Link *link;
1154         SpinLock lock;
1155 } ParallelListState;
1156
1157 BLI_INLINE Link *parallel_listbase_next_iter_get(
1158         ParallelListState * __restrict state,
1159         int * __restrict index,
1160         int * __restrict count)
1161 {
1162         int task_count = 0;
1163         BLI_spin_lock(&state->lock);
1164         Link *result = state->link;
1165         if (LIKELY(result != NULL)) {
1166                 *index = state->index;
1167                 while (state->link != NULL && task_count < state->chunk_size) {
1168                         ++task_count;
1169                         state->link = state->link->next;
1170                 }
1171                 state->index += task_count;
1172         }
1173         BLI_spin_unlock(&state->lock);
1174         *count = task_count;
1175         return result;
1176 }
1177
1178 static void parallel_listbase_func(
1179         TaskPool * __restrict pool,
1180         void *UNUSED(taskdata),
1181         int UNUSED(threadid))
1182 {
1183         ParallelListState * __restrict state = BLI_task_pool_userdata(pool);
1184         Link *link;
1185         int index, count;
1186
1187         while ((link = parallel_listbase_next_iter_get(state, &index, &count)) != NULL) {
1188                 for (int i = 0; i < count; ++i) {
1189                         state->func(state->userdata, link, index + i);
1190                         link = link->next;
1191                 }
1192         }
1193 }
1194
1195 /**
1196  * This function allows parallelizing for-loops over ListBase items.
1197  *
1198  * \param listbase The double linked list to loop over.
1199  * \param userdata Common userdata passed to all instances of \a func.
1200  * \param func Callback function.
1201  * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for-loop
1202  *                      (allows caller to use any kind of test to switch on parallelization or not).
1203  *
1204  * \note There is no static scheduling here, since it would need another full loop over items to count them...
1205  */
1206 void BLI_task_parallel_listbase(
1207         struct ListBase *listbase,
1208         void *userdata,
1209         TaskParallelListbaseFunc func,
1210         const bool use_threading)
1211 {
1212         TaskScheduler *task_scheduler;
1213         TaskPool *task_pool;
1214         ParallelListState state;
1215         int i, num_threads, num_tasks;
1216
1217         if (BLI_listbase_is_empty(listbase)) {
1218                 return;
1219         }
1220
1221         if (!use_threading) {
1222                 i = 0;
1223                 for (Link *link = listbase->first; link != NULL; link = link->next, ++i) {
1224                         func(userdata, link, i);
1225                 }
1226                 return;
1227         }
1228
1229         task_scheduler = BLI_task_scheduler_get();
1230         task_pool = BLI_task_pool_create(task_scheduler, &state);
1231         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1232
1233         /* The idea here is to prevent creating a task for each of the loop iterations
1234          * and instead have tasks which are evenly distributed across CPU cores and
1235          * pull the next iter to be crunched using the queue.
1236          */
1237         num_tasks = num_threads * 2;
1238
1239         state.index = 0;
1240         state.link = listbase->first;
1241         state.userdata = userdata;
1242         state.func = func;
1243         state.chunk_size = 32;
1244         BLI_spin_init(&state.lock);
1245
1246         for (i = 0; i < num_tasks; i++) {
1247                 /* Use this pool's pre-allocated tasks. */
1248                 BLI_task_pool_push_from_thread(task_pool,
1249                                                parallel_listbase_func,
1250                                                NULL, false,
1251                                                TASK_PRIORITY_HIGH,
1252                                                task_pool->thread_id);
1253         }
1254
1255         BLI_task_pool_work_and_wait(task_pool);
1256         BLI_task_pool_free(task_pool);
1257
1258         BLI_spin_end(&state.lock);
1259 }
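
/* Illustrative sketch (not part of the original sources, not compiled): visiting
 * every link of a ListBase in parallel. The callback, counter and function names
 * are hypothetical; the shared counter is updated atomically since several worker
 * threads run the callback concurrently.
 */
#if 0
static void example_count_func(void *userdata, struct Link *UNUSED(link), int UNUSED(index))
{
        atomic_fetch_and_add_uint32((uint32_t *)userdata, 1);
}

static uint32_t example_count_links_threaded(struct ListBase *some_list)
{
        uint32_t count = 0;
        /* Chunks of 32 links are handed out to worker threads as they become free. */
        BLI_task_parallel_listbase(some_list, &count, example_count_func, true);
        return count;
}
#endif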