1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/blenlib/intern/task.c
22  *  \ingroup bli
23  *
24  * A generic task system which can be used for any task based subsystem.
25  */
26
27 #include <stdlib.h>
28
29 #include "MEM_guardedalloc.h"
30
31 #include "DNA_listBase.h"
32
33 #include "BLI_listbase.h"
34 #include "BLI_math.h"
35 #include "BLI_mempool.h"
36 #include "BLI_task.h"
37 #include "BLI_threads.h"
38
39 #include "atomic_ops.h"
40
41 /* Define this to enable some detailed statistics printing. */
42 #undef DEBUG_STATS
43
44 /* Types */
45
46 /* Number of per-thread pre-allocated tasks.
47  *
48  * For more details see description of TaskMemPool.
49  */
50 #define MEMPOOL_SIZE 256
51
52 /* Number of tasks which are pushed directly to the local thread queue.
53  *
54  * This allows a thread to fetch the next task without locking the whole queue.
55  */
56 #define LOCAL_QUEUE_SIZE 1
57
58 /* Number of tasks which are allowed to be scheduled in a delayed manner.
59  *
60  * This allows using fewer locks when scheduling a graph node's children. More
61  * details can be found at TaskThreadLocalStorage::do_delayed_push.
62  */
63 #define DELAYED_QUEUE_SIZE 4096
64
65 #ifndef NDEBUG
66 #  define ASSERT_THREAD_ID(scheduler, thread_id)                              \
67         do {                                                                      \
68                 if (!BLI_thread_is_main()) {                                          \
69                         TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);  \
70                         if (thread == NULL) {                                             \
71                                 BLI_assert(thread_id == 0);                                   \
72                         }                                                                 \
73                         else {                                                            \
74                                 BLI_assert(thread_id == thread->id);                          \
75                         }                                                                 \
76                 }                                                                     \
77                 else {                                                                \
78                         BLI_assert(thread_id == 0);                                       \
79                 }                                                                     \
80         } while (false)
81 #else
82 #  define ASSERT_THREAD_ID(scheduler, thread_id)
83 #endif
84
85 typedef struct Task {
86         struct Task *next, *prev;
87
88         TaskRunFunction run;
89         void *taskdata;
90         bool free_taskdata;
91         TaskFreeFunction freedata;
92         TaskPool *pool;
93 } Task;
94
95 /* This is a per-thread storage of pre-allocated tasks.
96  *
97  * The idea behind this is simple: reduce the number of malloc() calls when pushing
98  * a new task to the pool. This is done by keeping memory from tasks which have
99  * already finished, so instead of freeing that memory we put it into the
100  * pool for later re-use.
101  *
102  * The tricky part here is to avoid any inter-thread synchronization, hence no
103  * lock must exist around this pool. The pool becomes the owner of the pointer
104  * from the freed task, and only the corresponding thread will be able to use
105  * this pool (no memory stealing and such).
106  *
107  * This leads to the following use of the pool:
108  *
109  * - task_push() should provide the proper thread ID from which the task is
110  *   being pushed.
111  *
112  * - The task allocation function checks the corresponding memory pool, and if
113  *   there is any memory in there it will mark that memory as re-used, remove it
114  *   from the pool and use it for the new task.
115  *
116  *   At this moment the task queue owns the memory.
117  *
118  * - When a task is done and task_free() is called, the memory will be put into
119  *   the pool which corresponds to the thread which handled the task.
120  */
121 typedef struct TaskMemPool {
122         /* Number of pre-allocated tasks in the pool. */
123         int num_tasks;
124         /* Pre-allocated task memory pointers. */
125         Task *tasks[MEMPOOL_SIZE];
126 } TaskMemPool;
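
/* Illustrative sketch (simplified, not the actual code of this file): the pool
 * behaves as a per-thread LIFO stack of Task pointers, so allocation and freeing
 * become plain array pushes/pops with no locking. Compare with task_alloc() and
 * task_free() below, which additionally handle thread_id == -1 and statistics.
 */
#if 0
static Task *mempool_take_or_malloc(TaskMemPool *mem_pool)
{
	if (mem_pool->num_tasks > 0) {
		/* Re-use memory of a task which was finished earlier by this thread. */
		return mem_pool->tasks[--mem_pool->num_tasks];
	}
	return MEM_mallocN(sizeof(Task), "New task");
}

static void mempool_give_back_or_free(TaskMemPool *mem_pool, Task *task)
{
	if (mem_pool->num_tasks < MEMPOOL_SIZE) {
		/* Keep the pointer around for later re-use by this thread. */
		mem_pool->tasks[mem_pool->num_tasks++] = task;
	}
	else {
		/* Pool saturated, simply discard the memory. */
		MEM_freeN(task);
	}
}
#endif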
127
128 #ifdef DEBUG_STATS
129 typedef struct TaskMemPoolStats {
130         /* Number of allocations. */
131         int num_alloc;
132         /* Number of avoided allocations (pointer was re-used from the pool). */
133         int num_reuse;
134         /* Number of tasks discarded due to pool saturation. */
135         int num_discard;
136 } TaskMemPoolStats;
137 #endif
138
139 typedef struct TaskThreadLocalStorage {
140         /* Memory pool for faster task allocation.
141          * The idea is to re-use the memory of tasks finished or discarded by this thread.
142          */
143         TaskMemPool task_mempool;
144
145         /* The local queue keeps the thread busy by keeping a small number of tasks
146          * ready to be picked up without requiring global queue locks for synchronization.
147          */
148         int num_local_queue;
149         Task *local_queue[LOCAL_QUEUE_SIZE];
150
151         /* A thread can be marked for delayed task push. This is helpful when it is
152          * known that lots of subsequent task pushes will happen from the same thread
153          * without "interrupting" for task execution.
154          *
155          * We try to accumulate as many tasks as possible in a local queue without
156          * any locks first, and then we push all of them into the scheduler's queue
157          * from within a single mutex lock.
158          */
159         bool do_delayed_push;
160         int num_delayed_queue;
161         Task *delayed_queue[DELAYED_QUEUE_SIZE];
162 } TaskThreadLocalStorage;
163
164 struct TaskPool {
165         TaskScheduler *scheduler;
166
167         volatile size_t num;
168         ThreadMutex num_mutex;
169         ThreadCondition num_cond;
170
171         void *userdata;
172         ThreadMutex user_mutex;
173
174         volatile bool do_cancel;
175         volatile bool do_work;
176
177         volatile bool is_suspended;
178         bool start_suspended;
179         ListBase suspended_queue;
180         size_t num_suspended;
181
182         /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
183          * has to use its special background fallback thread in case we are in
184          * a single-threaded situation.
185          */
186         bool run_in_background;
187
188         /* This is the task scheduler's ID of the thread on which the pool was constructed.
189          * It will be used to access the task TLS.
190          */
191         int thread_id;
192
193         /* For pools which are created from a non-main thread which is not a
194          * scheduler worker thread, we can't re-use any of the scheduler's threads' TLS
195          * and have to use our own.
196          */
197         bool use_local_tls;
198         TaskThreadLocalStorage local_tls;
199 #ifndef NDEBUG
200         pthread_t creator_thread_id;
201 #endif
202
203 #ifdef DEBUG_STATS
204         TaskMemPoolStats *mempool_stats;
205 #endif
206 };
207
208 struct TaskScheduler {
209         pthread_t *threads;
210         struct TaskThread *task_threads;
211         int num_threads;
212         bool background_thread_only;
213
214         ListBase queue;
215         ThreadMutex queue_mutex;
216         ThreadCondition queue_cond;
217
218         volatile bool do_exit;
219
220         /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
221         pthread_key_t tls_id_key;
222 };
223
224 typedef struct TaskThread {
225         TaskScheduler *scheduler;
226         int id;
227         TaskThreadLocalStorage tls;
228 } TaskThread;
229
230 /* Helper */
231 BLI_INLINE void task_data_free(Task *task, const int thread_id)
232 {
233         if (task->free_taskdata) {
234                 if (task->freedata) {
235                         task->freedata(task->pool, task->taskdata, thread_id);
236                 }
237                 else {
238                         MEM_freeN(task->taskdata);
239                 }
240         }
241 }
242
243 BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
244 {
245         memset(tls, 0, sizeof(TaskThreadLocalStorage));
246 }
247
248 BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool,
249                                                 const int thread_id)
250 {
251         TaskScheduler *scheduler = pool->scheduler;
252         BLI_assert(thread_id >= 0);
253         BLI_assert(thread_id <= scheduler->num_threads);
254         if (pool->use_local_tls && thread_id == 0) {
255                 BLI_assert(pool->thread_id == 0);
256                 BLI_assert(!BLI_thread_is_main());
257                 BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
258                 return &pool->local_tls;
259         }
260         if (thread_id == 0) {
261                 BLI_assert(BLI_thread_is_main());
262                 return &scheduler->task_threads[pool->thread_id].tls;
263         }
264         return &scheduler->task_threads[thread_id].tls;
265 }
266
267 BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
268 {
269         TaskMemPool *task_mempool = &tls->task_mempool;
270         for (int i = 0; i < task_mempool->num_tasks; ++i) {
271                 MEM_freeN(task_mempool->tasks[i]);
272         }
273 }
274
275 static Task *task_alloc(TaskPool *pool, const int thread_id)
276 {
277         BLI_assert(thread_id <= pool->scheduler->num_threads);
278         if (thread_id != -1) {
279                 BLI_assert(thread_id >= 0);
280                 BLI_assert(thread_id <= pool->scheduler->num_threads);
281                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
282                 TaskMemPool *task_mempool = &tls->task_mempool;
283                 /* Try to re-use task memory from a thread local storage. */
284                 if (task_mempool->num_tasks > 0) {
285                         --task_mempool->num_tasks;
286                         /* Success! We've just avoided task allocation. */
287 #ifdef DEBUG_STATS
288                         pool->mempool_stats[thread_id].num_reuse++;
289 #endif
290                         return task_mempool->tasks[task_mempool->num_tasks];
291                 }
292                 /* We are doomed to allocate new task data. */
293 #ifdef DEBUG_STATS
294                 pool->mempool_stats[thread_id].num_alloc++;
295 #endif
296         }
297         return MEM_mallocN(sizeof(Task), "New task");
298 }
299
300 static void task_free(TaskPool *pool, Task *task, const int thread_id)
301 {
302         task_data_free(task, thread_id);
303         BLI_assert(thread_id >= 0);
304         BLI_assert(thread_id <= pool->scheduler->num_threads);
305         if (thread_id == 0) {
306                 BLI_assert(pool->use_local_tls || BLI_thread_is_main());
307         }
308         TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
309         TaskMemPool *task_mempool = &tls->task_mempool;
310         if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
311                 /* Successfully allowed the task to be re-used later. */
312                 task_mempool->tasks[task_mempool->num_tasks] = task;
313                 ++task_mempool->num_tasks;
314         }
315         else {
316                 /* Local storage is saturated, no other way than to just discard
317                  * the memory.
318                  *
319                  * TODO(sergey): We can perhaps store such a pointer in a global
320                  * scheduler pool, maybe it'll be faster than discarding and
321                  * allocating again.
322                  */
323                 MEM_freeN(task);
324 #ifdef DEBUG_STATS
325                 pool->mempool_stats[thread_id].num_discard++;
326 #endif
327         }
328 }
329
330 /* Task Scheduler */
331
332 static void task_pool_num_decrease(TaskPool *pool, size_t done)
333 {
334         BLI_mutex_lock(&pool->num_mutex);
335
336         BLI_assert(pool->num >= done);
337
338         pool->num -= done;
339
340         if (pool->num == 0)
341                 BLI_condition_notify_all(&pool->num_cond);
342
343         BLI_mutex_unlock(&pool->num_mutex);
344 }
345
346 static void task_pool_num_increase(TaskPool *pool, size_t new)
347 {
348         BLI_mutex_lock(&pool->num_mutex);
349
350         pool->num += new;
351         BLI_condition_notify_all(&pool->num_cond);
352
353         BLI_mutex_unlock(&pool->num_mutex);
354 }
355
356 static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
357 {
358         bool found_task = false;
359         BLI_mutex_lock(&scheduler->queue_mutex);
360
361         while (!scheduler->queue.first && !scheduler->do_exit)
362                 BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
363
364         do {
365                 Task *current_task;
366
367                 /* Assuming we can only have an empty queue in the 'exit' case here seems logical (we should only be here after
368                  * our worker thread has been woken up from a condition_wait(), which only happens after a new task was
369                  * added to the queue), but it is wrong.
370                  * Waiting on a condition may wake up the thread even if the condition is not signaled (spurious wake-ups), and some
371                  * race condition may also empty the queue **after** the condition has been signaled, but **before** the awoken thread
372                  * reaches this point...
373                  * See http://stackoverflow.com/questions/8594591
374                  *
375                  * So we only abort here if do_exit is set.
376                  */
377                 if (scheduler->do_exit) {
378                         BLI_mutex_unlock(&scheduler->queue_mutex);
379                         return false;
380                 }
381
382                 for (current_task = scheduler->queue.first;
383                      current_task != NULL;
384                      current_task = current_task->next)
385                 {
386                         TaskPool *pool = current_task->pool;
387
388                         if (scheduler->background_thread_only && !pool->run_in_background) {
389                                 continue;
390                         }
391
392                         *task = current_task;
393                         found_task = true;
394                         BLI_remlink(&scheduler->queue, *task);
395                         break;
396                 }
397                 if (!found_task)
398                         BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
399         } while (!found_task);
400
401         BLI_mutex_unlock(&scheduler->queue_mutex);
402
403         return true;
404 }
405
406 BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls,
407                                    const int thread_id)
408 {
409         BLI_assert(!tls->do_delayed_push);
410         while (tls->num_local_queue > 0) {
411                 /* We pop the task from the queue before handling it so the task's handler
412                  * can push the next job to the local queue.
413                  */
414                 tls->num_local_queue--;
415                 Task *local_task = tls->local_queue[tls->num_local_queue];
416                 /* TODO(sergey): Double-check work_and_wait() doesn't handle other
417                  * pools' tasks.
418                  */
419                 TaskPool *local_pool = local_task->pool;
420                 local_task->run(local_pool, local_task->taskdata, thread_id);
421                 task_free(local_pool, local_task, thread_id);
422         }
423         BLI_assert(!tls->do_delayed_push);
424 }
425
426 static void *task_scheduler_thread_run(void *thread_p)
427 {
428         TaskThread *thread = (TaskThread *) thread_p;
429         TaskThreadLocalStorage *tls = &thread->tls;
430         TaskScheduler *scheduler = thread->scheduler;
431         int thread_id = thread->id;
432         Task *task;
433
434         pthread_setspecific(scheduler->tls_id_key, thread);
435
436         /* keep popping off tasks */
437         while (task_scheduler_thread_wait_pop(scheduler, &task)) {
438                 TaskPool *pool = task->pool;
439
440                 /* run task */
441                 BLI_assert(!tls->do_delayed_push);
442                 task->run(pool, task->taskdata, thread_id);
443                 BLI_assert(!tls->do_delayed_push);
444
445                 /* delete task */
446                 task_free(pool, task, thread_id);
447
448                 /* Handle all tasks from local queue. */
449                 handle_local_queue(tls, thread_id);
450
451                 /* notify pool task was done */
452                 task_pool_num_decrease(pool, 1);
453         }
454
455         return NULL;
456 }
457
458 TaskScheduler *BLI_task_scheduler_create(int num_threads)
459 {
460         TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
461
462         /* multiple places can use this task scheduler, sharing the same
463          * threads, so we keep track of the number of users. */
464         scheduler->do_exit = false;
465
466         BLI_listbase_clear(&scheduler->queue);
467         BLI_mutex_init(&scheduler->queue_mutex);
468         BLI_condition_init(&scheduler->queue_cond);
469
470         if (num_threads == 0) {
471                 /* automatic number of threads will be main thread + num cores */
472                 num_threads = BLI_system_thread_count();
473         }
474
475         /* main thread will also work, so we count it too */
476         num_threads -= 1;
477
478         /* Add background-only thread if needed. */
479         if (num_threads == 0) {
480                 scheduler->background_thread_only = true;
481                 num_threads = 1;
482         }
483
484         scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
485                                               "TaskScheduler task threads");
486
487         /* Initialize TLS for main thread. */
488         initialize_task_tls(&scheduler->task_threads[0].tls);
489
490         pthread_key_create(&scheduler->tls_id_key, NULL);
491
492         /* launch threads that will be waiting for work */
493         if (num_threads > 0) {
494                 int i;
495
496                 scheduler->num_threads = num_threads;
497                 scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
498
499                 for (i = 0; i < num_threads; i++) {
500                         TaskThread *thread = &scheduler->task_threads[i + 1];
501                         thread->scheduler = scheduler;
502                         thread->id = i + 1;
503                         initialize_task_tls(&thread->tls);
504
505                         if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
506                                 fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
507                         }
508                 }
509         }
510
511         return scheduler;
512 }
513
514 void BLI_task_scheduler_free(TaskScheduler *scheduler)
515 {
516         Task *task;
517
518         /* stop all waiting threads */
519         BLI_mutex_lock(&scheduler->queue_mutex);
520         scheduler->do_exit = true;
521         BLI_condition_notify_all(&scheduler->queue_cond);
522         BLI_mutex_unlock(&scheduler->queue_mutex);
523
524         pthread_key_delete(scheduler->tls_id_key);
525
526         /* delete threads */
527         if (scheduler->threads) {
528                 int i;
529
530                 for (i = 0; i < scheduler->num_threads; i++) {
531                         if (pthread_join(scheduler->threads[i], NULL) != 0)
532                                 fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
533                 }
534
535                 MEM_freeN(scheduler->threads);
536         }
537
538         /* Delete task thread data */
539         if (scheduler->task_threads) {
540                 for (int i = 0; i < scheduler->num_threads + 1; ++i) {
541                         TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
542                         free_task_tls(tls);
543                 }
544
545                 MEM_freeN(scheduler->task_threads);
546         }
547
548         /* delete leftover tasks */
549         for (task = scheduler->queue.first; task; task = task->next) {
550                 task_data_free(task, 0);
551         }
552         BLI_freelistN(&scheduler->queue);
553
554         /* delete mutex/condition */
555         BLI_mutex_end(&scheduler->queue_mutex);
556         BLI_condition_end(&scheduler->queue_cond);
557
558         MEM_freeN(scheduler);
559 }
560
561 int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
562 {
563         return scheduler->num_threads + 1;
564 }
565
566 static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
567 {
568         task_pool_num_increase(task->pool, 1);
569
570         /* add task to queue */
571         BLI_mutex_lock(&scheduler->queue_mutex);
572
573         if (priority == TASK_PRIORITY_HIGH)
574                 BLI_addhead(&scheduler->queue, task);
575         else
576                 BLI_addtail(&scheduler->queue, task);
577
578         BLI_condition_notify_one(&scheduler->queue_cond);
579         BLI_mutex_unlock(&scheduler->queue_mutex);
580 }
581
582 static void task_scheduler_push_all(TaskScheduler *scheduler,
583                                     TaskPool *pool,
584                                     Task **tasks,
585                                     int num_tasks)
586 {
587         if (num_tasks == 0) {
588                 return;
589         }
590
591         task_pool_num_increase(pool, num_tasks);
592
593         BLI_mutex_lock(&scheduler->queue_mutex);
594
595         for (int i = 0; i < num_tasks; i++) {
596                 BLI_addhead(&scheduler->queue, tasks[i]);
597         }
598
599         BLI_condition_notify_all(&scheduler->queue_cond);
600         BLI_mutex_unlock(&scheduler->queue_mutex);
601 }
602
603 static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
604 {
605         Task *task, *nexttask;
606         size_t done = 0;
607
608         BLI_mutex_lock(&scheduler->queue_mutex);
609
610         /* free all tasks from this pool from the queue */
611         for (task = scheduler->queue.first; task; task = nexttask) {
612                 nexttask = task->next;
613
614                 if (task->pool == pool) {
615                         task_data_free(task, pool->thread_id);
616                         BLI_freelinkN(&scheduler->queue, task);
617
618                         done++;
619                 }
620         }
621
622         BLI_mutex_unlock(&scheduler->queue_mutex);
623
624         /* notify done */
625         task_pool_num_decrease(pool, done);
626 }
627
628 /* Task Pool */
629
630 static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
631                                      void *userdata,
632                                      const bool is_background,
633                                      const bool is_suspended)
634 {
635         TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
636
637 #ifndef NDEBUG
638         /* Assert we do not try to create a background pool from some parent task - those only work OK from the main thread. */
639         if (is_background) {
640                 const pthread_t thread_id = pthread_self();
641                 int i = scheduler->num_threads;
642
643                 while (i--) {
644                         BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
645                 }
646         }
647 #endif
648
649         pool->scheduler = scheduler;
650         pool->num = 0;
651         pool->do_cancel = false;
652         pool->do_work = false;
653         pool->is_suspended = is_suspended;
654         pool->start_suspended = is_suspended;
655         pool->num_suspended = 0;
656         pool->suspended_queue.first = pool->suspended_queue.last = NULL;
657         pool->run_in_background = is_background;
658         pool->use_local_tls = false;
659
660         BLI_mutex_init(&pool->num_mutex);
661         BLI_condition_init(&pool->num_cond);
662
663         pool->userdata = userdata;
664         BLI_mutex_init(&pool->user_mutex);
665
666         if (BLI_thread_is_main()) {
667                 pool->thread_id = 0;
668         }
669         else {
670                 TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);
671                 if (thread == NULL) {
672                         /* NOTE: The task pool is created from a non-main thread which is not
673                          * managed by the task scheduler. We identify ourselves as thread ID
674                          * 0, but we do not use the scheduler's TLS storage and use our own
675                          * instead to avoid any possible threading conflicts.
676                          */
677                         pool->thread_id = 0;
678                         pool->use_local_tls = true;
679 #ifndef NDEBUG
680                         pool->creator_thread_id = pthread_self();
681 #endif
682                         initialize_task_tls(&pool->local_tls);
683                 }
684                 else {
685                         pool->thread_id = thread->id;
686                 }
687         }
688
689 #ifdef DEBUG_STATS
690         pool->mempool_stats =
691                 MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1),
692                             "per-taskpool mempool stats");
693 #endif
694
695         /* Ensure malloc will go fine from threads.
696          *
697          * This is needed because we could be in the main thread here
698          * and malloc could be non-thread-safe at this point because
699          * no other jobs are running.
700          */
701         BLI_threaded_malloc_begin();
702
703         return pool;
704 }
705
706 /**
707  * Create a normal task pool.
708  * This means that in a single-threaded context, it will not be executed at all until you call
709  * \a BLI_task_pool_work_and_wait() on it.
710  */
711 TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
712 {
713         return task_pool_create_ex(scheduler, userdata, false, false);
714 }
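
/* Example (illustrative sketch only, not part of the original sources): typical
 * usage of a normal pool. The pushing thread takes part in execution through
 * BLI_task_pool_work_and_wait(). 'MyItem', 'my_item_run' and 'my_process_items'
 * are hypothetical names used for this sketch.
 */
#if 0
static void my_item_run(TaskPool *pool, void *taskdata, int threadid)
{
	MyItem *item = taskdata;
	/* ... crunch the item, optionally reading BLI_task_pool_userdata(pool) ... */
}

static void my_process_items(MyItem **items, int num_items)
{
	TaskScheduler *scheduler = BLI_task_scheduler_get();
	TaskPool *pool = BLI_task_pool_create(scheduler, NULL);
	for (int i = 0; i < num_items; i++) {
		/* free_taskdata=false: the caller owns 'items', the pool must not free them. */
		BLI_task_pool_push(pool, my_item_run, items[i], false, TASK_PRIORITY_LOW);
	}
	/* Blocks until pool->num drops to zero, executing this pool's tasks itself. */
	BLI_task_pool_work_and_wait(pool);
	BLI_task_pool_free(pool);
}
#endif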
715
716 /**
717  * Create a background task pool.
718  * In a multi-threaded context, there is no difference from \a BLI_task_pool_create(), but in the single-threaded case
719  * it is ensured to have at least one worker thread to run on (i.e. you do not have to call
720  * \a BLI_task_pool_work_and_wait() on it to be sure it will be processed).
721  *
722  * \note Background pools are non-recursive (that is, you should not create other background pools in tasks assigned
723  *       to a background pool, they could end up never being executed, since the 'fallback' background thread is already
724  *       busy with the parent task in a single-threaded context).
725  */
726 TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
727 {
728         return task_pool_create_ex(scheduler, userdata, true, false);
729 }
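
/* Example sketch ('job_data' and 'my_background_run' are hypothetical): a
 * background pool is useful for work that must make progress even when only the
 * fallback background thread is available; nobody is required to call
 * BLI_task_pool_work_and_wait() for the task to start running.
 */
#if 0
/* ... inside some job setup code: */
TaskPool *pool = BLI_task_pool_create_background(BLI_task_scheduler_get(), job_data);
BLI_task_pool_push(pool, my_background_run, NULL, false, TASK_PRIORITY_LOW);
/* BLI_task_pool_free() cancels queued tasks and waits for running ones on teardown. */
#endif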
730
731 /**
732  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
733  * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
734  * overhead when pushing a huge amount of small initial tasks from the main thread.
735  */
736 TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
737 {
738         return task_pool_create_ex(scheduler, userdata, false, true);
739 }
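
/* Example sketch (hypothetical 'state', 'chunks' and 'chunk_run'): with a
 * suspended pool the pushes below only fill pool->suspended_queue (see
 * task_pool_push()), so no worker is woken up until BLI_task_pool_work_and_wait()
 * flushes that queue to the scheduler in one go.
 */
#if 0
TaskPool *pool = BLI_task_pool_create_suspended(BLI_task_scheduler_get(), &state);
for (int i = 0; i < num_chunks; i++) {
	BLI_task_pool_push(pool, chunk_run, &chunks[i], false, TASK_PRIORITY_HIGH);
}
BLI_task_pool_work_and_wait(pool);  /* Tasks are actually scheduled and executed here. */
BLI_task_pool_free(pool);
#endif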
740
741 void BLI_task_pool_free(TaskPool *pool)
742 {
743         BLI_task_pool_cancel(pool);
744
745         BLI_mutex_end(&pool->num_mutex);
746         BLI_condition_end(&pool->num_cond);
747
748         BLI_mutex_end(&pool->user_mutex);
749
750 #ifdef DEBUG_STATS
751         printf("Thread ID    Allocated   Reused   Discarded\n");
752         for (int i = 0; i < pool->scheduler->num_threads + 1; ++i) {
753                 printf("%02d           %05d       %05d    %05d\n",
754                        i,
755                        pool->mempool_stats[i].num_alloc,
756                        pool->mempool_stats[i].num_reuse,
757                        pool->mempool_stats[i].num_discard);
758         }
759         MEM_freeN(pool->mempool_stats);
760 #endif
761
762         if (pool->use_local_tls) {
763                 free_task_tls(&pool->local_tls);
764         }
765
766         MEM_freeN(pool);
767
768         BLI_threaded_malloc_end();
769 }
770
771 BLI_INLINE bool task_can_use_local_queues(TaskPool *pool, int thread_id)
772 {
773         return (thread_id != -1 && (thread_id != pool->thread_id || pool->do_work));
774 }
775
776 static void task_pool_push(
777         TaskPool *pool, TaskRunFunction run, void *taskdata,
778         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority,
779         int thread_id)
780 {
781         /* Allocate task and fill its properties. */
782         Task *task = task_alloc(pool, thread_id);
783         task->run = run;
784         task->taskdata = taskdata;
785         task->free_taskdata = free_taskdata;
786         task->freedata = freedata;
787         task->pool = pool;
788         /* For suspended pools we put everything into a global queue first
789          * and exit as soon as possible.
790          *
791          * These tasks will be moved to actual execution when the pool is
792          * activated by work_and_wait().
793          */
794         if (pool->is_suspended) {
795                 BLI_addhead(&pool->suspended_queue, task);
796                 atomic_fetch_and_add_z(&pool->num_suspended, 1);
797                 return;
798         }
799         /* Push to the thread's local queue first, this is the cheapest push there is. */
800         if (task_can_use_local_queues(pool, thread_id)) {
801                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
802                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
803                 /* Try to push to a local execution queue.
804                  * These tasks will be picked up next.
805                  */
806                 if (tls->num_local_queue < LOCAL_QUEUE_SIZE) {
807                         tls->local_queue[tls->num_local_queue] = task;
808                         tls->num_local_queue++;
809                         return;
810                 }
811                 /* If we are in the delayed task push mode, we push tasks to a
812                  * temporary local queue first without any locks, and then move them
813                  * to the global execution queue with a single lock.
814                  */
815                 if (tls->do_delayed_push && tls->num_delayed_queue < DELAYED_QUEUE_SIZE) {
816                         tls->delayed_queue[tls->num_delayed_queue] = task;
817                         tls->num_delayed_queue++;
818                         return;
819                 }
820         }
821         /* Push to the global execution queue, the slowest possible method,
822          * which causes a fair amount of threading overhead.
823          */
824         task_scheduler_push(pool->scheduler, task, priority);
825 }
826
827 void BLI_task_pool_push_ex(
828         TaskPool *pool, TaskRunFunction run, void *taskdata,
829         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority)
830 {
831         task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
832 }
833
834 void BLI_task_pool_push(
835         TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
836 {
837         BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
838 }
839
840 void BLI_task_pool_push_from_thread(TaskPool *pool, TaskRunFunction run,
841         void *taskdata, bool free_taskdata, TaskPriority priority, int thread_id)
842 {
843         task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
844 }
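
/* Example sketch (hypothetical 'split_run', 'needs_split', 'make_subtask_data'):
 * pushing follow-up work from inside a running task. Passing the worker's own
 * thread_id lets task_pool_push() use that thread's local/delayed queues instead
 * of always taking the global scheduler lock.
 */
#if 0
static void split_run(TaskPool *pool, void *taskdata, int threadid)
{
	if (needs_split(taskdata)) {
		/* free_taskdata=true with freedata=NULL: data must come from MEM_mallocN(). */
		BLI_task_pool_push_from_thread(pool, split_run, make_subtask_data(taskdata), true,
		                               TASK_PRIORITY_HIGH, threadid);
	}
	/* ... do (the rest of) the work on taskdata ... */
}
#endif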
845
846 void BLI_task_pool_work_and_wait(TaskPool *pool)
847 {
848         TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
849         TaskScheduler *scheduler = pool->scheduler;
850
851         if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
852                 if (pool->num_suspended) {
853                         task_pool_num_increase(pool, pool->num_suspended);
854                         BLI_mutex_lock(&scheduler->queue_mutex);
855
856                         BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue);
857
858                         BLI_condition_notify_all(&scheduler->queue_cond);
859                         BLI_mutex_unlock(&scheduler->queue_mutex);
860
861                         pool->num_suspended = 0;
862                 }
863         }
864
865         pool->do_work = true;
866
867         ASSERT_THREAD_ID(pool->scheduler, pool->thread_id);
868
869         handle_local_queue(tls, pool->thread_id);
870
871         BLI_mutex_lock(&pool->num_mutex);
872
873         while (pool->num != 0) {
874                 Task *task, *work_task = NULL;
875                 bool found_task = false;
876
877                 BLI_mutex_unlock(&pool->num_mutex);
878
879                 BLI_mutex_lock(&scheduler->queue_mutex);
880
881                 /* find a task from this pool. if we get a task from another pool,
882                  * we can get into a deadlock */
883
884                 for (task = scheduler->queue.first; task; task = task->next) {
885                         if (task->pool == pool) {
886                                 work_task = task;
887                                 found_task = true;
888                                 BLI_remlink(&scheduler->queue, task);
889                                 break;
890                         }
891                 }
892
893                 BLI_mutex_unlock(&scheduler->queue_mutex);
894
895                 /* if found task, do it, otherwise wait until other tasks are done */
896                 if (found_task) {
897                         /* run task */
898                         BLI_assert(!tls->do_delayed_push);
899                         work_task->run(pool, work_task->taskdata, pool->thread_id);
900                         BLI_assert(!tls->do_delayed_push);
901
902                         /* delete task */
903                         task_free(pool, task, pool->thread_id);
904
905                         /* Handle all tasks from local queue. */
906                         handle_local_queue(tls, pool->thread_id);
907
908                         /* notify pool task was done */
909                         task_pool_num_decrease(pool, 1);
910                 }
911
912                 BLI_mutex_lock(&pool->num_mutex);
913                 if (pool->num == 0)
914                         break;
915
916                 if (!found_task)
917                         BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
918         }
919
920         BLI_mutex_unlock(&pool->num_mutex);
921
922         BLI_assert(tls->num_local_queue == 0);
923 }
924
925 void BLI_task_pool_work_wait_and_reset(TaskPool *pool)
926 {
927         BLI_task_pool_work_and_wait(pool);
928
929         pool->do_work = false;
930         pool->is_suspended = pool->start_suspended;
931 }
932
933 void BLI_task_pool_cancel(TaskPool *pool)
934 {
935         pool->do_cancel = true;
936
937         task_scheduler_clear(pool->scheduler, pool);
938
939         /* wait until all entries are cleared */
940         BLI_mutex_lock(&pool->num_mutex);
941         while (pool->num)
942                 BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
943         BLI_mutex_unlock(&pool->num_mutex);
944
945         pool->do_cancel = false;
946 }
947
948 bool BLI_task_pool_canceled(TaskPool *pool)
949 {
950         return pool->do_cancel;
951 }
952
953 void *BLI_task_pool_userdata(TaskPool *pool)
954 {
955         return pool->userdata;
956 }
957
958 ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
959 {
960         return &pool->user_mutex;
961 }
962
963 void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id)
964 {
965         if (task_can_use_local_queues(pool, thread_id)) {
966                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
967                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
968                 tls->do_delayed_push = true;
969         }
970 }
971
972 void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id)
973 {
974         if (task_can_use_local_queues(pool, thread_id)) {
975                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
976                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
977                 BLI_assert(tls->do_delayed_push);
978                 task_scheduler_push_all(pool->scheduler,
979                                         pool,
980                                         tls->delayed_queue,
981                                         tls->num_delayed_queue);
982                 tls->do_delayed_push = false;
983                 tls->num_delayed_queue = 0;
984         }
985 }
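
/* Example sketch (hypothetical 'Node' graph and 'node_run'): batching many pushes
 * from one thread. Between _begin and _end, pushes overflowing the small local
 * queue are stored in tls->delayed_queue without locking; _end then submits them
 * to the scheduler under a single queue mutex lock.
 */
#if 0
static void schedule_node_children(TaskPool *pool, Node *node, int thread_id)
{
	BLI_task_pool_delayed_push_begin(pool, thread_id);
	for (Node *child = node->children.first; child != NULL; child = child->next) {
		BLI_task_pool_push_from_thread(pool, node_run, child, false,
		                               TASK_PRIORITY_HIGH, thread_id);
	}
	BLI_task_pool_delayed_push_end(pool, thread_id);
}
#endif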
986
987 /* Parallel range routines */
988
989 /**
990  *
991  * Main functions:
992  * - #BLI_task_parallel_range
993  * - #BLI_task_parallel_listbase (#ListBase - doubly linked list)
994  *
995  * TODO:
996  * - #BLI_task_parallel_foreach_link (#Link - single linked list)
997  * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
998  * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
999  *
1000  */
1001
1002 /* Allows avoiding the use of malloc for userdata_chunk in tasks, when small enough. */
1003 #define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
1004 #define MALLOCA_FREE(_mem, _size) if (((_mem) != NULL) && ((_size) > 8192)) MEM_freeN((_mem))
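
/* Example sketch: MALLOCA() must be paired with MALLOCA_FREE() using the same
 * size, so that small chunks live on the stack (alloca) while only chunks above
 * the 8 KiB threshold touch the heap.
 */
#if 0
void *userdata_chunk_local = MALLOCA(userdata_chunk_size);
/* ... use the chunk ... */
MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
#endif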
1005
1006 typedef struct ParallelRangeState {
1007         int start, stop;
1008         void *userdata;
1009
1010         TaskParallelRangeFunc func;
1011
1012         int iter;
1013         int chunk_size;
1014 } ParallelRangeState;
1015
1016 BLI_INLINE bool parallel_range_next_iter_get(
1017         ParallelRangeState * __restrict state,
1018         int * __restrict iter, int * __restrict count)
1019 {
1020         int previter = atomic_fetch_and_add_int32(&state->iter, state->chunk_size);
1021
1022         *iter = previter;
1023         *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
1024
1025         return (previter < state->stop);
1026 }
1027
1028 static void parallel_range_func(
1029         TaskPool * __restrict pool,
1030         void *userdata_chunk,
1031         int thread_id)
1032 {
1033         ParallelRangeState * __restrict state = BLI_task_pool_userdata(pool);
1034         ParallelRangeTLS tls = {
1035                 .thread_id = thread_id,
1036                 .userdata_chunk = userdata_chunk,
1037         };
1038         int iter, count;
1039         while (parallel_range_next_iter_get(state, &iter, &count)) {
1040                 for (int i = 0; i < count; ++i) {
1041                         state->func(state->userdata, iter + i, &tls);
1042                 }
1043         }
1044 }
1045
1046 static void parallel_range_single_thread(const int start, const int stop,
1047                                          void *userdata,
1048                                          TaskParallelRangeFunc func,
1049                                          const ParallelRangeSettings *settings)
1050 {
1051         void *userdata_chunk = settings->userdata_chunk;
1052         const size_t userdata_chunk_size = settings->userdata_chunk_size;
1053         void *userdata_chunk_local = NULL;
1054         const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
1055         if (use_userdata_chunk) {
1056                 userdata_chunk_local = MALLOCA(userdata_chunk_size);
1057                 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1058         }
1059         ParallelRangeTLS tls = {
1060                 .thread_id = 0,
1061                 .userdata_chunk = userdata_chunk_local,
1062         };
1063         for (int i = start; i < stop; ++i) {
1064                 func(userdata, i, &tls);
1065         }
1066         if (settings->func_finalize != NULL) {
1067                 settings->func_finalize(userdata, userdata_chunk_local);
1068         }
1069         MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
1070 }
1071
1072 /**
1073  * This function allows parallelizing for loops in a way similar to OpenMP's 'parallel for' statement.
1074  *
1075  * See public API doc of ParallelRangeSettings for description of all settings.
1076  */
1077 void BLI_task_parallel_range(const int start, const int stop,
1078                              void *userdata,
1079                              TaskParallelRangeFunc func,
1080                              const ParallelRangeSettings *settings)
1081 {
1082         TaskScheduler *task_scheduler;
1083         TaskPool *task_pool;
1084         ParallelRangeState state;
1085         int i, num_threads, num_tasks;
1086
1087         void *userdata_chunk = settings->userdata_chunk;
1088         const size_t userdata_chunk_size = settings->userdata_chunk_size;
1089         void *userdata_chunk_local = NULL;
1090         void *userdata_chunk_array = NULL;
1091         const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
1092
1093         if (start == stop) {
1094                 return;
1095         }
1096
1097         BLI_assert(start < stop);
1098         if (userdata_chunk_size != 0) {
1099                 BLI_assert(userdata_chunk != NULL);
1100         }
1101
1102         /* If there is not enough data to be crunched, don't bother with tasks at all,
1103          * do everything from the main thread.
1104          */
1105         if (!settings->use_threading) {
1106                 parallel_range_single_thread(start, stop,
1107                                              userdata,
1108                                              func,
1109                                              settings);
1110                 return;
1111         }
1112
1113         task_scheduler = BLI_task_scheduler_get();
1114         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1115
1116         /* The idea here is to prevent creating a task for each of the loop iterations
1117          * and instead have tasks which are evenly distributed across CPU cores and
1118          * pull the next iteration to be crunched using the queue.
1119          */
1120         num_tasks = num_threads + 2;
1121
1122         state.start = start;
1123         state.stop = stop;
1124         state.userdata = userdata;
1125         state.func = func;
1126         state.iter = start;
1127         switch (settings->scheduling_mode) {
1128                 case TASK_SCHEDULING_STATIC:
1129                         state.chunk_size = max_ii(
1130                                 settings->min_iter_per_thread,
1131                                 (stop - start) / (num_tasks));
1132                         break;
1133                 case TASK_SCHEDULING_DYNAMIC:
1134                         /* TODO(sergey): Make it configurable from min_iter_per_thread. */
1135                         state.chunk_size = 32;
1136                         break;
1137         }
1138
1139         num_tasks = min_ii(num_tasks,
1140                            max_ii(1, (stop - start) / state.chunk_size));
1141
1142         if (num_tasks == 1) {
1143                 parallel_range_single_thread(start, stop,
1144                                              userdata,
1145                                              func,
1146                                              settings);
1147                 return;
1148         }
1149
1150         task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1151
1152         /* NOTE: This way we are adding a memory barrier and ensure all worker
1153          * threads can read and modify the value, without any locks. */
1154         atomic_fetch_and_add_int32(&state.iter, 0);
1155
1156         if (use_userdata_chunk) {
1157                 userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
1158         }
1159
1160         for (i = 0; i < num_tasks; i++) {
1161                 if (use_userdata_chunk) {
1162                         userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1163                         memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1164                 }
1165                 /* Use this pool's pre-allocated tasks. */
1166                 BLI_task_pool_push_from_thread(task_pool,
1167                                                parallel_range_func,
1168                                                userdata_chunk_local, false,
1169                                                TASK_PRIORITY_HIGH,
1170                                                task_pool->thread_id);
1171         }
1172
1173         BLI_task_pool_work_and_wait(task_pool);
1174         BLI_task_pool_free(task_pool);
1175
1176         if (use_userdata_chunk) {
1177                 if (settings->func_finalize != NULL) {
1178                         for (i = 0; i < num_tasks; i++) {
1179                                 userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1180                                 settings->func_finalize(userdata, userdata_chunk_local);
1181                         }
1182                 }
1183                 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
1184         }
1185 }
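
/* Example sketch of the public API above, with hypothetical names ('MySumData',
 * 'sum_range_cb', ...); the exact callback typedefs and the assumed
 * BLI_parallel_range_settings_defaults() helper live in BLI_task.h. Each task
 * gets its own copy of userdata_chunk (a partial sum here), and func_finalize
 * merges the per-task copies back into userdata.
 */
#if 0
typedef struct MySumData {
	const float *values;
	float total;
} MySumData;

static void sum_range_cb(void *userdata, const int i, const ParallelRangeTLS *__restrict tls)
{
	const MySumData *data = userdata;
	float *partial_sum = tls->userdata_chunk;
	*partial_sum += data->values[i];
}

static void sum_finalize_cb(void *userdata, void *userdata_chunk)
{
	MySumData *data = userdata;
	data->total += *(const float *)userdata_chunk;
}

static float sum_values(const float *values, const int num_values)
{
	MySumData data = {values, 0.0f};
	float partial_sum = 0.0f;

	ParallelRangeSettings settings;
	BLI_parallel_range_settings_defaults(&settings);
	settings.use_threading = (num_values > 10000);
	settings.userdata_chunk = &partial_sum;
	settings.userdata_chunk_size = sizeof(partial_sum);
	settings.func_finalize = sum_finalize_cb;

	BLI_task_parallel_range(0, num_values, &data, sum_range_cb, &settings);
	return data.total;
}
#endif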
1186
1187 #undef MALLOCA
1188 #undef MALLOCA_FREE
1189
1190 typedef struct ParallelListbaseState {
1191         void *userdata;
1192         TaskParallelListbaseFunc func;
1193
1194         int chunk_size;
1195         int index;
1196         Link *link;
1197         SpinLock lock;
1198 } ParallelListState;
1199
1200 BLI_INLINE Link *parallel_listbase_next_iter_get(
1201         ParallelListState * __restrict state,
1202         int * __restrict index,
1203         int * __restrict count)
1204 {
1205         int task_count = 0;
1206         BLI_spin_lock(&state->lock);
1207         Link *result = state->link;
1208         if (LIKELY(result != NULL)) {
1209                 *index = state->index;
1210                 while (state->link != NULL && task_count < state->chunk_size) {
1211                         ++task_count;
1212                         state->link = state->link->next;
1213                 }
1214                 state->index += task_count;
1215         }
1216         BLI_spin_unlock(&state->lock);
1217         *count = task_count;
1218         return result;
1219 }
1220
1221 static void parallel_listbase_func(
1222         TaskPool * __restrict pool,
1223         void *UNUSED(taskdata),
1224         int UNUSED(threadid))
1225 {
1226         ParallelListState * __restrict state = BLI_task_pool_userdata(pool);
1227         Link *link;
1228         int index, count;
1229
1230         while ((link = parallel_listbase_next_iter_get(state, &index, &count)) != NULL) {
1231                 for (int i = 0; i < count; ++i) {
1232                         state->func(state->userdata, link, index + i);
1233                         link = link->next;
1234                 }
1235         }
1236 }
1237
1238 static void task_parallel_listbase_no_threads(
1239         struct ListBase *listbase,
1240         void *userdata,
1241         TaskParallelListbaseFunc func)
1242 {
1243         int i = 0;
1244         for (Link *link = listbase->first; link != NULL; link = link->next, ++i) {
1245                 func(userdata, link, i);
1246         }
1247 }
1248
1249 /* NOTE: The idea here is to compensate for the rather measurable threading
1250  * overhead caused by fetching tasks. With too many CPU threads we start
1251  * to spend too much time on that overhead.
1252 BLI_INLINE int task_parallel_listbasecalc_chunk_size(const int num_threads)
1253 {
1254         if (num_threads > 32) {
1255                 return 128;
1256         }
1257         else if (num_threads > 16) {
1258                 return 64;
1259         }
1260         return 32;
1261 }
1262
1263 /**
1264  * This function allows parallelizing for loops over ListBase items.
1265  *
1266  * \param listbase: The doubly linked list to loop over.
1267  * \param userdata: Common userdata passed to all instances of \a func.
1268  * \param func: Callback function.
1269  * \param use_threading: If \a true, actually split-execute the loop in threads, else just do a sequential for loop
1270  *                      (allows caller to use any kind of test to switch on parallelization or not).
1271  *
1272  * \note There is no static scheduling here, since it would need another full loop over items to count them...
1273  */
1274 void BLI_task_parallel_listbase(
1275         struct ListBase *listbase,
1276         void *userdata,
1277         TaskParallelListbaseFunc func,
1278         const bool use_threading)
1279 {
1280         if (BLI_listbase_is_empty(listbase)) {
1281                 return;
1282         }
1283         if (!use_threading) {
1284                 task_parallel_listbase_no_threads(listbase, userdata, func);
1285                 return;
1286         }
1287         TaskScheduler *task_scheduler = BLI_task_scheduler_get();
1288         const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1289         /* TODO(sergey): Consider making chunk size configurable. */
1290         const int chunk_size = task_parallel_listbasecalc_chunk_size(num_threads);
1291         const int num_tasks = min_ii(
1292                 num_threads,
1293                 BLI_listbase_count(listbase) / chunk_size);
1294         if (num_tasks <= 1) {
1295                 task_parallel_listbase_no_threads(listbase, userdata, func);
1296                 return;
1297         }
1298
1299         ParallelListState state;
1300         TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1301
1302         state.index = 0;
1303         state.link = listbase->first;
1304         state.userdata = userdata;
1305         state.func = func;
1306         state.chunk_size = chunk_size;
1307         BLI_spin_init(&state.lock);
1308
1309         BLI_assert(num_tasks > 0);
1310         for (int i = 0; i < num_tasks; i++) {
1311                 /* Use this pool's pre-allocated tasks. */
1312                 BLI_task_pool_push_from_thread(task_pool,
1313                                                parallel_listbase_func,
1314                                                NULL, false,
1315                                                TASK_PRIORITY_HIGH,
1316                                                task_pool->thread_id);
1317         }
1318
1319         BLI_task_pool_work_and_wait(task_pool);
1320         BLI_task_pool_free(task_pool);
1321
1322         BLI_spin_end(&state.lock);
1323 }
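
/* Example sketch (hypothetical 'MyItem' struct whose first member is a Link, and
 * hypothetical helpers): the callback receives every Link together with its
 * index; it must not add or remove list elements while the loop runs.
 */
#if 0
static void tag_item_cb(void *userdata, Link *link, int index)
{
	int *tags = userdata;
	tags[index] = my_compute_tag((MyItem *)link);
}

/* ... somewhere in the calling code: */
BLI_task_parallel_listbase(&my_items, tags, tag_item_cb, my_items_count > 1000);
#endif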
1324
1325
1326 typedef struct ParallelMempoolState {
1327         void *userdata;
1328         TaskParallelMempoolFunc func;
1329 } ParallelMempoolState;
1330
1331 static void parallel_mempool_func(
1332         TaskPool * __restrict pool,
1333         void *taskdata,
1334         int UNUSED(threadid))
1335 {
1336         ParallelMempoolState * __restrict state = BLI_task_pool_userdata(pool);
1337         BLI_mempool_iter *iter = taskdata;
1338         MempoolIterData *item;
1339
1340         while ((item = BLI_mempool_iterstep(iter)) != NULL) {
1341                 state->func(state->userdata, item);
1342         }
1343 }
1344
1345 /**
1346  * This function allows parallelizing for loops over Mempool items.
1347  *
1348  * \param mempool: The iterable BLI_mempool to loop over.
1349  * \param userdata: Common userdata passed to all instances of \a func.
1350  * \param func: Callback function.
1351  * \param use_threading: If \a true, actually split-execute loop in threads, else just do a sequential for loop
1352  * (allows caller to use any kind of test to switch on parallelization or not).
1353  *
1354  * \note There is no static scheduling here.
1355  */
1356 void BLI_task_parallel_mempool(
1357         BLI_mempool *mempool,
1358         void *userdata,
1359         TaskParallelMempoolFunc func,
1360         const bool use_threading)
1361 {
1362         TaskScheduler *task_scheduler;
1363         TaskPool *task_pool;
1364         ParallelMempoolState state;
1365         int i, num_threads, num_tasks;
1366
1367         if (BLI_mempool_len(mempool) == 0) {
1368                 return;
1369         }
1370
1371         if (!use_threading) {
1372                 BLI_mempool_iter iter;
1373                 BLI_mempool_iternew(mempool, &iter);
1374
1375                 for (void *item = BLI_mempool_iterstep(&iter); item != NULL; item = BLI_mempool_iterstep(&iter)) {
1376                         func(userdata, item);
1377                 }
1378                 return;
1379         }
1380
1381         task_scheduler = BLI_task_scheduler_get();
1382         task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1383         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1384
1385         /* The idea here is to prevent creating a task for each of the loop iterations
1386          * and instead have tasks which are evenly distributed across CPU cores and
1387          * pull the next item to be crunched using the thread-aware BLI_mempool_iter.
1388          */
1389         num_tasks = num_threads + 2;
1390
1391         state.userdata = userdata;
1392         state.func = func;
1393
1394         BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool, (size_t)num_tasks);
1395
1396         for (i = 0; i < num_tasks; i++) {
1397                 /* Use this pool's pre-allocated tasks. */
1398                 BLI_task_pool_push_from_thread(task_pool,
1399                                                parallel_mempool_func,
1400                                                &mempool_iterators[i], false,
1401                                                TASK_PRIORITY_HIGH,
1402                                                task_pool->thread_id);
1403         }
1404
1405         BLI_task_pool_work_and_wait(task_pool);
1406         BLI_task_pool_free(task_pool);
1407
1408         BLI_mempool_iter_threadsafe_free(mempool_iterators);
1409 }
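
/* Example sketch (hypothetical 'MyElem' pool and helpers): every element of the
 * mempool is visited exactly once, in an arbitrary order, with one thread-safe
 * iterator per task as set up above.
 */
#if 0
static void recalc_elem_cb(void *userdata, MempoolIterData *item)
{
	MySettings *settings = userdata;
	my_recalc_elem(settings, (MyElem *)item);
}

/* ... somewhere in the calling code: */
BLI_task_parallel_mempool(my_elem_pool, &my_settings, recalc_elem_cb, true);
#endif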