1 /*
2  * ***** BEGIN GPL LICENSE BLOCK *****
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  *
18  * ***** END GPL LICENSE BLOCK *****
19  */
20
21 /** \file blender/blenlib/intern/task.c
22  *  \ingroup bli
23  *
24  * A generic task system which can be used for any task based subsystem.
25  */
26
27 #include <stdlib.h>
28
29 #include "MEM_guardedalloc.h"
30
31 #include "DNA_listBase.h"
32
33 #include "BLI_listbase.h"
34 #include "BLI_math.h"
35 #include "BLI_mempool.h"
36 #include "BLI_task.h"
37 #include "BLI_threads.h"
38
39 #include "atomic_ops.h"
40
41 /* Define this to enable some detailed statistics printing. */
42 #undef DEBUG_STATS
43
44 /* Types */
45
46 /* Number of per-thread pre-allocated tasks.
47  *
48  * For more details see description of TaskMemPool.
49  */
50 #define MEMPOOL_SIZE 256
51
52 /* Number of tasks which are pushed directly to the local thread queue.
53  *
54  * This allows a thread to fetch the next task without locking the whole queue.
55  */
56 #define LOCAL_QUEUE_SIZE 1
57
58 /* Number of tasks which are allowed to be scheduled in a delayed manner.
59  *
60  * This allows using fewer locks when scheduling the children of a graph node.
61  * More details can be found at TaskThreadLocalStorage::do_delayed_push.
62  */
63 #define DELAYED_QUEUE_SIZE 4096
64
65 #ifndef NDEBUG
66 #  define ASSERT_THREAD_ID(scheduler, thread_id)                              \
67         do {                                                                      \
68                 if (!BLI_thread_is_main()) {                                          \
69                         TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);  \
70                         if (thread == NULL) {                                             \
71                                 BLI_assert(thread_id == 0);                                   \
72                         }                                                                 \
73                         else {                                                            \
74                                 BLI_assert(thread_id == thread->id);                          \
75                         }                                                                 \
76                 }                                                                     \
77                 else {                                                                \
78                         BLI_assert(thread_id == 0);                                       \
79                 }                                                                     \
80         } while (false)
81 #else
82 #  define ASSERT_THREAD_ID(scheduler, thread_id)
83 #endif
84
85 typedef struct Task {
86         struct Task *next, *prev;
87
88         TaskRunFunction run;
89         void *taskdata;
90         bool free_taskdata;
91         TaskFreeFunction freedata;
92         TaskPool *pool;
93 } Task;
94
95 /* This is a per-thread storage of pre-allocated tasks.
96  *
97  * The idea behind this is simple: reduce the number of malloc() calls when
98  * pushing a new task to the pool. This is done by keeping the memory of tasks
99  * which have already finished, so instead of freeing that memory we put it
100  * into the pool for later re-use.
101  *
102  * The tricky part here is to avoid any inter-thread synchronization, hence no
103  * lock exists around this pool. The pool becomes the owner of the pointer from
104  * a freed task, and only the corresponding thread is able to use this pool
105  * (no memory stealing and such).
106  *
107  * This leads to the following use of the pool:
108  *
109  * - task_push() should provide the proper thread ID from which the task is
110  *   being pushed.
111  *
112  * - The task allocation function checks the corresponding memory pool and, if
113  *   there is any memory in there, marks it as re-used, removes it from the pool
114  *   and uses that memory for the new task.
115  *
116  *   At this moment the task queue owns the memory.
117  *
118  * - When the task is done and task_free() is called, the memory is put into the
119  *   pool which corresponds to the thread which handled the task.
120  */
121 typedef struct TaskMemPool {
122         /* Number of pre-allocated tasks in the pool. */
123         int num_tasks;
124         /* Pre-allocated task memory pointers. */
125         Task *tasks[MEMPOOL_SIZE];
126 } TaskMemPool;
127
128 #ifdef DEBUG_STATS
129 typedef struct TaskMemPoolStats {
130         /* Number of allocations. */
131         int num_alloc;
132         /* Number of avoided allocations (pointer was re-used from the pool). */
133         int num_reuse;
134         /* Number of pointers discarded due to pool saturation. */
135         int num_discard;
136 } TaskMemPoolStats;
137 #endif
138
139 typedef struct TaskThreadLocalStorage {
140         /* Memory pool for faster task allocation.
141          * The idea is to re-use memory of finished/discarded tasks by this thread.
142          */
143         TaskMemPool task_mempool;
144
145         /* The local queue keeps the thread busy by keeping a small number of tasks
146          * ready to be picked up without taking global locks for synchronization.
147          */
148         int num_local_queue;
149         Task *local_queue[LOCAL_QUEUE_SIZE];
150
151         /* A thread can be marked for delayed task pushes. This is helpful when it is
152          * known that lots of subsequent task pushes will happen from the same thread
153          * without "interrupting" for task execution.
154          *
155          * We try to accumulate as many tasks as possible in a local queue without
156          * any locks first, and then we push all of them into the scheduler's queue
157          * from within a single mutex lock (see the usage sketch after this struct).
158          */
159         bool do_delayed_push;
160         int num_delayed_queue;
161         Task *delayed_queue[DELAYED_QUEUE_SIZE];
162 } TaskThreadLocalStorage;
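
/* A minimal usage sketch of the delayed push mode described above (the task
 * callback 'example_task_run' and the 'child_data' array are hypothetical,
 * only the BLI_task_pool_* calls are real API):
 *
 *   BLI_task_pool_delayed_push_begin(pool, thread_id);
 *   for (int i = 0; i < num_children; i++) {
 *       BLI_task_pool_push_from_thread(pool, example_task_run, child_data[i],
 *                                      false, TASK_PRIORITY_HIGH, thread_id);
 *   }
 *   BLI_task_pool_delayed_push_end(pool, thread_id);
 *
 * All pushes between begin/end accumulate in the thread-local delayed queue and
 * are flushed into the scheduler's queue under a single lock by the end call.
 */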
163
164 struct TaskPool {
165         TaskScheduler *scheduler;
166
167         volatile size_t num;
168         ThreadMutex num_mutex;
169         ThreadCondition num_cond;
170
171         void *userdata;
172         ThreadMutex user_mutex;
173
174         volatile bool do_cancel;
175         volatile bool do_work;
176
177         volatile bool is_suspended;
178         ListBase suspended_queue;
179         size_t num_suspended;
180
181         /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
182          * has to use its special background fallback thread in case we are in a
183          * single-threaded situation.
184          */
185         bool run_in_background;
186
187         /* This is the task scheduler's ID of the thread on which the pool was constructed.
188          * It will be used to access the task TLS.
189          */
190         int thread_id;
191
192         /* For pools which are created from a non-main thread that is not a
193          * scheduler worker thread, we can't re-use any of the scheduler's thread TLS
194          * and have to use our own.
195          */
196         bool use_local_tls;
197         TaskThreadLocalStorage local_tls;
198 #ifndef NDEBUG
199         pthread_t creator_thread_id;
200 #endif
201
202 #ifdef DEBUG_STATS
203         TaskMemPoolStats *mempool_stats;
204 #endif
205 };
206
207 struct TaskScheduler {
208         pthread_t *threads;
209         struct TaskThread *task_threads;
210         int num_threads;
211         bool background_thread_only;
212
213         ListBase queue;
214         ThreadMutex queue_mutex;
215         ThreadCondition queue_cond;
216
217         volatile bool do_exit;
218
219         /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
220         pthread_key_t tls_id_key;
221 };
222
223 typedef struct TaskThread {
224         TaskScheduler *scheduler;
225         int id;
226         TaskThreadLocalStorage tls;
227 } TaskThread;
228
229 /* Helper */
230 BLI_INLINE void task_data_free(Task *task, const int thread_id)
231 {
232         if (task->free_taskdata) {
233                 if (task->freedata) {
234                         task->freedata(task->pool, task->taskdata, thread_id);
235                 }
236                 else {
237                         MEM_freeN(task->taskdata);
238                 }
239         }
240 }
241
242 BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
243 {
244         memset(tls, 0, sizeof(TaskThreadLocalStorage));
245 }
246
247 BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool,
248                                                 const int thread_id)
249 {
250         TaskScheduler *scheduler = pool->scheduler;
251         BLI_assert(thread_id >= 0);
252         BLI_assert(thread_id <= scheduler->num_threads);
253         if (pool->use_local_tls && thread_id == 0) {
254                 BLI_assert(pool->thread_id == 0);
255                 BLI_assert(!BLI_thread_is_main());
256                 BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
257                 return &pool->local_tls;
258         }
259         if (thread_id == 0) {
260                 BLI_assert(BLI_thread_is_main());
261                 return &scheduler->task_threads[pool->thread_id].tls;
262         }
263         return &scheduler->task_threads[thread_id].tls;
264 }
265
266 BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
267 {
268         TaskMemPool *task_mempool = &tls->task_mempool;
269         for (int i = 0; i < task_mempool->num_tasks; ++i) {
270                 MEM_freeN(task_mempool->tasks[i]);
271         }
272 }
273
274 static Task *task_alloc(TaskPool *pool, const int thread_id)
275 {
276         BLI_assert(thread_id <= pool->scheduler->num_threads);
277         if (thread_id != -1) {
278                 BLI_assert(thread_id >= 0);
279                 BLI_assert(thread_id <= pool->scheduler->num_threads);
280                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
281                 TaskMemPool *task_mempool = &tls->task_mempool;
282                 /* Try to re-use task memory from a thread local storage. */
283                 if (task_mempool->num_tasks > 0) {
284                         --task_mempool->num_tasks;
285                         /* Success! We've just avoided task allocation. */
286 #ifdef DEBUG_STATS
287                         pool->mempool_stats[thread_id].num_reuse++;
288 #endif
289                         return task_mempool->tasks[task_mempool->num_tasks];
290                 }
291                 /* We are doomed to allocate new task data. */
292 #ifdef DEBUG_STATS
293                 pool->mempool_stats[thread_id].num_alloc++;
294 #endif
295         }
296         return MEM_mallocN(sizeof(Task), "New task");
297 }
298
299 static void task_free(TaskPool *pool, Task *task, const int thread_id)
300 {
301         task_data_free(task, thread_id);
302         BLI_assert(thread_id >= 0);
303         BLI_assert(thread_id <= pool->scheduler->num_threads);
304         if (thread_id == 0) {
305                 BLI_assert(pool->use_local_tls || BLI_thread_is_main());
306         }
307         TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
308         TaskMemPool *task_mempool = &tls->task_mempool;
309         if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
310                 /* Successfully allowed the task to be re-used later. */
311                 task_mempool->tasks[task_mempool->num_tasks] = task;
312                 ++task_mempool->num_tasks;
313         }
314         else {
315                 /* Local storage is saturated, there is no other way than to just discard
316                  * the memory.
317                  *
318                  * TODO(sergey): We can perhaps store such pointer in a global
319                  * scheduler pool, maybe it'll be faster than discarding and
320                  * allocating again.
321                  */
322                 MEM_freeN(task);
323 #ifdef DEBUG_STATS
324                 pool->mempool_stats[thread_id].num_discard++;
325 #endif
326         }
327 }
328
329 /* Task Scheduler */
330
331 static void task_pool_num_decrease(TaskPool *pool, size_t done)
332 {
333         BLI_mutex_lock(&pool->num_mutex);
334
335         BLI_assert(pool->num >= done);
336
337         pool->num -= done;
338
339         if (pool->num == 0)
340                 BLI_condition_notify_all(&pool->num_cond);
341
342         BLI_mutex_unlock(&pool->num_mutex);
343 }
344
345 static void task_pool_num_increase(TaskPool *pool, size_t new)
346 {
347         BLI_mutex_lock(&pool->num_mutex);
348
349         pool->num += new;
350         BLI_condition_notify_all(&pool->num_cond);
351
352         BLI_mutex_unlock(&pool->num_mutex);
353 }
354
355 static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
356 {
357         bool found_task = false;
358         BLI_mutex_lock(&scheduler->queue_mutex);
359
360         while (!scheduler->queue.first && !scheduler->do_exit)
361                 BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
362
363         do {
364                 Task *current_task;
365
366                 /* Assuming we can only have an empty queue in the 'exit' case here seems logical (we should only be here
367                  * after our worker thread has been woken up from a condition_wait(), which only happens after a new task
368                  * was added to the queue), but it is wrong.
369                  * Waiting on a condition may wake up the thread even if the condition is not signaled (spurious wake-ups),
370                  * and a race condition may also empty the queue **after** the condition has been signaled, but **before**
371                  * the awoken thread reaches this point...
372                  * See http://stackoverflow.com/questions/8594591
373                  *
374                  * So we only abort here if do_exit is set.
375                  */
376                 if (scheduler->do_exit) {
377                         BLI_mutex_unlock(&scheduler->queue_mutex);
378                         return false;
379                 }
380
381                 for (current_task = scheduler->queue.first;
382                      current_task != NULL;
383                      current_task = current_task->next)
384                 {
385                         TaskPool *pool = current_task->pool;
386
387                         if (scheduler->background_thread_only && !pool->run_in_background) {
388                                 continue;
389                         }
390
391                         *task = current_task;
392                         found_task = true;
393                         BLI_remlink(&scheduler->queue, *task);
394                         break;
395                 }
396                 if (!found_task)
397                         BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
398         } while (!found_task);
399
400         BLI_mutex_unlock(&scheduler->queue_mutex);
401
402         return true;
403 }
404
405 BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls,
406                                    const int thread_id)
407 {
408         BLI_assert(!tls->do_delayed_push);
409         while (tls->num_local_queue > 0) {
410                 /* We pop the task from the queue before handling it, so the task's handler
411                  * can push the next job to the local queue.
412                  */
413                 tls->num_local_queue--;
414                 Task *local_task = tls->local_queue[tls->num_local_queue];
415                 /* TODO(sergey): Double-check work_and_wait() doesn't handle other's
416                  * pool tasks.
417                  */
418                 TaskPool *local_pool = local_task->pool;
419                 local_task->run(local_pool, local_task->taskdata, thread_id);
420                 task_free(local_pool, local_task, thread_id);
421         }
422         BLI_assert(!tls->do_delayed_push);
423 }
424
425 static void *task_scheduler_thread_run(void *thread_p)
426 {
427         TaskThread *thread = (TaskThread *) thread_p;
428         TaskThreadLocalStorage *tls = &thread->tls;
429         TaskScheduler *scheduler = thread->scheduler;
430         int thread_id = thread->id;
431         Task *task;
432
433         pthread_setspecific(scheduler->tls_id_key, thread);
434
435         /* keep popping off tasks */
436         while (task_scheduler_thread_wait_pop(scheduler, &task)) {
437                 TaskPool *pool = task->pool;
438
439                 /* run task */
440                 BLI_assert(!tls->do_delayed_push);
441                 task->run(pool, task->taskdata, thread_id);
442                 BLI_assert(!tls->do_delayed_push);
443
444                 /* delete task */
445                 task_free(pool, task, thread_id);
446
447                 /* Handle all tasks from local queue. */
448                 handle_local_queue(tls, thread_id);
449
450                 /* notify pool task was done */
451                 task_pool_num_decrease(pool, 1);
452         }
453
454         return NULL;
455 }
456
457 TaskScheduler *BLI_task_scheduler_create(int num_threads)
458 {
459         TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");
460
461         /* multiple places can use this task scheduler, sharing the same
462          * threads, so we keep track of the number of users. */
463         scheduler->do_exit = false;
464
465         BLI_listbase_clear(&scheduler->queue);
466         BLI_mutex_init(&scheduler->queue_mutex);
467         BLI_condition_init(&scheduler->queue_cond);
468
469         if (num_threads == 0) {
470                 /* automatic number of threads will be main thread + num cores */
471                 num_threads = BLI_system_thread_count();
472         }
473
474         /* main thread will also work, so we count it too */
475         num_threads -= 1;
476
477         /* Add background-only thread if needed. */
478         if (num_threads == 0) {
479                 scheduler->background_thread_only = true;
480                 num_threads = 1;
481         }
482
483         scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
484                                               "TaskScheduler task threads");
485
486         /* Initialize TLS for main thread. */
487         initialize_task_tls(&scheduler->task_threads[0].tls);
488
489         pthread_key_create(&scheduler->tls_id_key, NULL);
490
491         /* launch threads that will be waiting for work */
492         if (num_threads > 0) {
493                 int i;
494
495                 scheduler->num_threads = num_threads;
496                 scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");
497
498                 for (i = 0; i < num_threads; i++) {
499                         TaskThread *thread = &scheduler->task_threads[i + 1];
500                         thread->scheduler = scheduler;
501                         thread->id = i + 1;
502                         initialize_task_tls(&thread->tls);
503
504                         if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
505                                 fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
506                         }
507                 }
508         }
509
510         return scheduler;
511 }
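
/* A minimal sketch of creating and tearing down a scheduler (illustrative only;
 * most callers use the shared scheduler returned by BLI_task_scheduler_get()
 * instead of creating their own). Passing 0 auto-detects the thread count:
 *
 *   TaskScheduler *scheduler = BLI_task_scheduler_create(0);
 *   ...create pools and run tasks...
 *   BLI_task_scheduler_free(scheduler);
 */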
512
513 void BLI_task_scheduler_free(TaskScheduler *scheduler)
514 {
515         Task *task;
516
517         /* stop all waiting threads */
518         BLI_mutex_lock(&scheduler->queue_mutex);
519         scheduler->do_exit = true;
520         BLI_condition_notify_all(&scheduler->queue_cond);
521         BLI_mutex_unlock(&scheduler->queue_mutex);
522
523         pthread_key_delete(scheduler->tls_id_key);
524
525         /* delete threads */
526         if (scheduler->threads) {
527                 int i;
528
529                 for (i = 0; i < scheduler->num_threads; i++) {
530                         if (pthread_join(scheduler->threads[i], NULL) != 0)
531                                 fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
532                 }
533
534                 MEM_freeN(scheduler->threads);
535         }
536
537         /* Delete task thread data */
538         if (scheduler->task_threads) {
539                 for (int i = 0; i < scheduler->num_threads + 1; ++i) {
540                         TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
541                         free_task_tls(tls);
542                 }
543
544                 MEM_freeN(scheduler->task_threads);
545         }
546
547         /* delete leftover tasks */
548         for (task = scheduler->queue.first; task; task = task->next) {
549                 task_data_free(task, 0);
550         }
551         BLI_freelistN(&scheduler->queue);
552
553         /* delete mutex/condition */
554         BLI_mutex_end(&scheduler->queue_mutex);
555         BLI_condition_end(&scheduler->queue_cond);
556
557         MEM_freeN(scheduler);
558 }
559
560 int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
561 {
562         return scheduler->num_threads + 1;
563 }
564
565 static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
566 {
567         task_pool_num_increase(task->pool, 1);
568
569         /* add task to queue */
570         BLI_mutex_lock(&scheduler->queue_mutex);
571
572         if (priority == TASK_PRIORITY_HIGH)
573                 BLI_addhead(&scheduler->queue, task);
574         else
575                 BLI_addtail(&scheduler->queue, task);
576
577         BLI_condition_notify_one(&scheduler->queue_cond);
578         BLI_mutex_unlock(&scheduler->queue_mutex);
579 }
580
581 static void task_scheduler_push_all(TaskScheduler *scheduler,
582                                     TaskPool *pool,
583                                     Task **tasks,
584                                     int num_tasks)
585 {
586         if (num_tasks == 0) {
587                 return;
588         }
589
590         task_pool_num_increase(pool, num_tasks);
591
592         BLI_mutex_lock(&scheduler->queue_mutex);
593
594         for (int i = 0; i < num_tasks; i++) {
595                 BLI_addhead(&scheduler->queue, tasks[i]);
596         }
597
598         BLI_condition_notify_all(&scheduler->queue_cond);
599         BLI_mutex_unlock(&scheduler->queue_mutex);
600 }
601
602 static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
603 {
604         Task *task, *nexttask;
605         size_t done = 0;
606
607         BLI_mutex_lock(&scheduler->queue_mutex);
608
609         /* free all tasks from this pool from the queue */
610         for (task = scheduler->queue.first; task; task = nexttask) {
611                 nexttask = task->next;
612
613                 if (task->pool == pool) {
614                         task_data_free(task, pool->thread_id);
615                         BLI_freelinkN(&scheduler->queue, task);
616
617                         done++;
618                 }
619         }
620
621         BLI_mutex_unlock(&scheduler->queue_mutex);
622
623         /* notify done */
624         task_pool_num_decrease(pool, done);
625 }
626
627 /* Task Pool */
628
629 static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
630                                      void *userdata,
631                                      const bool is_background,
632                                      const bool is_suspended)
633 {
634         TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
635
636 #ifndef NDEBUG
637         /* Assert we do not try to create a background pool from some parent task - those only work OK from main thread. */
638         if (is_background) {
639                 const pthread_t thread_id = pthread_self();
640                 int i = scheduler->num_threads;
641
642                 while (i--) {
643                         BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
644                 }
645         }
646 #endif
647
648         pool->scheduler = scheduler;
649         pool->num = 0;
650         pool->do_cancel = false;
651         pool->do_work = false;
652         pool->is_suspended = is_suspended;
653         pool->num_suspended = 0;
654         pool->suspended_queue.first = pool->suspended_queue.last = NULL;
655         pool->run_in_background = is_background;
656         pool->use_local_tls = false;
657
658         BLI_mutex_init(&pool->num_mutex);
659         BLI_condition_init(&pool->num_cond);
660
661         pool->userdata = userdata;
662         BLI_mutex_init(&pool->user_mutex);
663
664         if (BLI_thread_is_main()) {
665                 pool->thread_id = 0;
666         }
667         else {
668                 TaskThread *thread = pthread_getspecific(scheduler->tls_id_key);
669                 if (thread == NULL) {
670                         /* NOTE: Task pool is created from non-main thread which is not
671                          * managed by the task scheduler. We identify ourselves as thread ID
672                          * 0 but we do not use scheduler's TLS storage and use our own
673                          * instead to avoid any possible threading conflicts.
674                          */
675                         pool->thread_id = 0;
676                         pool->use_local_tls = true;
677 #ifndef NDEBUG
678                         pool->creator_thread_id = pthread_self();
679 #endif
680                         initialize_task_tls(&pool->local_tls);
681                 }
682                 else {
683                         pool->thread_id = thread->id;
684                 }
685         }
686
687 #ifdef DEBUG_STATS
688         pool->mempool_stats =
689                 MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1),
690                             "per-taskpool mempool stats");
691 #endif
692
693         /* Ensure malloc will go fine from threads.
694          *
695          * This is needed because we could be in the main thread here
696          * and malloc could be non-thread-safe at this point because
697          * no other jobs are running.
698          */
699         BLI_threaded_malloc_begin();
700
701         return pool;
702 }
703
704 /**
705  * Create a normal task pool.
706  * This means that in single-threaded context, it will not be executed at all until you call
707  * \a BLI_task_pool_work_and_wait() on it.
708  */
709 TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
710 {
711         return task_pool_create_ex(scheduler, userdata, false, false);
712 }
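
/* A minimal usage sketch of a task pool (illustrative only; 'example_run',
 * 'ExampleData', 'jobs' and 'num_jobs' are hypothetical). A TaskRunFunction
 * receives the pool, the task data and the ID of the executing thread:
 *
 *   static void example_run(TaskPool *pool, void *taskdata, int threadid)
 *   {
 *       ExampleData *data = taskdata;
 *       ...crunch data...
 *   }
 *
 *   TaskScheduler *scheduler = BLI_task_scheduler_get();
 *   TaskPool *pool = BLI_task_pool_create(scheduler, NULL);
 *   for (int i = 0; i < num_jobs; i++) {
 *       BLI_task_pool_push(pool, example_run, &jobs[i], false, TASK_PRIORITY_LOW);
 *   }
 *   BLI_task_pool_work_and_wait(pool);
 *   BLI_task_pool_free(pool);
 */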
713
714 /**
715  * Create a background task pool.
716  * In a multi-threaded context, there is no difference with \a BLI_task_pool_create(), but in the single-threaded case
717  * it is ensured to have at least one worker thread to run on (i.e. you do not have to call
718  * \a BLI_task_pool_work_and_wait() on it to be sure it will be processed).
719  *
720  * \note Background pools are non-recursive (that is, you should not create other background pools in tasks assigned
721  *       to a background pool, as they could end up never being executed, since the 'fallback' background thread is
722  *       already busy with the parent task in a single-threaded context).
723  */
724 TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
725 {
726         return task_pool_create_ex(scheduler, userdata, true, false);
727 }
728
729 /**
730  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
731  * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
732  * overhead when pushing a huge number of small initial tasks from the main thread.
733  */
734 TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
735 {
736         return task_pool_create_ex(scheduler, userdata, false, true);
737 }
738
739 void BLI_task_pool_free(TaskPool *pool)
740 {
741         BLI_task_pool_cancel(pool);
742
743         BLI_mutex_end(&pool->num_mutex);
744         BLI_condition_end(&pool->num_cond);
745
746         BLI_mutex_end(&pool->user_mutex);
747
748 #ifdef DEBUG_STATS
749         printf("Thread ID    Allocated   Reused   Discarded\n");
750         for (int i = 0; i < pool->scheduler->num_threads + 1; ++i) {
751                 printf("%02d           %05d       %05d    %05d\n",
752                        i,
753                        pool->mempool_stats[i].num_alloc,
754                        pool->mempool_stats[i].num_reuse,
755                        pool->mempool_stats[i].num_discard);
756         }
757         MEM_freeN(pool->mempool_stats);
758 #endif
759
760         if (pool->use_local_tls) {
761                 free_task_tls(&pool->local_tls);
762         }
763
764         MEM_freeN(pool);
765
766         BLI_threaded_malloc_end();
767 }
768
769 BLI_INLINE bool task_can_use_local_queues(TaskPool *pool, int thread_id)
770 {
771         return (thread_id != -1 && (thread_id != pool->thread_id || pool->do_work));
772 }
773
774 static void task_pool_push(
775         TaskPool *pool, TaskRunFunction run, void *taskdata,
776         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority,
777         int thread_id)
778 {
779         /* Allocate task and fill its properties. */
780         Task *task = task_alloc(pool, thread_id);
781         task->run = run;
782         task->taskdata = taskdata;
783         task->free_taskdata = free_taskdata;
784         task->freedata = freedata;
785         task->pool = pool;
786         /* For suspended pools we put everything into a global queue first
787          * and exit as soon as possible.
788          *
789          * These tasks will be moved to actual execution when the pool is
790          * activated by work_and_wait().
791          */
792         if (pool->is_suspended) {
793                 BLI_addhead(&pool->suspended_queue, task);
794                 atomic_fetch_and_add_z(&pool->num_suspended, 1);
795                 return;
796         }
797         /* Push to the local queue first; this is the cheapest push possible. */
798         if (task_can_use_local_queues(pool, thread_id)) {
799                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
800                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
801                 /* Try to push to a local execution queue.
802                  * These tasks will be picked up next.
803                  */
804                 if (tls->num_local_queue < LOCAL_QUEUE_SIZE) {
805                         tls->local_queue[tls->num_local_queue] = task;
806                         tls->num_local_queue++;
807                         return;
808                 }
809                 /* If we are in the delayed tasks push mode, we push tasks to a
810                  * temporary local queue first without any locks, and then move them
811                  * to global execution queue with a single lock.
812                  */
813                 if (tls->do_delayed_push && tls->num_delayed_queue < DELAYED_QUEUE_SIZE) {
814                         tls->delayed_queue[tls->num_delayed_queue] = task;
815                         tls->num_delayed_queue++;
816                         return;
817                 }
818         }
819         /* Push to the global execution queue; this is the slowest possible method and
820          * causes a fair amount of threading overhead.
821          */
822         task_scheduler_push(pool->scheduler, task, priority);
823 }
824
825 void BLI_task_pool_push_ex(
826         TaskPool *pool, TaskRunFunction run, void *taskdata,
827         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority)
828 {
829         task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
830 }
831
832 void BLI_task_pool_push(
833         TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
834 {
835         BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
836 }
837
838 void BLI_task_pool_push_from_thread(TaskPool *pool, TaskRunFunction run,
839         void *taskdata, bool free_taskdata, TaskPriority priority, int thread_id)
840 {
841         task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
842 }
843
844 void BLI_task_pool_work_and_wait(TaskPool *pool)
845 {
846         TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
847         TaskScheduler *scheduler = pool->scheduler;
848
849         if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
850                 if (pool->num_suspended) {
851                         task_pool_num_increase(pool, pool->num_suspended);
852                         BLI_mutex_lock(&scheduler->queue_mutex);
853
854                         BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue);
855
856                         BLI_condition_notify_all(&scheduler->queue_cond);
857                         BLI_mutex_unlock(&scheduler->queue_mutex);
858                 }
859         }
860
861         pool->do_work = true;
862
863         ASSERT_THREAD_ID(pool->scheduler, pool->thread_id);
864
865         BLI_mutex_lock(&pool->num_mutex);
866
867         while (pool->num != 0) {
868                 Task *task, *work_task = NULL;
869                 bool found_task = false;
870
871                 BLI_mutex_unlock(&pool->num_mutex);
872
873                 BLI_mutex_lock(&scheduler->queue_mutex);
874
875                 /* find task from this pool. if we get a task from another pool,
876                  * we can get into deadlock */
877
878                 for (task = scheduler->queue.first; task; task = task->next) {
879                         if (task->pool == pool) {
880                                 work_task = task;
881                                 found_task = true;
882                                 BLI_remlink(&scheduler->queue, task);
883                                 break;
884                         }
885                 }
886
887                 BLI_mutex_unlock(&scheduler->queue_mutex);
888
889                 /* if found task, do it, otherwise wait until other tasks are done */
890                 if (found_task) {
891                         /* run task */
892                         BLI_assert(!tls->do_delayed_push);
893                         work_task->run(pool, work_task->taskdata, pool->thread_id);
894                         BLI_assert(!tls->do_delayed_push);
895
896                         /* delete task */
897                         task_free(pool, task, pool->thread_id);
898
899                         /* Handle all tasks from local queue. */
900                         handle_local_queue(tls, pool->thread_id);
901
902                         /* notify pool task was done */
903                         task_pool_num_decrease(pool, 1);
904                 }
905
906                 BLI_mutex_lock(&pool->num_mutex);
907                 if (pool->num == 0)
908                         break;
909
910                 if (!found_task)
911                         BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
912         }
913
914         BLI_mutex_unlock(&pool->num_mutex);
915
916         handle_local_queue(tls, pool->thread_id);
917 }
918
919 void BLI_task_pool_cancel(TaskPool *pool)
920 {
921         pool->do_cancel = true;
922
923         task_scheduler_clear(pool->scheduler, pool);
924
925         /* wait until all entries are cleared */
926         BLI_mutex_lock(&pool->num_mutex);
927         while (pool->num)
928                 BLI_condition_wait(&pool->num_cond, &pool->num_mutex);
929         BLI_mutex_unlock(&pool->num_mutex);
930
931         pool->do_cancel = false;
932 }
933
934 bool BLI_task_pool_canceled(TaskPool *pool)
935 {
936         return pool->do_cancel;
937 }
938
939 void *BLI_task_pool_userdata(TaskPool *pool)
940 {
941         return pool->userdata;
942 }
943
944 ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
945 {
946         return &pool->user_mutex;
947 }
948
949 void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id)
950 {
951         if (task_can_use_local_queues(pool, thread_id)) {
952                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
953                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
954                 tls->do_delayed_push = true;
955         }
956 }
957
958 void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id)
959 {
960         if (task_can_use_local_queues(pool, thread_id)) {
961                 ASSERT_THREAD_ID(pool->scheduler, thread_id);
962                 TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
963                 BLI_assert(tls->do_delayed_push);
964                 task_scheduler_push_all(pool->scheduler,
965                                         pool,
966                                         tls->delayed_queue,
967                                         tls->num_delayed_queue);
968                 tls->do_delayed_push = false;
969                 tls->num_delayed_queue = 0;
970         }
971 }
972
973 /* Parallel range routines */
974
975 /**
976  *
977  * Main functions:
978  * - #BLI_task_parallel_range
979  * - #BLI_task_parallel_listbase (#ListBase - double linked list)
980  *
981  * TODO:
982  * - #BLI_task_parallel_foreach_link (#Link - single linked list)
983  * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set)
984  * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools)
985  *
986  */
987
988 /* Allows avoiding the use of malloc() for userdata_chunk in tasks, when it is small enough. */
989 #define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
990 #define MALLOCA_FREE(_mem, _size) if (((_mem) != NULL) && ((_size) > 8192)) MEM_freeN((_mem))
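
/* A small illustrative pairing of these macros ('num_items' is hypothetical);
 * sizes above 8192 bytes fall back to MEM_mallocN()/MEM_freeN():
 *
 *   const size_t size = sizeof(float) * (size_t)num_items;
 *   float *buffer = MALLOCA(size);
 *   ...use buffer...
 *   MALLOCA_FREE(buffer, size);
 */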
991
992 typedef struct ParallelRangeState {
993         int start, stop;
994         void *userdata;
995
996         TaskParallelRangeFunc func;
997
998         int iter;
999         int chunk_size;
1000 } ParallelRangeState;
1001
1002 BLI_INLINE bool parallel_range_next_iter_get(
1003         ParallelRangeState * __restrict state,
1004         int * __restrict iter, int * __restrict count)
1005 {
1006         int previter = atomic_fetch_and_add_int32(&state->iter, state->chunk_size);
1007
1008         *iter = previter;
1009         *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter));
1010
1011         return (previter < state->stop);
1012 }
1013
1014 static void parallel_range_func(
1015         TaskPool * __restrict pool,
1016         void *userdata_chunk,
1017         int thread_id)
1018 {
1019         ParallelRangeState * __restrict state = BLI_task_pool_userdata(pool);
1020         ParallelRangeTLS tls = {
1021                 .thread_id = thread_id,
1022                 .userdata_chunk = userdata_chunk,
1023         };
1024         int iter, count;
1025         while (parallel_range_next_iter_get(state, &iter, &count)) {
1026                 for (int i = 0; i < count; ++i) {
1027                         state->func(state->userdata, iter + i, &tls);
1028                 }
1029         }
1030 }
1031
1032 static void parallel_range_single_thread(const int start, const int stop,
1033                                          void *userdata,
1034                                          TaskParallelRangeFunc func,
1035                                          const ParallelRangeSettings *settings)
1036 {
1037         void *userdata_chunk = settings->userdata_chunk;
1038         const size_t userdata_chunk_size = settings->userdata_chunk_size;
1039         void *userdata_chunk_local = NULL;
1040         const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
1041         if (use_userdata_chunk) {
1042                 userdata_chunk_local = MALLOCA(userdata_chunk_size);
1043                 memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1044         }
1045         ParallelRangeTLS tls = {
1046                 .thread_id = 0,
1047                 .userdata_chunk = userdata_chunk_local,
1048         };
1049         for (int i = start; i < stop; ++i) {
1050                 func(userdata, i, &tls);
1051         }
1052         if (settings->func_finalize != NULL) {
1053                 settings->func_finalize(userdata, userdata_chunk_local);
1054         }
1055         MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
1056 }
1057
1058 /**
1059  * This function allows parallelizing for loops in a similar way to OpenMP's 'parallel for' statement.
1060  *
1061  * See public API doc of ParallelRangeSettings for description of all settings.
1062  */
1063 void BLI_task_parallel_range(const int start, const int stop,
1064                              void *userdata,
1065                              TaskParallelRangeFunc func,
1066                              const ParallelRangeSettings *settings)
1067 {
1068         TaskScheduler *task_scheduler;
1069         TaskPool *task_pool;
1070         ParallelRangeState state;
1071         int i, num_threads, num_tasks;
1072
1073         void *userdata_chunk = settings->userdata_chunk;
1074         const size_t userdata_chunk_size = settings->userdata_chunk_size;
1075         void *userdata_chunk_local = NULL;
1076         void *userdata_chunk_array = NULL;
1077         const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
1078
1079         if (start == stop) {
1080                 return;
1081         }
1082
1083         BLI_assert(start < stop);
1084         if (userdata_chunk_size != 0) {
1085                 BLI_assert(userdata_chunk != NULL);
1086         }
1087
1088         /* If there is not enough data to be crunched, don't bother with tasks at all;
1089          * do everything from the main thread.
1090          */
1091         if (!settings->use_threading) {
1092                 parallel_range_single_thread(start, stop,
1093                                              userdata,
1094                                              func,
1095                                              settings);
1096                 return;
1097         }
1098
1099         task_scheduler = BLI_task_scheduler_get();
1100         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1101
1102         /* The idea here is to avoid creating a task for each of the loop iterations
1103          * and instead have tasks which are evenly distributed across CPU cores and
1104          * pull the next iter to be crunched using the queue.
1105          */
1106         num_tasks = num_threads + 2;
1107
1108         state.start = start;
1109         state.stop = stop;
1110         state.userdata = userdata;
1111         state.func = func;
1112         state.iter = start;
1113         switch (settings->scheduling_mode) {
1114                 case TASK_SCHEDULING_STATIC:
1115                         state.chunk_size = max_ii(
1116                                 settings->min_iter_per_thread,
1117                                 (stop - start) / (num_tasks));
1118                         break;
1119                 case TASK_SCHEDULING_DYNAMIC:
1120                         /* TODO(sergey): Make it configurable from min_iter_per_thread. */
1121                         state.chunk_size = 32;
1122                         break;
1123         }
1124
1125         num_tasks = min_ii(num_tasks,
1126                            max_ii(1, (stop - start) / state.chunk_size));
1127
1128         if (num_tasks == 1) {
1129                 parallel_range_single_thread(start, stop,
1130                                              userdata,
1131                                              func,
1132                                              settings);
1133                 return;
1134         }
1135
1136         task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1137
1138         /* NOTE: This way we are adding a memory barrier and ensure all worker
1139          * threads can read and modify the value, without any locks. */
1140         atomic_fetch_and_add_int32(&state.iter, 0);
1141
1142         if (use_userdata_chunk) {
1143                 userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
1144         }
1145
1146         for (i = 0; i < num_tasks; i++) {
1147                 if (use_userdata_chunk) {
1148                         userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1149                         memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
1150                 }
1151                 /* Use this pool's pre-allocated tasks. */
1152                 BLI_task_pool_push_from_thread(task_pool,
1153                                                parallel_range_func,
1154                                                userdata_chunk_local, false,
1155                                                TASK_PRIORITY_HIGH,
1156                                                task_pool->thread_id);
1157         }
1158
1159         BLI_task_pool_work_and_wait(task_pool);
1160         BLI_task_pool_free(task_pool);
1161
1162         if (use_userdata_chunk) {
1163                 if (settings->func_finalize != NULL) {
1164                         for (i = 0; i < num_tasks; i++) {
1165                                 userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
1166                                 settings->func_finalize(userdata, userdata_chunk_local);
1167                         }
1168                 }
1169                 MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks);
1170         }
1171 }
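
/* A minimal usage sketch of BLI_task_parallel_range() (illustrative only;
 * 'example_range_func' and 'ExampleData' are hypothetical, and the settings are
 * zero-initialized here for brevity, with only the fields read above filled in):
 *
 *   static void example_range_func(void *userdata, const int i,
 *                                  const ParallelRangeTLS *tls)
 *   {
 *       ExampleData *data = userdata;
 *       data->values[i] *= 2.0f;
 *   }
 *
 *   ParallelRangeSettings settings = {0};
 *   settings.use_threading = true;
 *   settings.scheduling_mode = TASK_SCHEDULING_STATIC;
 *   settings.min_iter_per_thread = 64;
 *   BLI_task_parallel_range(0, data->num_values, data, example_range_func, &settings);
 */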
1172
1173 #undef MALLOCA
1174 #undef MALLOCA_FREE
1175
1176 typedef struct ParallelListbaseState {
1177         void *userdata;
1178         TaskParallelListbaseFunc func;
1179
1180         int chunk_size;
1181         int index;
1182         Link *link;
1183         SpinLock lock;
1184 } ParallelListState;
1185
1186 BLI_INLINE Link *parallel_listbase_next_iter_get(
1187         ParallelListState * __restrict state,
1188         int * __restrict index,
1189         int * __restrict count)
1190 {
1191         int task_count = 0;
1192         BLI_spin_lock(&state->lock);
1193         Link *result = state->link;
1194         if (LIKELY(result != NULL)) {
1195                 *index = state->index;
1196                 while (state->link != NULL && task_count < state->chunk_size) {
1197                         ++task_count;
1198                         state->link = state->link->next;
1199                 }
1200                 state->index += task_count;
1201         }
1202         BLI_spin_unlock(&state->lock);
1203         *count = task_count;
1204         return result;
1205 }
1206
1207 static void parallel_listbase_func(
1208         TaskPool * __restrict pool,
1209         void *UNUSED(taskdata),
1210         int UNUSED(threadid))
1211 {
1212         ParallelListState * __restrict state = BLI_task_pool_userdata(pool);
1213         Link *link;
1214         int index, count;
1215
1216         while ((link = parallel_listbase_next_iter_get(state, &index, &count)) != NULL) {
1217                 for (int i = 0; i < count; ++i) {
1218                         state->func(state->userdata, link, index + i);
1219                         link = link->next;
1220                 }
1221         }
1222 }
1223
1224 static void task_parallel_listbase_no_threads(
1225         struct ListBase *listbase,
1226         void *userdata,
1227         TaskParallelListbaseFunc func)
1228 {
1229         int i = 0;
1230         for (Link *link = listbase->first; link != NULL; link = link->next, ++i) {
1231                 func(userdata, link, i);
1232         }
1233 }
1234
1235 /* NOTE: The idea here is to compensate for rather measurable threading
1236  * overhead caused by fetching tasks. With too many CPU threads we are starting
1237  * to spend too much time in those overheads. */
1238 BLI_INLINE int task_parallel_listbasecalc_chunk_size(const int num_threads)
1239 {
1240         if (num_threads > 32) {
1241                 return 128;
1242         }
1243         else if (num_threads > 16) {
1244                 return 64;
1245         }
1246         return 32;
1247 }
1248
1249 /**
1250  * This function allows parallelizing for loops over ListBase items.
1251  *
1252  * \param listbase The double-linked list to loop over.
1253  * \param userdata Common userdata passed to all instances of \a func.
1254  * \param func Callback function.
1255  * \param use_threading If \a true, actually split-execute the loop in threads, else just do a sequential for loop
1256  *                      (allows caller to use any kind of test to switch on parallelization or not).
1257  *
1258  * \note There is no static scheduling here, since it would need another full loop over items to count them...
1259  */
1260 void BLI_task_parallel_listbase(
1261         struct ListBase *listbase,
1262         void *userdata,
1263         TaskParallelListbaseFunc func,
1264         const bool use_threading)
1265 {
1266         if (BLI_listbase_is_empty(listbase)) {
1267                 return;
1268         }
1269         if (!use_threading) {
1270                 task_parallel_listbase_no_threads(listbase, userdata, func);
1271                 return;
1272         }
1273         TaskScheduler *task_scheduler = BLI_task_scheduler_get();
1274         const int num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1275         /* TODO(sergey): Consider making chunk size configurable. */
1276         const int chunk_size = task_parallel_listbasecalc_chunk_size(num_threads);
1277         const int num_tasks = min_ii(
1278                 num_threads,
1279                 BLI_listbase_count(listbase) / chunk_size);
1280         if (num_tasks <= 1) {
1281                 task_parallel_listbase_no_threads(listbase, userdata, func);
1282                 return;
1283         }
1284
1285         ParallelListState state;
1286         TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1287
1288         state.index = 0;
1289         state.link = listbase->first;
1290         state.userdata = userdata;
1291         state.func = func;
1292         state.chunk_size = chunk_size;
1293         BLI_spin_init(&state.lock);
1294
1295         BLI_assert(num_tasks > 0);
1296         for (int i = 0; i < num_tasks; i++) {
1297                 /* Use this pool's pre-allocated tasks. */
1298                 BLI_task_pool_push_from_thread(task_pool,
1299                                                parallel_listbase_func,
1300                                                NULL, false,
1301                                                TASK_PRIORITY_HIGH,
1302                                                task_pool->thread_id);
1303         }
1304
1305         BLI_task_pool_work_and_wait(task_pool);
1306         BLI_task_pool_free(task_pool);
1307
1308         BLI_spin_end(&state.lock);
1309 }
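
/* A minimal usage sketch of BLI_task_parallel_listbase() (illustrative only;
 * 'example_listbase_func' and the 'node_tree' it walks are hypothetical). The
 * callback receives each Link pointer together with its index in the list:
 *
 *   static void example_listbase_func(void *userdata, Link *link, int index)
 *   {
 *       ...process 'link', casting it to the actual list element type...
 *   }
 *
 *   BLI_task_parallel_listbase(&node_tree->nodes, NULL, example_listbase_func, true);
 */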
1310
1311
1312 typedef struct ParallelMempoolState {
1313         void *userdata;
1314         TaskParallelMempoolFunc func;
1315 } ParallelMempoolState;
1316
1317 static void parallel_mempool_func(
1318         TaskPool * __restrict pool,
1319         void *taskdata,
1320         int UNUSED(threadid))
1321 {
1322         ParallelMempoolState * __restrict state = BLI_task_pool_userdata(pool);
1323         BLI_mempool_iter *iter = taskdata;
1324         MempoolIterData *item;
1325
1326         while ((item = BLI_mempool_iterstep(iter)) != NULL) {
1327                 state->func(state->userdata, item);
1328         }
1329 }
1330
1331 /**
1332  * This function allows parallelizing for loops over Mempool items.
1333  *
1334  * \param mempool: The iterable BLI_mempool to loop over.
1335  * \param userdata: Common userdata passed to all instances of \a func.
1336  * \param func: Callback function.
1337  * \param use_threading: If \a true, actually split-execute loop in threads, else just do a sequential for loop
1338  * (allows caller to use any kind of test to switch on parallelization or not).
1339  *
1340  * \note There is no static scheduling here.
1341  */
1342 void BLI_task_parallel_mempool(
1343         BLI_mempool *mempool,
1344         void *userdata,
1345         TaskParallelMempoolFunc func,
1346         const bool use_threading)
1347 {
1348         TaskScheduler *task_scheduler;
1349         TaskPool *task_pool;
1350         ParallelMempoolState state;
1351         int i, num_threads, num_tasks;
1352
1353         if (BLI_mempool_len(mempool) == 0) {
1354                 return;
1355         }
1356
1357         if (!use_threading) {
1358                 BLI_mempool_iter iter;
1359                 BLI_mempool_iternew(mempool, &iter);
1360
1361                 for (void *item = BLI_mempool_iterstep(&iter); item != NULL; item = BLI_mempool_iterstep(&iter)) {
1362                         func(userdata, item);
1363                 }
1364                 return;
1365         }
1366
1367         task_scheduler = BLI_task_scheduler_get();
1368         task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
1369         num_threads = BLI_task_scheduler_num_threads(task_scheduler);
1370
1371         /* The idea here is to avoid creating a task for each of the loop iterations
1372          * and instead have tasks which are evenly distributed across CPU cores and
1373          * pull the next item to be crunched using the threaded-aware BLI_mempool_iter.
1374          */
1375         num_tasks = num_threads + 2;
1376
1377         state.userdata = userdata;
1378         state.func = func;
1379
1380         BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool, (size_t)num_tasks);
1381
1382         for (i = 0; i < num_tasks; i++) {
1383                 /* Use this pool's pre-allocated tasks. */
1384                 BLI_task_pool_push_from_thread(task_pool,
1385                                                parallel_mempool_func,
1386                                                &mempool_iterators[i], false,
1387                                                TASK_PRIORITY_HIGH,
1388                                                task_pool->thread_id);
1389         }
1390
1391         BLI_task_pool_work_and_wait(task_pool);
1392         BLI_task_pool_free(task_pool);
1393
1394         BLI_mempool_iter_threadsafe_free(mempool_iterators);
1395 }
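
/* A minimal usage sketch of BLI_task_parallel_mempool() (illustrative only;
 * 'example_mempool_func' and the 'bm->vpool' mempool are hypothetical). The
 * callback receives each item stored in the mempool:
 *
 *   static void example_mempool_func(void *userdata, MempoolIterData *item)
 *   {
 *       ...process 'item', casting it to the actual element type...
 *   }
 *
 *   BLI_task_parallel_mempool(bm->vpool, NULL, example_mempool_func, true);
 */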