[blender.git] intern/cycles/util/util_task.cpp (style cleanup: block comments)
/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include "util_debug.h"
#include "util_foreach.h"
#include "util_system.h"
#include "util_task.h"

CCL_NAMESPACE_BEGIN

/* Task Pool */
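
/* A TaskPool groups tasks that are pushed into the shared TaskScheduler
 * queue. The pool keeps a count of its outstanding tasks, so the caller can
 * wait for all of them to finish, or cancel the ones that have not started
 * running yet.
 *
 * Rough usage sketch (do_something and some_argument are illustrative only):
 *
 *   TaskScheduler::init(0);  // 0 = autodetect the number of CPU threads
 *
 *   TaskPool pool;
 *   pool.push(function_bind(&do_something, some_argument), false);
 *   pool.wait_work();  // help execute tasks until this pool is empty
 *
 *   TaskScheduler::exit();
 */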

TaskPool::TaskPool()
{
        num = 0;
        do_cancel = false;
}

TaskPool::~TaskPool()
{
        stop();
}

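/* Push a task into the scheduler queue for this pool. The pool takes
 * ownership of the task and deletes it after it has run. */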
void TaskPool::push(Task *task, bool front)
{
        TaskScheduler::Entry entry;

        entry.task = task;
        entry.pool = this;

        TaskScheduler::push(entry, front);
}

void TaskPool::push(const TaskRunFunction& run, bool front)
{
        push(new Task(run), front);
}

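/* Wait until all tasks in this pool are done. Rather than just blocking, the
 * calling thread helps out by executing queued tasks that belong to this
 * pool; tasks from other pools are skipped to avoid deadlock. */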
void TaskPool::wait_work()
{
        thread_scoped_lock num_lock(num_mutex);

        while(num != 0) {
                num_lock.unlock();

                thread_scoped_lock queue_lock(TaskScheduler::queue_mutex);

                /* find task from this pool. if we get a task from another pool,
                 * we can get into deadlock */
                TaskScheduler::Entry work_entry;
                bool found_entry = false;
                list<TaskScheduler::Entry>::iterator it;

                for(it = TaskScheduler::queue.begin(); it != TaskScheduler::queue.end(); it++) {
                        TaskScheduler::Entry& entry = *it;

                        if(entry.pool == this) {
                                work_entry = entry;
                                found_entry = true;
                                TaskScheduler::queue.erase(it);
                                break;
                        }
                }

                queue_lock.unlock();

                /* if found task, do it, otherwise wait until other tasks are done */
                if(found_entry) {
                        /* run task */
                        work_entry.task->run();

                        /* delete task */
                        delete work_entry.task;

                        /* notify pool task was done */
                        num_decrease(1);
                }

                num_lock.lock();
                if(num == 0)
                        break;

                if(!found_entry)
                        num_cond.wait(num_lock);
        }
}

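/* Cancel this pool: remove its tasks that are still queued, then wait for
 * the tasks that are already running to finish. */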
void TaskPool::cancel()
{
        do_cancel = true;

        TaskScheduler::clear(this);

        {
                thread_scoped_lock num_lock(num_mutex);

                while(num)
                        num_cond.wait(num_lock);
        }

        do_cancel = false;
}

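/* Remove any remaining queued tasks; expects that no tasks from this pool
 * are running anymore. */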
void TaskPool::stop()
{
        TaskScheduler::clear(this);

        assert(num == 0);
}

bool TaskPool::cancelled()
{
        return do_cancel;
}

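/* Task bookkeeping: num tracks how many tasks of this pool are queued or
 * running, and num_cond wakes up threads waiting for that count to drop. */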
void TaskPool::num_decrease(int done)
{
        num_mutex.lock();
        num -= done;

        assert(num >= 0);
        if(num == 0)
                num_cond.notify_all();

        num_mutex.unlock();
}

void TaskPool::num_increase()
{
        thread_scoped_lock num_lock(num_mutex);
        num++;
        num_cond.notify_all();
}

/* Task Scheduler */
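
/* The scheduler owns one global task queue and a fixed set of worker threads
 * that are shared by all pools. It is reference counted through init()/exit()
 * so that multiple users can share the same threads. */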

thread_mutex TaskScheduler::mutex;
int TaskScheduler::users = 0;
vector<thread*> TaskScheduler::threads;
vector<int> TaskScheduler::thread_level;
volatile bool TaskScheduler::do_exit = false;

list<TaskScheduler::Entry> TaskScheduler::queue;
thread_mutex TaskScheduler::queue_mutex;
thread_condition_variable TaskScheduler::queue_cond;

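/* Launch the worker threads on first use; later callers only increase the
 * user count. Passing num_threads = 0 autodetects the count via
 * system_cpu_thread_count(). */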
void TaskScheduler::init(int num_threads)
{
        thread_scoped_lock lock(mutex);

        /* multiple cycles instances can use this task scheduler, sharing the same
         * threads, so we keep track of the number of users. */
        if(users == 0) {
                do_exit = false;

                /* launch threads that will be waiting for work */
                if(num_threads == 0)
                        num_threads = system_cpu_thread_count();

                threads.resize(num_threads);
                thread_level.resize(num_threads);

                for(size_t i = 0; i < threads.size(); i++) {
                        threads[i] = new thread(function_bind(&TaskScheduler::thread_run, i));
                        thread_level[i] = 0;
                }
        }

        users++;
}

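/* Decrease the user count; when it reaches zero, signal the worker threads
 * to stop and join them. */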
void TaskScheduler::exit()
{
        thread_scoped_lock lock(mutex);

        users--;

        if(users == 0) {
                /* stop all waiting threads */
                do_exit = true;
                TaskScheduler::queue_cond.notify_all();

                /* delete threads */
                foreach(thread *t, threads) {
                        t->join();
                        delete t;
                }

                threads.clear();
                thread_level.clear();
        }
}

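/* Called by worker threads: block until a task is available or the scheduler
 * is shutting down. Returns false when there is no more work to do. */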
bool TaskScheduler::thread_wait_pop(Entry& entry)
{
        thread_scoped_lock queue_lock(queue_mutex);

        while(queue.empty() && !do_exit)
                queue_cond.wait(queue_lock);

        if(queue.empty()) {
                assert(do_exit);
                return false;
        }

        entry = queue.front();
        queue.pop_front();

        return true;
}

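/* Main loop of each worker thread: pop tasks, run them, and notify the
 * owning pool when they are done. */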
void TaskScheduler::thread_run(int thread_id)
{
        Entry entry;

        /* todo: test affinity/denormal mask */

        /* keep popping off tasks */
        while(thread_wait_pop(entry)) {
                /* run task */
                entry.task->run();

                /* delete task */
                delete entry.task;

                /* notify pool task was done */
                entry.pool->num_decrease(1);
        }
}

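/* Queue a new entry and wake up one waiting worker thread. */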
void TaskScheduler::push(Entry& entry, bool front)
{
        entry.pool->num_increase();

        /* add entry to queue */
        TaskScheduler::queue_mutex.lock();
        if(front)
                TaskScheduler::queue.push_front(entry);
        else
                TaskScheduler::queue.push_back(entry);

        TaskScheduler::queue_cond.notify_one();
        TaskScheduler::queue_mutex.unlock();
}

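/* Remove and delete all queued tasks belonging to the given pool, and update
 * the pool's task count accordingly. */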
void TaskScheduler::clear(TaskPool *pool)
{
        thread_scoped_lock queue_lock(TaskScheduler::queue_mutex);

        /* erase all tasks from this pool from the queue */
        list<Entry>::iterator it = queue.begin();
        int done = 0;

        while(it != queue.end()) {
                Entry& entry = *it;

                if(entry.pool == pool) {
                        done++;
                        delete entry.task;

                        it = queue.erase(it);
                }
                else
                        it++;
        }

        queue_lock.unlock();

        /* notify done */
        pool->num_decrease(done);
}

CCL_NAMESPACE_END