compositor: replace C++ new/delete with guardedalloc.
source/blender/compositor/intern/COM_ExecutionGroup.cpp
/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Contributor:
 *              Jeroen Bakker
 *              Monique Dewanchand
 */

#include <algorithm>
#include <math.h>
#include <sstream>
#include <stdlib.h>

#include "COM_ExecutionGroup.h"
#include "COM_InputSocket.h"
#include "COM_SocketConnection.h"
#include "COM_defines.h"
#include "COM_ExecutionSystem.h"
#include "COM_ReadBufferOperation.h"
#include "COM_WriteBufferOperation.h"
#include "COM_WorkScheduler.h"
#include "COM_ViewerOperation.h"
#include "COM_ChunkOrder.h"
#include "COM_ExecutionSystemHelper.h"

#include "MEM_guardedalloc.h"
#include "BLI_math.h"
#include "PIL_time.h"
#include "WM_api.h"
#include "WM_types.h"

ExecutionGroup::ExecutionGroup()
{
	this->m_isOutput = false;
	this->m_complex = false;
	this->m_chunkExecutionStates = NULL;
	this->m_bTree = NULL;
	this->m_height = 0;
	this->m_width = 0;
	this->m_cachedMaxReadBufferOffset = 0;
	this->m_numberOfXChunks = 0;
	this->m_numberOfYChunks = 0;
	this->m_numberOfChunks = 0;
	this->m_initialized = false;
	this->m_openCL = false;
	this->m_singleThreaded = false;
	this->m_chunksFinished = 0;
}

CompositorPriority ExecutionGroup::getRenderPriotrity()
{
	return this->getOutputNodeOperation()->getRenderPriority();
}

bool ExecutionGroup::containsOperation(NodeOperation *operation)
{
	for (vector<NodeOperation *>::const_iterator iterator = this->m_operations.begin(); iterator != this->m_operations.end(); ++iterator) {
		NodeOperation *inListOperation = *iterator;
		if (inListOperation == operation) {
			return true;
		}
	}
	return false;
}

const bool ExecutionGroup::isComplex() const
{
	return this->m_complex;
}

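/**
 * Check whether the given operation may become part of this group.
 * An uninitialized group accepts any operation; otherwise read-buffer and set operations
 * are always accepted, write-buffer operations never are, and other operations are only
 * accepted when both the group and the operation are non-complex.
 */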
bool ExecutionGroup::canContainOperation(NodeOperation *operation)
{
	if (!this->m_initialized) { return true; }
	if (operation->isReadBufferOperation()) { return true; }
	if (operation->isWriteBufferOperation()) { return false; }
	if (operation->isSetOperation()) { return true; }

	if (!this->isComplex()) {
		return (!operation->isComplex());
	}
	else {
		return false;
	}
}

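/**
 * Add an operation to this group and recursively add everything it depends on.
 * A read-buffer operation pulls in its paired write-buffer operation; other operations pull
 * in the operations connected to their input sockets. When a write-buffer operation cannot be
 * contained here, a new ExecutionGroup is created for it and registered with the ExecutionSystem.
 */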
void ExecutionGroup::addOperation(ExecutionSystem *system, NodeOperation *operation)
{
	/* should never happen but in rare cases it can - it causes confusing crashes */
	BLI_assert(operation->isOperation() == true);

	if (containsOperation(operation)) return;
	if (canContainOperation(operation)) {
		if (!operation->isBufferOperation()) {
			this->m_complex = operation->isComplex();
			this->m_openCL = operation->isOpenCL();
			this->m_singleThreaded = operation->isSingleThreaded();
			this->m_initialized = true;
		}
		this->m_operations.push_back(operation);
		if (operation->isReadBufferOperation()) {
			ReadBufferOperation *readOperation = (ReadBufferOperation *)operation;
			WriteBufferOperation *writeOperation = readOperation->getMemoryProxy()->getWriteBufferOperation();
			this->addOperation(system, writeOperation);
		}
		else {
			unsigned int index;
			for (index = 0; index < operation->getNumberOfInputSockets(); index++) {
				InputSocket *inputSocket = operation->getInputSocket(index);
				if (inputSocket->isConnected()) {
					NodeOperation *node = (NodeOperation *)inputSocket->getConnection()->getFromNode();
					this->addOperation(system, node);
				}
			}
		}
	}
	else {
		if (operation->isWriteBufferOperation()) {
			WriteBufferOperation *writeoperation = (WriteBufferOperation *)operation;
			if (writeoperation->getMemoryProxy()->getExecutor() == NULL) {
				ExecutionGroup *newGroup = new ExecutionGroup();
				writeoperation->getMemoryProxy()->setExecutor(newGroup);
				newGroup->addOperation(system, operation);
				ExecutionSystemHelper::addExecutionGroup(system->getExecutionGroups(), newGroup);
			}
		}
	}
}

NodeOperation *ExecutionGroup::getOutputNodeOperation() const
{
	return this->m_operations[0]; // the first operation of the group is always the output operation.
}

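/**
 * Prepare this group for execution: (re)allocate the per chunk execution state array and
 * cache the read-buffer operations together with the highest read-buffer offset in use.
 */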
void ExecutionGroup::initExecution()
{
	if (this->m_chunkExecutionStates != NULL) {
		MEM_freeN(this->m_chunkExecutionStates);
	}
	unsigned int index;
	determineNumberOfChunks();

	this->m_chunkExecutionStates = NULL;
	if (this->m_numberOfChunks != 0) {
		this->m_chunkExecutionStates = (ChunkExecutionState *)MEM_mallocN(sizeof(ChunkExecutionState) * this->m_numberOfChunks, __func__);
		for (index = 0; index < this->m_numberOfChunks; index++) {
			this->m_chunkExecutionStates[index] = COM_ES_NOT_SCHEDULED;
		}
	}

	unsigned int maxNumber = 0;

	for (index = 0; index < this->m_operations.size(); index++) {
		NodeOperation *operation = this->m_operations[index];
		if (operation->isReadBufferOperation()) {
			ReadBufferOperation *readOperation = (ReadBufferOperation *)operation;
			this->m_cachedReadOperations.push_back(readOperation);
			maxNumber = max(maxNumber, readOperation->getOffset());
		}
	}
	maxNumber++;
	this->m_cachedMaxReadBufferOffset = maxNumber;
}

void ExecutionGroup::deinitExecution()
{
	if (this->m_chunkExecutionStates != NULL) {
		MEM_freeN(this->m_chunkExecutionStates);
		this->m_chunkExecutionStates = NULL;
	}
	this->m_numberOfChunks = 0;
	this->m_numberOfXChunks = 0;
	this->m_numberOfYChunks = 0;
	this->m_cachedReadOperations.clear();
	this->m_bTree = NULL;
}

void ExecutionGroup::determineResolution(unsigned int resolution[2])
{
	NodeOperation *operation = this->getOutputNodeOperation();
	resolution[0] = operation->getWidth();
	resolution[1] = operation->getHeight();
	this->setResolution(resolution);
}

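/**
 * Divide the group's area into chunks of m_chunkSize by m_chunkSize pixels.
 * A single threaded group always consists of exactly one chunk covering the whole area.
 */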
void ExecutionGroup::determineNumberOfChunks()
{
	if (this->m_singleThreaded) {
		this->m_numberOfXChunks = 1;
		this->m_numberOfYChunks = 1;
		this->m_numberOfChunks = 1;
	}
	else {
		const float chunkSizef = this->m_chunkSize;
		this->m_numberOfXChunks = ceil(this->m_width / chunkSizef);
		this->m_numberOfYChunks = ceil(this->m_height / chunkSizef);
		this->m_numberOfChunks = this->m_numberOfXChunks * this->m_numberOfYChunks;
	}
}

/**
 * This method is called for the top execution groups (the groups containing the
 * compositor, preview or viewer node).
 */
void ExecutionGroup::execute(ExecutionSystem *graph)
{
	CompositorContext& context = graph->getContext();
	const bNodeTree *bTree = context.getbNodeTree();
	if (this->m_width == 0 || this->m_height == 0) { return; } /// @note: break out... no pixels to calculate.
	if (bTree->test_break && bTree->test_break(bTree->tbh)) { return; } /// @note: early break out for blur and preview nodes
	if (this->m_numberOfChunks == 0) { return; } /// @note: early break out
	unsigned int chunkNumber;

	this->m_chunksFinished = 0;
	this->m_bTree = bTree;
	unsigned int index;
	unsigned int *chunkOrder = (unsigned int *)MEM_mallocN(sizeof(unsigned int) * this->m_numberOfChunks, __func__);

	for (chunkNumber = 0; chunkNumber < this->m_numberOfChunks; chunkNumber++) {
		chunkOrder[chunkNumber] = chunkNumber;
	}
	NodeOperation *operation = this->getOutputNodeOperation();
	float centerX = 0.5;
	float centerY = 0.5;
	OrderOfChunks chunkorder = COM_ORDER_OF_CHUNKS_DEFAULT;

	if (operation->isViewerOperation()) {
		ViewerBaseOperation *viewer = (ViewerBaseOperation *)operation;
		centerX = viewer->getCenterX();
		centerY = viewer->getCenterY();
		chunkorder = viewer->getChunkOrder();
	}

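	/* Reorder the chunks according to the selected chunk order: random shuffling,
	 * distance to the viewer center, distance to nine rule-of-thirds hotspots, or the
	 * default top-down order, which simply keeps the initial linear ordering. */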
	switch (chunkorder) {
		case COM_TO_RANDOM:
			for (index = 0; index < 2 * this->m_numberOfChunks; index++) {
				int index1 = rand() % this->m_numberOfChunks;
				int index2 = rand() % this->m_numberOfChunks;
				int s = chunkOrder[index1];
				chunkOrder[index1] = chunkOrder[index2];
				chunkOrder[index2] = s;
			}
			break;
		case COM_TO_CENTER_OUT:
		{
			ChunkOrderHotspot *hotspots[1];
			hotspots[0] = new ChunkOrderHotspot(this->m_width * centerX, this->m_height * centerY, 0.0f);
			rcti rect;
			ChunkOrder *chunkOrders = (ChunkOrder *)MEM_mallocN(sizeof(ChunkOrder) * this->m_numberOfChunks, __func__);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				determineChunkRect(&rect, index);
				chunkOrders[index].setChunkNumber(index);
				chunkOrders[index].setX(rect.xmin);
				chunkOrders[index].setY(rect.ymin);
				chunkOrders[index].determineDistance(hotspots, 1);
			}

			sort(&chunkOrders[0], &chunkOrders[this->m_numberOfChunks]);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				chunkOrder[index] = chunkOrders[index].getChunkNumber();
			}

			delete hotspots[0];
			MEM_freeN(chunkOrders);
		}
		break;
		case COM_TO_RULE_OF_THIRDS:
		{
			ChunkOrderHotspot *hotspots[9];
			unsigned int tx = this->m_width / 6;
			unsigned int ty = this->m_height / 6;
			unsigned int mx = this->m_width / 2;
			unsigned int my = this->m_height / 2;
			unsigned int bx = mx + 2 * tx;
			unsigned int by = my + 2 * ty;

			float addition = this->m_numberOfChunks / COM_RULE_OF_THIRDS_DIVIDER;
			hotspots[0] = new ChunkOrderHotspot(mx, my, addition * 0);
			hotspots[1] = new ChunkOrderHotspot(tx, my, addition * 1);
			hotspots[2] = new ChunkOrderHotspot(bx, my, addition * 2);
			hotspots[3] = new ChunkOrderHotspot(bx, by, addition * 3);
			hotspots[4] = new ChunkOrderHotspot(tx, ty, addition * 4);
			hotspots[5] = new ChunkOrderHotspot(bx, ty, addition * 5);
			hotspots[6] = new ChunkOrderHotspot(tx, by, addition * 6);
			hotspots[7] = new ChunkOrderHotspot(mx, ty, addition * 7);
			hotspots[8] = new ChunkOrderHotspot(mx, by, addition * 8);
			rcti rect;
			ChunkOrder *chunkOrders = (ChunkOrder *)MEM_mallocN(sizeof(ChunkOrder) * this->m_numberOfChunks, __func__);
			for (index = 0; index < this->m_numberOfChunks; index++) {
				determineChunkRect(&rect, index);
				chunkOrders[index].setChunkNumber(index);
				chunkOrders[index].setX(rect.xmin);
				chunkOrders[index].setY(rect.ymin);
				chunkOrders[index].determineDistance(hotspots, 9);
			}

			sort(&chunkOrders[0], &chunkOrders[this->m_numberOfChunks]);

			for (index = 0; index < this->m_numberOfChunks; index++) {
				chunkOrder[index] = chunkOrders[index].getChunkNumber();
			}

			delete hotspots[0];
			delete hotspots[1];
			delete hotspots[2];
			delete hotspots[3];
			delete hotspots[4];
			delete hotspots[5];
			delete hotspots[6];
			delete hotspots[7];
			delete hotspots[8];
			MEM_freeN(chunkOrders);
		}
		break;
		case COM_TO_TOP_DOWN:
		default:
			break;
	}

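	/* Schedule the chunks in the calculated order. Each pass walks the remaining chunks,
	 * scheduling unscheduled ones and counting pending ones up to maxNumberEvaluated,
	 * then waits for the WorkScheduler; the node tree's test_break callback can abort
	 * the loop between passes. */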
	bool breaked = false;
	bool finished = false;
	unsigned int startIndex = 0;
	const int maxNumberEvaluated = BLI_system_thread_count() * 2;

	while (!finished && !breaked) {
		bool startEvaluated = false;
		finished = true;
		int numberEvaluated = 0;

		for (index = startIndex; index < this->m_numberOfChunks && numberEvaluated < maxNumberEvaluated; index++) {
			chunkNumber = chunkOrder[index];
			int yChunk = chunkNumber / this->m_numberOfXChunks;
			int xChunk = chunkNumber - (yChunk * this->m_numberOfXChunks);
			const ChunkExecutionState state = this->m_chunkExecutionStates[chunkNumber];
			if (state == COM_ES_NOT_SCHEDULED) {
				scheduleChunkWhenPossible(graph, xChunk, yChunk);
				finished = false;
				startEvaluated = true;
				numberEvaluated++;

				WM_main_add_notifier(NC_WINDOW | ND_DRAW, NULL);
			}
			else if (state == COM_ES_SCHEDULED) {
				finished = false;
				startEvaluated = true;
				numberEvaluated++;
			}
			else if (state == COM_ES_EXECUTED && !startEvaluated) {
				startIndex = index + 1;
			}
		}

		WorkScheduler::finish();

		if (bTree->test_break && bTree->test_break(bTree->tbh)) {
			breaked = true;
		}
	}

	MEM_freeN(chunkOrder);
}

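/**
 * Collect the consolidated input buffers needed to execute the given chunk on an OpenCL
 * device. The returned array is indexed by read-buffer offset and is freed again in
 * finalizeChunkExecution().
 */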
MemoryBuffer **ExecutionGroup::getInputBuffersOpenCL(int chunkNumber)
{
	rcti rect;
	vector<MemoryProxy *> memoryproxies;
	unsigned int index;
	determineChunkRect(&rect, chunkNumber);

	this->determineDependingMemoryProxies(&memoryproxies);
	MemoryBuffer **memoryBuffers = (MemoryBuffer **)MEM_callocN(sizeof(MemoryBuffer *) * this->m_cachedMaxReadBufferOffset, __func__);
	rcti output;
	for (index = 0; index < this->m_cachedReadOperations.size(); index++) {
		ReadBufferOperation *readOperation = (ReadBufferOperation *)this->m_cachedReadOperations[index];
		MemoryProxy *memoryProxy = readOperation->getMemoryProxy();
		this->determineDependingAreaOfInterest(&rect, readOperation, &output);
		MemoryBuffer *memoryBuffer = memoryProxy->getExecutor()->constructConsolidatedMemoryBuffer(memoryProxy, &output);
		memoryBuffers[readOperation->getOffset()] = memoryBuffer;
	}
	return memoryBuffers;
}

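/**
 * Create a MemoryBuffer covering the given rect and fill it with the corresponding part
 * of the memory proxy's buffer.
 */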
MemoryBuffer *ExecutionGroup::constructConsolidatedMemoryBuffer(MemoryProxy *memoryProxy, rcti *rect)
{
	MemoryBuffer *imageBuffer = memoryProxy->getBuffer();
	MemoryBuffer *result = new MemoryBuffer(memoryProxy, rect);
	result->copyContentFrom(imageBuffer);
	return result;
}

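/**
 * Mark a chunk as executed, release its temporary input buffers and, for top level groups,
 * report progress back to the node tree.
 */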
void ExecutionGroup::finalizeChunkExecution(int chunkNumber, MemoryBuffer **memoryBuffers)
{
	if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_SCHEDULED)
		this->m_chunkExecutionStates[chunkNumber] = COM_ES_EXECUTED;

	this->m_chunksFinished++;
	if (memoryBuffers) {
		for (unsigned int index = 0; index < this->m_cachedMaxReadBufferOffset; index++) {
			MemoryBuffer *buffer = memoryBuffers[index];
			if (buffer) {
				if (buffer->isTemporarily()) {
					memoryBuffers[index] = NULL;
					delete buffer;
				}
			}
		}
		MEM_freeN(memoryBuffers);
	}
	if (this->m_bTree) {
		// status report is only performed for top level Execution Groups.
		float progress = this->m_chunksFinished;
		progress /= this->m_numberOfChunks;
		this->m_bTree->progress(this->m_bTree->prh, progress);
	}
}

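/**
 * Calculate the pixel rect covered by the chunk at (xChunk, yChunk), clipped against the
 * group's resolution. A single threaded group uses one rect covering the whole area.
 */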
inline void ExecutionGroup::determineChunkRect(rcti *rect, const unsigned int xChunk, const unsigned int yChunk) const
{
	if (this->m_singleThreaded) {
		BLI_rcti_init(rect, 0, this->m_width, 0, this->m_height);
	}
	else {
		const unsigned int minx = xChunk * this->m_chunkSize;
		const unsigned int miny = yChunk * this->m_chunkSize;
		BLI_rcti_init(rect, minx, min(minx + this->m_chunkSize, this->m_width), miny, min(miny + this->m_chunkSize, this->m_height));
	}
}

void ExecutionGroup::determineChunkRect(rcti *rect, const unsigned int chunkNumber) const
{
	const unsigned int yChunk = chunkNumber / this->m_numberOfXChunks;
	const unsigned int xChunk = chunkNumber - (yChunk * this->m_numberOfXChunks);
	determineChunkRect(rect, xChunk, yChunk);
}

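/**
 * Allocate the output MemoryBuffer for a chunk. This only applies to complex groups whose
 * output operation is a write-buffer operation; otherwise NULL is returned.
 */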
MemoryBuffer *ExecutionGroup::allocateOutputBuffer(int chunkNumber, rcti *rect)
{
	// we assume that this method is only called from complex execution groups.
	NodeOperation *operation = this->getOutputNodeOperation();
	if (operation->isWriteBufferOperation()) {
		WriteBufferOperation *writeOperation = (WriteBufferOperation *)operation;
		MemoryBuffer *buffer = new MemoryBuffer(writeOperation->getMemoryProxy(), rect);
		return buffer;
	}
	return NULL;
}

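/**
 * Schedule every chunk that overlaps the given area.
 * Returns true only when all of those chunks have already been executed.
 */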
bool ExecutionGroup::scheduleAreaWhenPossible(ExecutionSystem *graph, rcti *area)
{
	if (this->m_singleThreaded) {
		return scheduleChunkWhenPossible(graph, 0, 0);
	}
	// find all chunks inside the rect
	// determine minxchunk, minychunk, maxxchunk, maxychunk where x and y are chunknumbers

	float chunkSizef = this->m_chunkSize;

	int indexx, indexy;
	int minxchunk = floor(area->xmin / chunkSizef);
	int maxxchunk = ceil((area->xmax - 1) / chunkSizef);
	int minychunk = floor(area->ymin / chunkSizef);
	int maxychunk = ceil((area->ymax - 1) / chunkSizef);
	minxchunk = MAX2(minxchunk, 0);
	minychunk = MAX2(minychunk, 0);
	maxxchunk = MIN2(maxxchunk, this->m_numberOfXChunks);
	maxychunk = MIN2(maxychunk, this->m_numberOfYChunks);

	bool result = true;
	for (indexx = minxchunk; indexx < maxxchunk; indexx++) {
		for (indexy = minychunk; indexy < maxychunk; indexy++) {
			if (!scheduleChunkWhenPossible(graph, indexx, indexy)) {
				result = false;
			}
		}
	}

	return result;
}

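/**
 * Hand a chunk over to the WorkScheduler, unless it has been scheduled before.
 */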
bool ExecutionGroup::scheduleChunk(unsigned int chunkNumber)
{
	if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_NOT_SCHEDULED) {
		this->m_chunkExecutionStates[chunkNumber] = COM_ES_SCHEDULED;
		WorkScheduler::schedule(this, chunkNumber);
		return true;
	}
	return false;
}

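/**
 * Try to schedule a single chunk. The depending areas in the input groups are scheduled
 * first; the chunk itself is only handed to the WorkScheduler once all of those areas have
 * been executed. Returns true only when the chunk has already been executed or lies outside
 * the chunk grid.
 */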
bool ExecutionGroup::scheduleChunkWhenPossible(ExecutionSystem *graph, int xChunk, int yChunk)
{
	if (xChunk < 0 || xChunk >= (int)this->m_numberOfXChunks) {
		return true;
	}
	if (yChunk < 0 || yChunk >= (int)this->m_numberOfYChunks) {
		return true;
	}
	int chunkNumber = yChunk * this->m_numberOfXChunks + xChunk;
	// chunk is already executed
	if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_EXECUTED) {
		return true;
	}

	// chunk is scheduled, but not executed
	if (this->m_chunkExecutionStates[chunkNumber] == COM_ES_SCHEDULED) {
		return false;
	}

	// chunk is neither executed nor scheduled.
	vector<MemoryProxy *> memoryProxies;
	this->determineDependingMemoryProxies(&memoryProxies);

	rcti rect;
	determineChunkRect(&rect, xChunk, yChunk);
	unsigned int index;
	bool canBeExecuted = true;
	rcti area;

	for (index = 0; index < this->m_cachedReadOperations.size(); index++) {
		ReadBufferOperation *readOperation = (ReadBufferOperation *)this->m_cachedReadOperations[index];
		BLI_rcti_init(&area, 0, 0, 0, 0);
		MemoryProxy *memoryProxy = memoryProxies[index];
		determineDependingAreaOfInterest(&rect, readOperation, &area);
		ExecutionGroup *group = memoryProxy->getExecutor();

		if (group != NULL) {
			if (!group->scheduleAreaWhenPossible(graph, &area)) {
				canBeExecuted = false;
			}
		}
		else {
			throw "ERROR";
		}
	}

	if (canBeExecuted) {
		scheduleChunk(chunkNumber);
	}

	return false;
}

void ExecutionGroup::determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output)
{
	this->getOutputNodeOperation()->determineDependingAreaOfInterest(input, readOperation, output);
}

void ExecutionGroup::determineDependingMemoryProxies(vector<MemoryProxy *> *memoryProxies)
{
	unsigned int index;
	for (index = 0; index < this->m_cachedReadOperations.size(); index++) {
		ReadBufferOperation *readOperation = (ReadBufferOperation *)this->m_cachedReadOperations[index];
		memoryProxies->push_back(readOperation->getMemoryProxy());
	}
}

bool ExecutionGroup::isOpenCL()
{
	return this->m_openCL;
}