/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2013 Blender Foundation.
 * All rights reserved.
 */

/** \file
 * \ingroup depsgraph
 *
 * Evaluation engine entrypoints for Depsgraph Engine.
 */

#include "intern/eval/deg_eval.h"

#include "PIL_time.h"

#include "BLI_compiler_attrs.h"
#include "BLI_utildefines.h"
#include "BLI_task.h"
#include "BLI_ghash.h"

#include "BKE_global.h"

#include "DNA_object_types.h"
#include "DNA_scene_types.h"

#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"

#include "atomic_ops.h"

#include "intern/eval/deg_eval_copy_on_write.h"
#include "intern/eval/deg_eval_flush.h"
#include "intern/eval/deg_eval_stats.h"
#include "intern/node/deg_node.h"
#include "intern/node/deg_node_component.h"
#include "intern/node/deg_node_id.h"
#include "intern/node/deg_node_operation.h"
#include "intern/node/deg_node_time.h"
#include "intern/depsgraph.h"

namespace DEG {

/* ********************** */
/* Evaluation Entrypoints */

/* Forward declarations. */
static void schedule_children(TaskPool *pool,
                              Depsgraph *graph,
                              OperationNode *node,
                              const int thread_id);

struct DepsgraphEvalState {
        Depsgraph *graph;
        /* Gather per-operation timing statistics during evaluation. */
        bool do_stats;
        /* True during the first stage of evaluation, when only copy-on-write
         * operations are scheduled. */
        bool is_cow_stage;
};

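/* Task pool callback: evaluate a single operation node, then schedule the
 * operations which depend on it. */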
static void deg_task_run_func(TaskPool *pool,
                              void *taskdata,
                              int thread_id)
{
        void *userdata_v = BLI_task_pool_userdata(pool);
        DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
        OperationNode *node = (OperationNode *)taskdata;
        /* Sanity checks. */
        BLI_assert(!node->is_noop() && "NOOP nodes should not actually be scheduled");
        /* Perform operation. */
        if (state->do_stats) {
                const double start_time = PIL_check_seconds_timer();
                node->evaluate((::Depsgraph *)state->graph);
                node->stats.current_time += PIL_check_seconds_timer() - start_time;
        }
        else {
                node->evaluate((::Depsgraph *)state->graph);
        }
        /* Schedule children. */
        BLI_task_pool_delayed_push_begin(pool, thread_id);
        schedule_children(pool, state->graph, node, thread_id);
        BLI_task_pool_delayed_push_end(pool, thread_id);
}

typedef struct CalculatePendingData {
        Depsgraph *graph;
} CalculatePendingData;

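/* Check whether the operation belongs to a component which affects something
 * directly visible; copy-on-write components are always considered visible. */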
static bool check_operation_node_visible(OperationNode *op_node)
{
        const ComponentNode *comp_node = op_node->owner;
        /* Special exception: the copy-on-write component is always to be evaluated,
         * to keep the copied "database" in a consistent state. */
        if (comp_node->type == NodeType::COPY_ON_WRITE) {
                return true;
        }
        return comp_node->affects_directly_visible;
}

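/* Parallel-range callback: count the not-yet-evaluated dependencies (pending
 * parents) of the operation node at the given index. */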
static void calculate_pending_func(
        void *__restrict data_v,
        const int i,
        const ParallelRangeTLS *__restrict /*tls*/)
{
        CalculatePendingData *data = (CalculatePendingData *)data_v;
        Depsgraph *graph = data->graph;
        OperationNode *node = graph->operations[i];
        /* Update counters, applies to both visible and invisible IDs. */
        node->num_links_pending = 0;
        node->scheduled = false;
        /* Invisible IDs require no pending operations. */
        if (!check_operation_node_visible(node)) {
                return;
        }
        /* No need to bother with anything if node is not tagged for update. */
        if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
                return;
        }
        for (Relation *rel : node->inlinks) {
                if (rel->from->type == NodeType::OPERATION &&
                    (rel->flag & RELATION_FLAG_CYCLIC) == 0)
                {
                        OperationNode *from = (OperationNode *)rel->from;
                        /* TODO(sergey): This is how the old layer system was checking for the
                         * calculation, but how is it possible that a visible object depends
                         * on an invisible one? This is something which is prohibited after
                         * deg_graph_build_flush_layers(). */
                        if (!check_operation_node_visible(from)) {
                                continue;
                        }
                        /* No need to wait for an operation which is up to date. */
                        if ((from->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
                                continue;
                        }
                        ++node->num_links_pending;
                }
        }
}

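/* Calculate num_links_pending for every operation node in the graph. */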
static void calculate_pending_parents(Depsgraph *graph)
{
        const int num_operations = graph->operations.size();
        CalculatePendingData data;
        data.graph = graph;
        ParallelRangeSettings settings;
        BLI_parallel_range_settings_defaults(&settings);
        settings.min_iter_per_thread = 1024;
        BLI_task_parallel_range(0,
                                num_operations,
                                &data,
                                calculate_pending_func,
                                &settings);
}

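/* Prepare all operation nodes for evaluation: calculate pending parents and
 * reset per-operation statistics. */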
static void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)
{
        const bool do_stats = state->do_stats;
        calculate_pending_parents(graph);
        /* Clear tags and other things which need to be cleared. */
        for (OperationNode *node : graph->operations) {
                if (do_stats) {
                        node->stats.reset_current();
                }
        }
}

/* Schedule a node if it needs evaluation.
 *   dec_parents: Decrement pending parents count, true when child nodes are
 *                scheduled after a task has been completed.
 */
static void schedule_node(TaskPool *pool, Depsgraph *graph,
                          OperationNode *node, bool dec_parents,
                          const int thread_id)
{
        /* No need to schedule nodes of an invisible ID. */
        if (!check_operation_node_visible(node)) {
                return;
        }
        /* No need to schedule operations which are not tagged for update, they are
         * considered to be up to date. */
        if ((node->flag & DEPSOP_FLAG_NEEDS_UPDATE) == 0) {
                return;
        }
        /* TODO(sergey): This is not, strictly speaking, safe to read
         * num_links_pending. */
        if (dec_parents) {
                BLI_assert(node->num_links_pending > 0);
                atomic_sub_and_fetch_uint32(&node->num_links_pending, 1);
        }
        /* Can not schedule operation while its dependencies are not yet
         * evaluated. */
        if (node->num_links_pending != 0) {
                return;
        }
        /* During the COW stage only schedule COW nodes. */
        DepsgraphEvalState *state = (DepsgraphEvalState *)BLI_task_pool_userdata(pool);
        if (state->is_cow_stage) {
                if (node->owner->type != NodeType::COPY_ON_WRITE) {
                        return;
                }
        }
        else {
                BLI_assert(node->scheduled || node->owner->type != NodeType::COPY_ON_WRITE);
        }
        /* Actually schedule the node. */
        bool is_scheduled = atomic_fetch_and_or_uint8(
                (uint8_t *)&node->scheduled, (uint8_t)true);
        if (!is_scheduled) {
                if (node->is_noop()) {
                        /* Skip NOOP node, schedule children right away. */
                        schedule_children(pool, graph, node, thread_id);
                }
                else {
                        /* Children are scheduled once this task is completed. */
                        BLI_task_pool_push_from_thread(pool,
                                                       deg_task_run_func,
                                                       node,
                                                       false,
                                                       TASK_PRIORITY_HIGH,
                                                       thread_id);
                }
        }
}

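/* Schedule all tagged operation nodes which have no pending dependencies. */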
static void schedule_graph(TaskPool *pool, Depsgraph *graph)
{
        for (OperationNode *node : graph->operations) {
                schedule_node(pool, graph, node, false, 0);
        }
}

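/* Schedule operations which depend on the given node, decrementing their
 * pending parents counter for non-cyclic relations. */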
static void schedule_children(TaskPool *pool,
                              Depsgraph *graph,
                              OperationNode *node,
                              const int thread_id)
{
        for (Relation *rel : node->outlinks) {
                OperationNode *child = (OperationNode *)rel->to;
                BLI_assert(child->type == NodeType::OPERATION);
                if (child->scheduled) {
                        /* Happens when there are cyclic dependencies. */
                        continue;
                }
                schedule_node(pool,
                              graph,
                              child,
                              (rel->flag & RELATION_FLAG_CYCLIC) == 0,
                              thread_id);
        }
}

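/* Make sure the copy-on-write version of the scene is up to date, so that its
 * view layer pointer is valid. */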
static void depsgraph_ensure_view_layer(Depsgraph *graph)
{
        /* We update the copy-on-write scene in the following cases:
         * - It was not expanded yet.
         * - It was tagged for update of its CoW component.
         * This allows us to have a proper view layer pointer. */
        Scene *scene_cow = graph->scene_cow;
        if (!deg_copy_on_write_is_expanded(&scene_cow->id) ||
             scene_cow->id.recalc & ID_RECALC_COPY_ON_WRITE)
        {
                const IDNode *id_node = graph->find_id_node(&graph->scene->id);
                deg_update_copy_on_write_datablock(graph, id_node);
        }
}

/**
 * Evaluate all nodes tagged for updating.
 * \warning This is usually done as part of the main loop, but may also be
 * called from frame-change update.
 *
 * \note Time sources should all be valid!
 */
void deg_evaluate_on_refresh(Depsgraph *graph)
{
        /* Nothing to update, early out. */
        if (BLI_gset_len(graph->entry_tags) == 0) {
                return;
        }
        const bool do_time_debug = ((G.debug & G_DEBUG_DEPSGRAPH_TIME) != 0);
        const double start_time = do_time_debug ? PIL_check_seconds_timer() : 0;
        graph->debug_is_evaluating = true;
        depsgraph_ensure_view_layer(graph);
        /* Set up evaluation state. */
        DepsgraphEvalState state;
        state.graph = graph;
        state.do_stats = do_time_debug;
        /* Set up task scheduler and pool for threaded evaluation. */
        TaskScheduler *task_scheduler;
        bool need_free_scheduler;
        if (G.debug & G_DEBUG_DEPSGRAPH_NO_THREADS) {
                task_scheduler = BLI_task_scheduler_create(1);
                need_free_scheduler = true;
        }
        else {
                task_scheduler = BLI_task_scheduler_get();
                need_free_scheduler = false;
        }
        TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
        /* Prepare all nodes for evaluation. */
        initialize_execution(&state, graph);
        /* Do actual evaluation now. */
        /* First, process all Copy-On-Write nodes. */
        state.is_cow_stage = true;
        schedule_graph(task_pool, graph);
        BLI_task_pool_work_wait_and_reset(task_pool);
        /* After that, process all other nodes. */
        state.is_cow_stage = false;
        schedule_graph(task_pool, graph);
        BLI_task_pool_work_and_wait(task_pool);
        BLI_task_pool_free(task_pool);
        /* Finalize statistics gathering. This is needed because we only gather
         * timing of individual operations here, without aggregating anything,
         * to avoid extra synchronization. */
        if (state.do_stats) {
                deg_eval_stats_aggregate(graph);
        }
        /* Clear any uncleared tags - just in case. */
        deg_graph_clear_tags(graph);
        if (need_free_scheduler) {
                BLI_task_scheduler_free(task_scheduler);
        }
        graph->debug_is_evaluating = false;
        if (do_time_debug) {
                printf("Depsgraph updated in %f seconds.\n",
                       PIL_check_seconds_timer() - start_time);
        }
}

}  // namespace DEG