/*
 * ***** BEGIN GPL LICENSE BLOCK *****
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2006 Blender Foundation.
 * All rights reserved.
 *
 * The Original Code is: all of this file.
 *
 * Contributor(s): Campbell Barton, Alfredo de Greef, David Millan Escriva,
 *
 * ***** END GPL LICENSE BLOCK *****
 */

/** \file blender/nodes/shader/nodes/node_shader_common.c
 *  \ingroup shdnodes
 */

#include "DNA_node_types.h"

#include "BLI_utildefines.h"

#include "node_shader_util.h"
#include "node_common.h"
#include "node_exec.h"
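
/* Helpers for passing socket values between the external node stack and the
 * internal stack of a group's node tree. copy_stack() duplicates a value and
 * tags it as a copy so the shared data pointer is not freed twice, while
 * move_stack() transfers the value (including its is_copy ownership flag)
 * to the destination entry.
 */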

static void copy_stack(bNodeStack *to, bNodeStack *from)
{
    if (to != from) {
        copy_v4_v4(to->vec, from->vec);
        to->data = from->data;
        to->datatype = from->datatype;

        /* tag as copy to prevent freeing */
        to->is_copy = 1;
    }
}

static void move_stack(bNodeStack *to, bNodeStack *from)
{
    if (to != from) {
        copy_v4_v4(to->vec, from->vec);
        to->data = from->data;
        to->datatype = from->datatype;
        to->is_copy = from->is_copy;

        /* clear the source entry so the moved value cannot be freed twice */
        zero_v4(from->vec);
        from->data = NULL;
        from->datatype = 0;
        from->is_copy = 0;
    }
}

/**** GROUP ****/

static void *group_initexec(bNode *node)
{
    bNodeTree *ngroup = (bNodeTree *)node->id;
    bNodeTreeExec *exec;

    if (!ngroup)
        return NULL;

    /* initialize the internal node tree execution */
    exec = ntreeShaderBeginExecTree(ngroup, 0);

    return exec;
}

static void group_freeexec(bNode *UNUSED(node), void *nodedata)
{
    bNodeTreeExec *gexec = (bNodeTreeExec *)nodedata;

    ntreeShaderEndExecTree(gexec, 0);
}

/* Copy inputs to the internal stack. */
static void group_copy_inputs(bNode *node, bNodeStack **in, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;

    for (sock = node->inputs.first, a = 0; sock; sock = sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            copy_stack(ns, in[a]);
        }
    }
}

/* Copy internal results to the external outputs. */
static void group_move_outputs(bNode *node, bNodeStack **out, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;

    for (sock = node->outputs.first, a = 0; sock; sock = sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            move_stack(out[a], ns);
        }
    }
}
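
/* Execute a group node: the external inputs are copied onto the group tree's
 * per-thread stack, the internal nodes are run, and the results are moved
 * back to the external outputs of the group node.
 */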

static void group_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec = (bNodeTreeExec *)nodedata;
    bNodeThreadStack *nts;
    bNode *inode;

    if (!exec)
        return;

    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better.
     */
    for (inode = exec->nodetree->nodes.first; inode; inode = inode->next)
        inode->need_exec = 1;

    nts = ntreeGetThreadStack(exec, thread);

    group_copy_inputs(node, in, nts->stack);
    ntreeExecThreadNodes(exec, nts, data, thread);
    group_move_outputs(node, out, nts->stack);

    ntreeReleaseThreadStack(nts);
}
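
/* GPU (GLSL code generation) counterparts of the stack helpers above: socket
 * values are converted between the external GPUNodeStack entries and the
 * bNodeStack entries of the group's internal tree before and after the
 * internal nodes are executed via ntreeExecGPUNodes().
 */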

static void group_gpu_copy_inputs(bNode *node, GPUNodeStack *in, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;

    for (sock = node->inputs.first, a = 0; sock; sock = sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            /* convert the external gpu stack back to internal node stack data */
            node_data_from_gpu_stack(ns, &in[a]);
        }
    }
}

/* Copy internal results to the external outputs. */
static void group_gpu_move_outputs(bNode *node, GPUNodeStack *out, bNodeStack *gstack)
{
    bNodeSocket *sock;
    bNodeStack *ns;
    int a;

    for (sock = node->outputs.first, a = 0; sock; sock = sock->next, ++a) {
        if (sock->groupsock) {
            ns = node_get_socket_stack(gstack, sock->groupsock);
            /* convert the node stack data result back to gpu stack */
            node_gpu_stack_from_data(&out[a], sock->type, ns);
        }
    }
}

static int gpu_group_execute(GPUMaterial *mat, bNode *node, void *nodedata, GPUNodeStack *in, GPUNodeStack *out)
{
    bNodeTreeExec *exec = (bNodeTreeExec *)nodedata;

    group_gpu_copy_inputs(node, in, exec->stack);
    ntreeExecGPUNodes(exec, mat, (node->flag & NODE_GROUP_EDIT));
    group_gpu_move_outputs(node, out, exec->stack);

    return 1;
}

void register_node_type_sh_group(bNodeTreeType *ttype)
{
    static bNodeType ntype;

    node_type_base(ttype, &ntype, NODE_GROUP, "Group", NODE_CLASS_GROUP, NODE_OPTIONS | NODE_CONST_OUTPUT);
    node_type_socket_templates(&ntype, NULL, NULL);
    node_type_size(&ntype, 120, 60, 200);
    node_type_label(&ntype, node_group_label);
    node_type_init(&ntype, node_group_init);
    node_type_valid(&ntype, node_group_valid);
    node_type_template(&ntype, node_group_template);
    node_type_update(&ntype, NULL, node_group_verify);
    node_type_group_edit(&ntype, node_group_edit_get, node_group_edit_set, node_group_edit_clear);
    node_type_exec_new(&ntype, group_initexec, group_freeexec, group_execute);
    node_type_gpu_ext(&ntype, gpu_group_execute);

    nodeRegisterType(ttype, &ntype);
}
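
/**** FOR LOOP ****/

/* The for/while loop node types below are experimental and compiled out
 * (#if 0) because loop nodes do not work nicely with the current trees;
 * only the per-iteration bookkeeping is sketched, with the actual node
 * execution calls commented out.
 */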

#if 0 /* XXX loop nodes don't work nicely with current trees */
static void forloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec = (bNodeTreeExec *)nodedata;
    bNodeThreadStack *nts;
    int iterations = (int)in[0]->vec[0];
    bNodeSocket *sock;
    bNodeStack *ns;
    bNode *inode;
    int iteration;

    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better.
     */
    for (inode = exec->nodetree->nodes.first; inode; inode = inode->next)
        inode->need_exec = 1;

    nts = ntreeGetThreadStack(exec, thread);

    /* "Iteration" socket */
    sock = exec->nodetree->inputs.first;
    ns = node_get_socket_stack(nts->stack, sock);

//  group_copy_inputs(node, in, nts->stack);
    for (iteration = 0; iteration < iterations; ++iteration) {
        /* first input contains current iteration counter */
        ns->vec[0] = (float)iteration;
        ns->vec[1] = ns->vec[2] = ns->vec[3] = 0.0f;

//      if (iteration > 0)
//          loop_init_iteration(exec->nodetree, nts->stack);
//      ntreeExecThreadNodes(exec, nts, data, thread);
    }

//  loop_copy_outputs(node, in, out, exec->stack);

    ntreeReleaseThreadStack(nts);
}

void register_node_type_sh_forloop(bNodeTreeType *ttype)
{
    static bNodeType ntype;

    node_type_base(ttype, &ntype, NODE_FORLOOP, "For", NODE_CLASS_GROUP, NODE_OPTIONS);
    node_type_socket_templates(&ntype, NULL, NULL);
    node_type_size(&ntype, 120, 60, 200);
    node_type_label(&ntype, node_group_label);
    node_type_init(&ntype, node_forloop_init);
    node_type_valid(&ntype, node_group_valid);
    node_type_template(&ntype, node_forloop_template);
    node_type_update(&ntype, NULL, node_group_verify);
    node_type_tree(&ntype, node_forloop_init_tree, node_loop_update_tree);
    node_type_group_edit(&ntype, node_group_edit_get, node_group_edit_set, node_group_edit_clear);
    node_type_exec_new(&ntype, group_initexec, group_freeexec, forloop_execute);

    nodeRegisterType(ttype, &ntype);
}
#endif

/**** WHILE LOOP ****/

#if 0 /* XXX loop nodes don't work nicely with current trees */
static void whileloop_execute(void *data, int thread, struct bNode *node, void *nodedata, struct bNodeStack **in, struct bNodeStack **out)
{
    bNodeTreeExec *exec = (bNodeTreeExec *)nodedata;
    bNodeThreadStack *nts;
    int condition = (in[0]->vec[0] > 0.0f);
    bNodeSocket *sock;
    bNodeStack *ns;
    bNode *inode;
    int iteration;

    /* XXX same behavior as trunk: all nodes inside group are executed.
     * it's stupid, but just makes it work. compo redesign will do this better.
     */
    for (inode = exec->nodetree->nodes.first; inode; inode = inode->next)
        inode->need_exec = 1;

    nts = ntreeGetThreadStack(exec, thread);

    /* "Condition" socket */
    sock = exec->nodetree->outputs.first;
    ns = node_get_socket_stack(nts->stack, sock);

    iteration = 0;
//  group_copy_inputs(node, in, nts->stack);
    while (condition && iteration < node->custom1) {
//      if (iteration > 0)
//          loop_init_iteration(exec->nodetree, nts->stack);
//      ntreeExecThreadNodes(exec, nts, data, thread);

        condition = (ns->vec[0] > 0.0f);
        ++iteration;
    }

//  loop_copy_outputs(node, in, out, exec->stack);

    ntreeReleaseThreadStack(nts);
}

void register_node_type_sh_whileloop(bNodeTreeType *ttype)
{
    static bNodeType ntype;

    node_type_base(ttype, &ntype, NODE_WHILELOOP, "While", NODE_CLASS_GROUP, NODE_OPTIONS);
    node_type_socket_templates(&ntype, NULL, NULL);
    node_type_size(&ntype, 120, 60, 200);
    node_type_label(&ntype, node_group_label);
    node_type_init(&ntype, node_whileloop_init);
    node_type_valid(&ntype, node_group_valid);
    node_type_template(&ntype, node_whileloop_template);
    node_type_update(&ntype, NULL, node_group_verify);
    node_type_tree(&ntype, node_whileloop_init_tree, node_loop_update_tree);
    node_type_group_edit(&ntype, node_group_edit_get, node_group_edit_set, node_group_edit_clear);
    node_type_exec_new(&ntype, group_initexec, group_freeexec, whileloop_execute);

    nodeRegisterType(ttype, &ntype);
}
#endif