/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "util_cuda.h"
#include "util_debug.h"
#include "util_opengl.h"
#include "util_types.h"
#include "util_vector.h"

CCL_NAMESPACE_BEGIN

/* Device Task */

DeviceTask::DeviceTask(Type type_)
: type(type_), x(0), y(0), w(0), h(0), rng_state(0), rgba(0), buffer(0),
  pass(0), resolution(0),
  displace_input(0), displace_offset(0), displace_x(0), displace_w(0)
{
}

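/* Split the task into num subtasks: displacement tasks are divided along the
 * displacement range, other tasks along image rows, with the last subtask
 * taking any remainder. */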
void DeviceTask::split(ThreadQueue<DeviceTask>& tasks, int num)
{
	if(type == DISPLACE) {
		for(int i = 0; i < num; i++) {
			int tx = displace_x + (displace_w/num)*i;
			int tw = (i == num-1)? displace_w - i*(displace_w/num): displace_w/num;

			DeviceTask task = *this;

			task.displace_x = tx;
			task.displace_w = tw;

			tasks.push(task);
		}
	}
	else {
		for(int i = 0; i < num; i++) {
			int ty = y + (h/num)*i;
			int th = (i == num-1)? h - i*(h/num): h/num;

			DeviceTask task = *this;

			task.y = ty;
			task.h = th;

			tasks.push(task);
		}
	}
}

/* Device */

void Device::pixels_alloc(device_memory& mem)
{
	mem_alloc(mem, MEM_READ_WRITE);
}

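/* Copy h rows of 8-bit RGBA pixels back from the device, starting at row y of
 * a buffer that is w pixels wide. */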
void Device::pixels_copy_from(device_memory& mem, int y, int w, int h)
{
	mem_copy_from(mem, sizeof(uchar)*4*y*w, sizeof(uchar)*4*w*h);
}

void Device::pixels_free(device_memory& mem)
{
	mem_free(mem);
}

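/* Read the rendered pixels back and draw them into the current OpenGL context,
 * scaling the w by h block up to width by height. */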
void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int width, int height)
{
	pixels_copy_from(rgba, y, w, h);

	glPixelZoom((float)width/(float)w, (float)height/(float)h);
	glRasterPos2f(0, y);

	glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, (void*)rgba.data_pointer);

	glRasterPos2f(0.0f, 0.0f);
	glPixelZoom(1.0f, 1.0f);
}

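/* Create a device of the requested type; returns NULL when the type is not
 * compiled in or fails to initialize. */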
Device *Device::create(DeviceType type, bool background)
{
	Device *device;

	switch(type) {
		case DEVICE_CPU:
			device = device_cpu_create();
			break;
#ifdef WITH_CUDA
		case DEVICE_CUDA:
			if(cuLibraryInit())
				device = device_cuda_create(background);
			else
				device = NULL;
			break;
#endif
#ifdef WITH_MULTI
		case DEVICE_MULTI:
			device = device_multi_create(background);
			break;
#endif
#ifdef WITH_NETWORK
		case DEVICE_NETWORK:
			device = device_network_create("127.0.0.1");
			break;
#endif
#ifdef WITH_OPENCL
		case DEVICE_OPENCL:
			device = device_opencl_create(background);
			break;
#endif
		default:
			return NULL;
	}

	return device;
}

DeviceType Device::type_from_string(const char *name)
{
	if(strcmp(name, "cpu") == 0)
		return DEVICE_CPU;
	else if(strcmp(name, "cuda") == 0)
		return DEVICE_CUDA;
	else if(strcmp(name, "opencl") == 0)
		return DEVICE_OPENCL;
	else if(strcmp(name, "network") == 0)
		return DEVICE_NETWORK;
	else if(strcmp(name, "multi") == 0)
		return DEVICE_MULTI;

	return DEVICE_NONE;
}

string Device::string_from_type(DeviceType type)
{
	if(type == DEVICE_CPU)
		return "cpu";
	else if(type == DEVICE_CUDA)
		return "cuda";
	else if(type == DEVICE_OPENCL)
		return "opencl";
	else if(type == DEVICE_NETWORK)
		return "network";
	else if(type == DEVICE_MULTI)
		return "multi";

	return "";
}

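/* Return the device types usable in this build; CUDA is listed only if its
 * library can be loaded. */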
vector<DeviceType> Device::available_types()
{
	vector<DeviceType> types;

	types.push_back(DEVICE_CPU);

#ifdef WITH_CUDA
	if(cuLibraryInit())
		types.push_back(DEVICE_CUDA);
#endif

#ifdef WITH_OPENCL
	types.push_back(DEVICE_OPENCL);
#endif

#ifdef WITH_NETWORK
	types.push_back(DEVICE_NETWORK);
#endif
#ifdef WITH_MULTI
	types.push_back(DEVICE_MULTI);
#endif

	return types;
}

CCL_NAMESPACE_END
