intern/cycles/device/device.cpp
/*
 * Copyright 2011, Blender Foundation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <stdlib.h>
#include <string.h>

#include "device.h"
#include "device_intern.h"

#include "util_cuda.h"
#include "util_debug.h"
#include "util_math.h"
#include "util_opencl.h"
#include "util_opengl.h"
#include "util_types.h"
#include "util_vector.h"

CCL_NAMESPACE_BEGIN

/* Device Task */

DeviceTask::DeviceTask(Type type_)
: type(type_), x(0), y(0), w(0), h(0), rng_state(0), rgba(0), buffer(0),
  sample(0), resolution(0),
  displace_input(0), displace_offset(0), displace_x(0), displace_w(0)
{
}

void DeviceTask::split(ThreadQueue<DeviceTask>& tasks, int num)
{
	if(type == DISPLACE) {
		num = min(displace_w, num);

		for(int i = 0; i < num; i++) {
			int tx = displace_x + (displace_w/num)*i;
			int tw = (i == num-1)? displace_w - i*(displace_w/num): displace_w/num;

			DeviceTask task = *this;

			task.displace_x = tx;
			task.displace_w = tw;

			tasks.push(task);
		}
	}
	else {
		num = min(h, num);

		for(int i = 0; i < num; i++) {
			int ty = y + (h/num)*i;
			int th = (i == num-1)? h - i*(h/num): h/num;

			DeviceTask task = *this;

			task.y = ty;
			task.h = th;

			tasks.push(task);
		}
	}
}
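
/* Example (sketch): splitting a displacement task into per-thread sub-ranges.
 * The concrete field values below are illustrative assumptions only; split()
 * copies the parent task, so all other fields carry over to each sub-task.
 *
 *   ThreadQueue<DeviceTask> queue;
 *   DeviceTask task(DeviceTask::DISPLACE);
 *   task.displace_x = 0;
 *   task.displace_w = 1024;
 *   task.split(queue, 4);   pushes 4 sub-tasks, each covering 256 points
 */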

/* Device */

void Device::pixels_alloc(device_memory& mem)
{
	mem_alloc(mem, MEM_READ_WRITE);
}

void Device::pixels_copy_from(device_memory& mem, int y, int w, int h)
{
	mem_copy_from(mem, sizeof(uint8_t)*4*y*w, sizeof(uint8_t)*4*w*h);
}

void Device::pixels_free(device_memory& mem)
{
	mem_free(mem);
}

void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int width, int height, bool transparent)
{
	pixels_copy_from(rgba, y, w, h);

	if(transparent) {
		glEnable(GL_BLEND);
		glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
	}

	glPixelZoom((float)width/(float)w, (float)height/(float)h);
	glRasterPos2f(0, y);

	uint8_t *pixels = (uint8_t*)rgba.data_pointer;

	/* for multi devices, this assumes the inefficient method that we allocate
	   all pixels on the device even though we only render to a subset */
	pixels += 4*y*w;

	glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels);

	glRasterPos2f(0.0f, 0.0f);
	glPixelZoom(1.0f, 1.0f);

	if(transparent)
		glDisable(GL_BLEND);
}
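
/* Example (sketch): a viewport draw call; the buffer and viewport sizes below
 * are illustrative assumptions. The rgba buffer is first copied back from the
 * device, then scaled by glPixelZoom from w x h to width x height on screen.
 *
 *   device->pixels_alloc(rgba);
 *   ... render and tonemap into rgba ...
 *   device->draw_pixels(rgba, 0, 640, 480, 1280, 960, false);
 */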

Device *Device::create(DeviceType type, bool background, int threads)
{
	Device *device;

	switch(type) {
		case DEVICE_CPU:
			device = device_cpu_create(threads);
			break;
#ifdef WITH_CUDA
		case DEVICE_CUDA:
			if(cuLibraryInit())
				device = device_cuda_create(background);
			else
				device = NULL;
			break;
#endif
#ifdef WITH_MULTI
		case DEVICE_MULTI:
			device = device_multi_create(background);
			break;
#endif
#ifdef WITH_NETWORK
		case DEVICE_NETWORK:
			device = device_network_create("127.0.0.1");
			break;
#endif
#ifdef WITH_OPENCL
		case DEVICE_OPENCL:
			if(clLibraryInit())
				device = device_opencl_create(background);
			else
				device = NULL;
			break;
#endif
		default:
			return NULL;
	}

	/* device can be NULL if CUDA/OpenCL library initialization failed */
	if(device)
		device->device_type = type;

	return device;
}
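
/* Example (sketch): creating a render device with a CPU fallback. The thread
 * count and background flag values are illustrative assumptions.
 *
 *   Device *device = Device::create(DEVICE_CUDA, true, 0);
 *
 *   if(!device)
 *       device = Device::create(DEVICE_CPU, true, 0);
 */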

DeviceType Device::type_from_string(const char *name)
{
	if(strcmp(name, "cpu") == 0)
		return DEVICE_CPU;
	else if(strcmp(name, "cuda") == 0)
		return DEVICE_CUDA;
	else if(strcmp(name, "opencl") == 0)
		return DEVICE_OPENCL;
	else if(strcmp(name, "network") == 0)
		return DEVICE_NETWORK;
	else if(strcmp(name, "multi") == 0)
		return DEVICE_MULTI;

	return DEVICE_NONE;
}

string Device::string_from_type(DeviceType type)
{
	if(type == DEVICE_CPU)
		return "cpu";
	else if(type == DEVICE_CUDA)
		return "cuda";
	else if(type == DEVICE_OPENCL)
		return "opencl";
	else if(type == DEVICE_NETWORK)
		return "network";
	else if(type == DEVICE_MULTI)
		return "multi";

	return "";
}

vector<DeviceType> Device::available_types()
{
	vector<DeviceType> types;

	types.push_back(DEVICE_CPU);

#ifdef WITH_CUDA
	if(cuLibraryInit())
		types.push_back(DEVICE_CUDA);
#endif

#ifdef WITH_OPENCL
	if(clLibraryInit())
		types.push_back(DEVICE_OPENCL);
#endif

#ifdef WITH_NETWORK
	types.push_back(DEVICE_NETWORK);
#endif
#ifdef WITH_MULTI
	types.push_back(DEVICE_MULTI);
#endif

	return types;
}
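
/* Example (sketch): listing the device types that are both compiled in and
 * usable on this machine, printed by their string names. The printf usage is
 * illustrative.
 *
 *   vector<DeviceType> types = Device::available_types();
 *
 *   for(size_t i = 0; i < types.size(); i++)
 *       printf("%s\n", Device::string_from_type(types[i]).c_str());
 */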

CCL_NAMESPACE_END
