// Cycles: svn merge -r41467:41531 ^/trunk/blender
// [blender.git] / intern / cycles / device / device.cpp
1 /*
2  * Copyright 2011, Blender Foundation.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software Foundation,
16  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17  */
18
19 #include <stdlib.h>
20 #include <string.h>
21
22 #include "device.h"
23 #include "device_intern.h"
24
25 #include "util_cuda.h"
26 #include "util_debug.h"
27 #include "util_math.h"
28 #include "util_opencl.h"
29 #include "util_opengl.h"
30 #include "util_types.h"
31 #include "util_vector.h"
32
33 CCL_NAMESPACE_BEGIN
34
35 /* Device Task */
36
37 DeviceTask::DeviceTask(Type type_)
38 : type(type_), x(0), y(0), w(0), h(0), rng_state(0), rgba(0), buffer(0),
39   sample(0), resolution(0),
40   displace_input(0), displace_offset(0), displace_x(0), displace_w(0)
41 {
42 }
43
44 void DeviceTask::split(ThreadQueue<DeviceTask>& tasks, int num)
45 {
46         if(type == DISPLACE) {
47                 num = min(displace_w, num);
48
49                 for(int i = 0; i < num; i++) {
50                         int tx = displace_x + (displace_w/num)*i;
51                         int tw = (i == num-1)? displace_w - i*(displace_w/num): displace_w/num;
52
53                         DeviceTask task = *this;
54
55                         task.displace_x = tx;
56                         task.displace_w = tw;
57
58                         tasks.push(task);
59                 }
60         }
61         else {
62                 num = min(h, num);
63
64                 for(int i = 0; i < num; i++) {
65                         int ty = y + (h/num)*i;
66                         int th = (i == num-1)? h - i*(h/num): h/num;
67
68                         DeviceTask task = *this;
69
70                         task.y = ty;
71                         task.h = th;
72
73                         tasks.push(task);
74                 }
75         }
76 }
77
78 /* Device */
79
/* Allocate device memory for display pixels. Read-write so the kernel can
 * render into it and the host can read it back for drawing. */
void Device::pixels_alloc(device_memory& mem)
{
	mem_alloc(mem, MEM_READ_WRITE);
}
84
/* Copy a block of rendered pixels back from the device.
 * Assumes 4 bytes per pixel (RGBA, uint8_t) and rows of width w: the byte
 * offset skips the first y rows, and h rows are copied. */
void Device::pixels_copy_from(device_memory& mem, int y, int w, int h)
{
	mem_copy_from(mem, sizeof(uint8_t)*4*y*w, sizeof(uint8_t)*4*w*h);
}
89
/* Free device memory previously allocated with pixels_alloc(). */
void Device::pixels_free(device_memory& mem)
{
	mem_free(mem);
}
94
95 void Device::draw_pixels(device_memory& rgba, int y, int w, int h, int width, int height, bool transparent)
96 {
97         pixels_copy_from(rgba, y, w, h);
98
99         if(transparent) {
100                 glEnable(GL_BLEND);
101                 glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
102         }
103
104         glPixelZoom((float)width/(float)w, (float)height/(float)h);
105         glRasterPos2f(0, y);
106
107         uint8_t *pixels = (uint8_t*)rgba.data_pointer;
108
109         /* for multi devices, this assumes the ineffecient method that we allocate
110            all pixels on the device even though we only render to a subset */
111         pixels += 4*y*w;
112
113         glDrawPixels(w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
114
115         glRasterPos2f(0.0f, 0.0f);
116         glPixelZoom(1.0f, 1.0f);
117
118         if(transparent)
119                 glDisable(GL_BLEND);
120 }
121
122 Device *Device::create(DeviceType type, bool background, int threads)
123 {
124         Device *device;
125
126         switch(type) {
127                 case DEVICE_CPU:
128                         device = device_cpu_create(threads);
129                         break;
130 #ifdef WITH_CUDA
131                 case DEVICE_CUDA:
132                         if(cuLibraryInit())
133                                 device = device_cuda_create(background);
134                         else
135                                 device = NULL;
136                         break;
137 #endif
138 #ifdef WITH_MULTI
139                 case DEVICE_MULTI:
140                         device = device_multi_create(background);
141                         break;
142 #endif
143 #ifdef WITH_NETWORK
144                 case DEVICE_NETWORK:
145                         device = device_network_create("127.0.0.1");
146                         break;
147 #endif
148 #ifdef WITH_OPENCL
149                 case DEVICE_OPENCL:
150                         if(clLibraryInit())
151                                 device = device_opencl_create(background);
152                         else
153                                 device = NULL;
154                         break;
155 #endif
156                 default:
157                         return NULL;
158         }
159
160         return device;
161 }
162
163 DeviceType Device::type_from_string(const char *name)
164 {
165         if(strcmp(name, "cpu") == 0)
166                 return DEVICE_CPU;
167         else if(strcmp(name, "cuda") == 0)
168                 return DEVICE_CUDA;
169         else if(strcmp(name, "opencl") == 0)
170                 return DEVICE_OPENCL;
171         else if(strcmp(name, "network") == 0)
172                 return DEVICE_NETWORK;
173         else if(strcmp(name, "multi") == 0)
174                 return DEVICE_MULTI;
175         
176         return DEVICE_NONE;
177 }
178
179 string Device::string_from_type(DeviceType type)
180 {
181         if(type == DEVICE_CPU)
182                 return "cpu";
183         else if(type == DEVICE_CUDA)
184                 return "cuda";
185         else if(type == DEVICE_OPENCL)
186                 return "opencl";
187         else if(type == DEVICE_NETWORK)
188                 return "network";
189         else if(type == DEVICE_MULTI)
190                 return "multi";
191         
192         return "";
193 }
194
195 vector<DeviceType> Device::available_types()
196 {
197         vector<DeviceType> types;
198
199         types.push_back(DEVICE_CPU);
200
201 #ifdef WITH_CUDA
202         if(cuLibraryInit())
203                 types.push_back(DEVICE_CUDA);
204 #endif
205
206 #ifdef WITH_OPENCL
207         if(clLibraryInit())
208                 types.push_back(DEVICE_OPENCL);
209 #endif
210
211 #ifdef WITH_NETWORK
212         types.push_back(DEVICE_NETWORK);
213 #endif
214 #ifdef WITH_MULTI
215         types.push_back(DEVICE_MULTI);
216 #endif
217
218         return types;
219 }
220
221 CCL_NAMESPACE_END
222