Cycles: Remove few function arguments needed only for the split kernel
[blender.git] / intern / cycles / kernel / kernel_camera.h
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 CCL_NAMESPACE_BEGIN
18
19 /* Perspective Camera */
20
21 ccl_device float2 camera_sample_aperture(KernelGlobals *kg, float u, float v)
22 {
23         float blades = kernel_data.cam.blades;
24         float2 bokeh;
25
26         if(blades == 0.0f) {
27                 /* sample disk */
28                 bokeh = concentric_sample_disk(u, v);
29         }
30         else {
31                 /* sample polygon */
32                 float rotation = kernel_data.cam.bladesrotation;
33                 bokeh = regular_polygon_sample(blades, rotation, u, v);
34         }
35
36         /* anamorphic lens bokeh */
37         bokeh.x *= kernel_data.cam.inv_aperture_ratio;
38
39         return bokeh;
40 }
41
/* Generate a world-space camera ray for a perspective camera at the given
 * raster (pixel) position. Applies perspective motion blur, depth of field,
 * camera motion blur, ray differentials and near/far clipping.
 * lens_u/lens_v in [0,1) sample the aperture; ray->time must be set by the
 * caller before this is invoked (used by the motion branches below). */
ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
        /* create ray from raster position */
        Transform rastertocamera = kernel_data.cam.rastertocamera;
        float3 raster = make_float3(raster_x, raster_y, 0.0f);
        float3 Pcamera = transform_perspective(&rastertocamera, raster);

#ifdef __CAMERA_MOTION__
        if(kernel_data.cam.have_perspective_motion) {
                /* TODO(sergey): Currently we interpolate projected coordinate which
                 * gives nice looking result and which is simple, but is in fact a bit
                 * different comparing to constructing projective matrix from an
                 * interpolated field of view.
                 */
                if(ray->time < 0.5f) {
                        /* First half of the shutter: blend from the pre-motion projection. */
                        Transform rastertocamera_pre = kernel_data.cam.perspective_motion.pre;
                        float3 Pcamera_pre =
                                transform_perspective(&rastertocamera_pre, raster);
                        Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
                }
                else {
                        /* Second half of the shutter: blend towards the post-motion projection. */
                        Transform rastertocamera_post = kernel_data.cam.perspective_motion.post;
                        float3 Pcamera_post =
                                transform_perspective(&rastertocamera_post, raster);
                        Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
                }
        }
#endif

        /* Pinhole ray in camera space: origin at the camera, aimed through the
         * projected raster position. */
        ray->P = make_float3(0.0f, 0.0f, 0.0f);
        ray->D = Pcamera;

        /* modify ray for depth of field */
        float aperturesize = kernel_data.cam.aperturesize;

        if(aperturesize > 0.0f) {
                /* sample point on aperture */
                float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;

                /* compute point on plane of focus */
                float ft = kernel_data.cam.focaldistance/ray->D.z;
                float3 Pfocus = ray->D*ft;

                /* update ray for effect of lens: origin moves to the lens sample,
                 * direction is bent so the ray still passes through the focus point */
                ray->P = make_float3(lensuv.x, lensuv.y, 0.0f);
                ray->D = normalize(Pfocus - ray->P);
        }

        /* transform ray from camera to world */
        Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
        if(kernel_data.cam.have_motion) {
#ifdef __KERNEL_OPENCL__
                /* OpenCL: copy the motion transform to a private-memory local
                 * before taking its address. */
                const MotionTransform tfm = kernel_data.cam.motion;
                transform_motion_interpolate(&cameratoworld,
                                             ((const DecompMotionTransform*)&tfm),
                                             ray->time);
#else
                transform_motion_interpolate(&cameratoworld,
                                             ((const DecompMotionTransform*)&kernel_data.cam.motion),
                                             ray->time);
#endif
        }
#endif

        ray->P = transform_point(&cameratoworld, ray->P);
        ray->D = transform_direction(&cameratoworld, ray->D);
        ray->D = normalize(ray->D);

#ifdef __RAY_DIFFERENTIALS__
        /* ray differential: direction change for rays one raster unit over in
         * x/y, measured against the undistorted (no-DoF) central direction */
        float3 Ddiff = transform_direction(&cameratoworld, Pcamera);

        ray->dP = differential3_zero();

        ray->dD.dx = normalize(Ddiff + float4_to_float3(kernel_data.cam.dx)) - normalize(Ddiff);
        ray->dD.dy = normalize(Ddiff + float4_to_float3(kernel_data.cam.dy)) - normalize(Ddiff);
#endif

#ifdef __CAMERA_CLIPPING__
        /* clipping: push the ray origin onto the near clip plane and limit the
         * ray length; z_inv converts view-axis depth to distance along D */
        float3 Pclip = normalize(Pcamera);
        float z_inv = 1.0f / Pclip.z;
        ray->P += kernel_data.cam.nearclip*ray->D * z_inv;
        ray->t = kernel_data.cam.cliplength * z_inv;
#else
        ray->t = FLT_MAX;
#endif
}
132
133 /* Orthographic Camera */
134 ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
135 {
136         /* create ray form raster position */
137         Transform rastertocamera = kernel_data.cam.rastertocamera;
138         float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
139
140         ray->D = make_float3(0.0f, 0.0f, 1.0f);
141
142         /* modify ray for depth of field */
143         float aperturesize = kernel_data.cam.aperturesize;
144
145         if(aperturesize > 0.0f) {
146                 /* sample point on aperture */
147                 float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
148
149                 /* compute point on plane of focus */
150                 float3 Pfocus = ray->D * kernel_data.cam.focaldistance;
151
152                 /* update ray for effect of lens */
153                 float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
154                 ray->P = Pcamera + lensuvw;
155                 ray->D = normalize(Pfocus - lensuvw);
156         }
157         else {
158                 ray->P = Pcamera;
159         }
160         /* transform ray from camera to world */
161         Transform cameratoworld = kernel_data.cam.cameratoworld;
162
163 #ifdef __CAMERA_MOTION__
164         if(kernel_data.cam.have_motion) {
165 #ifdef __KERNEL_OPENCL__
166                 const MotionTransform tfm = kernel_data.cam.motion;
167                 transform_motion_interpolate(&cameratoworld,
168                                              (const DecompMotionTransform*)&tfm,
169                                              ray->time);
170 #else
171                 transform_motion_interpolate(&cameratoworld,
172                                              (const DecompMotionTransform*)&kernel_data.cam.motion,
173                                              ray->time);
174 #endif
175         }
176 #endif
177
178         ray->P = transform_point(&cameratoworld, ray->P);
179         ray->D = transform_direction(&cameratoworld, ray->D);
180         ray->D = normalize(ray->D);
181
182 #ifdef __RAY_DIFFERENTIALS__
183         /* ray differential */
184         ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
185         ray->dP.dy = float4_to_float3(kernel_data.cam.dy);
186
187         ray->dD = differential3_zero();
188 #endif
189
190 #ifdef __CAMERA_CLIPPING__
191         /* clipping */
192         ray->t = kernel_data.cam.cliplength;
193 #else
194         ray->t = FLT_MAX;
195 #endif
196 }
197
198 /* Panorama Camera */
199
200 ccl_device void camera_sample_panorama(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
201 {
202         Transform rastertocamera = kernel_data.cam.rastertocamera;
203         float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
204
205         /* create ray form raster position */
206         ray->P = make_float3(0.0f, 0.0f, 0.0f);
207
208 #ifdef __CAMERA_CLIPPING__
209         /* clipping */
210         ray->t = kernel_data.cam.cliplength;
211 #else
212         ray->t = FLT_MAX;
213 #endif
214
215         ray->D = panorama_to_direction(kg, Pcamera.x, Pcamera.y);
216
217         /* indicates ray should not receive any light, outside of the lens */
218         if(is_zero(ray->D)) {   
219                 ray->t = 0.0f;
220                 return;
221         }
222
223         /* modify ray for depth of field */
224         float aperturesize = kernel_data.cam.aperturesize;
225
226         if(aperturesize > 0.0f) {
227                 /* sample point on aperture */
228                 float2 lensuv = camera_sample_aperture(kg, lens_u, lens_v)*aperturesize;
229
230                 /* compute point on plane of focus */
231                 float3 D = normalize(ray->D);
232                 float3 Pfocus = D * kernel_data.cam.focaldistance;
233
234                 /* calculate orthonormal coordinates perpendicular to D */
235                 float3 U, V;
236                 U = normalize(make_float3(1.0f, 0.0f, 0.0f) -  D.x * D);
237                 V = normalize(cross(D, U));
238
239                 /* update ray for effect of lens */
240                 ray->P = U * lensuv.x + V * lensuv.y;
241                 ray->D = normalize(Pfocus - ray->P);
242         }
243
244         /* transform ray from camera to world */
245         Transform cameratoworld = kernel_data.cam.cameratoworld;
246
247 #ifdef __CAMERA_MOTION__
248         if(kernel_data.cam.have_motion) {
249 #ifdef __KERNEL_OPENCL__
250                 const MotionTransform tfm = kernel_data.cam.motion;
251                 transform_motion_interpolate(&cameratoworld,
252                                              (const DecompMotionTransform*)&tfm,
253                                              ray->time);
254 #else
255                 transform_motion_interpolate(&cameratoworld,
256                                              (const DecompMotionTransform*)&kernel_data.cam.motion,
257                                              ray->time);
258 #endif
259         }
260 #endif
261
262         ray->P = transform_point(&cameratoworld, ray->P);
263         ray->D = transform_direction(&cameratoworld, ray->D);
264         ray->D = normalize(ray->D);
265
266 #ifdef __RAY_DIFFERENTIALS__
267         /* ray differential */
268         ray->dP = differential3_zero();
269
270         Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
271         float3 Ddiff = normalize(transform_direction(&cameratoworld, panorama_to_direction(kg, Pcamera.x, Pcamera.y)));
272
273         Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
274         ray->dD.dx = normalize(transform_direction(&cameratoworld, panorama_to_direction(kg, Pcamera.x, Pcamera.y))) - Ddiff;
275
276         Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
277         ray->dD.dy = normalize(transform_direction(&cameratoworld, panorama_to_direction(kg, Pcamera.x, Pcamera.y))) - Ddiff;
278 #endif
279 }
280
281 /* Common */
282
283 ccl_device void camera_sample(KernelGlobals *kg, int x, int y, float filter_u, float filter_v,
284         float lens_u, float lens_v, float time, ccl_addr_space Ray *ray)
285 {
286         /* pixel filter */
287         int filter_table_offset = kernel_data.film.filter_table_offset;
288         float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
289         float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
290
291 #ifdef __CAMERA_MOTION__
292         /* motion blur */
293         if(kernel_data.cam.shuttertime == -1.0f) {
294                 ray->time = TIME_INVALID;
295         }
296         else {
297                 /* TODO(sergey): Such lookup is unneeded when there's rolling shutter
298                  * effect in use but rolling shutter duration is set to 0.0.
299                  */
300                 const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
301                 ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
302                 /* TODO(sergey): Currently single rolling shutter effect type only
303                  * where scanlines are acquired from top to bottom and whole scanline
304                  * is acquired at once (no delay in acquisition happens between pixels
305                  * of single scanline).
306                  *
307                  * Might want to support more models in the future.
308                  */
309                 if(kernel_data.cam.rolling_shutter_type) {
310                         /* Time corresponding to a fully rolling shutter only effect:
311                          * top of the frame is time 0.0, bottom of the frame is time 1.0.
312                          */
313                         const float time = 1.0f - (float)y / kernel_data.cam.height;
314                         const float duration = kernel_data.cam.rolling_shutter_duration;
315                         if(duration != 0.0f) {
316                                 /* This isn't fully physical correct, but lets us to have simple
317                                  * controls in the interface. The idea here is basically sort of
318                                  * linear interpolation between how much rolling shutter effect
319                                  * exist on the frame and how much of it is a motion blur effect.
320                                  */
321                                 ray->time = (ray->time - 0.5f) * duration;
322                                 ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
323                         }
324                         else {
325                                 ray->time = time;
326                         }
327                 }
328         }
329 #endif
330
331         /* sample */
332         if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
333                 camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
334         else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
335                 camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
336         else
337                 camera_sample_panorama(kg, raster_x, raster_y, lens_u, lens_v, ray);
338 }
339
340 /* Utilities */
341
342 ccl_device_inline float3 camera_position(KernelGlobals *kg)
343 {
344         Transform cameratoworld = kernel_data.cam.cameratoworld;
345         return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
346 }
347
348 ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
349 {
350         Transform cameratoworld = kernel_data.cam.cameratoworld;
351         float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
352
353         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
354                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
355                 return fabsf(dot((P - camP), camD));
356         }
357         else
358                 return len(P - camP);
359 }
360
361 ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
362 {
363         Transform cameratoworld = kernel_data.cam.cameratoworld;
364
365         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
366                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
367                 return -camD;
368         }
369         else {
370                 float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
371                 return normalize(camP - P);
372         }
373 }
374
375 ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
376 {
377         if(kernel_data.cam.type != CAMERA_PANORAMA) {
378                 /* perspective / ortho */
379                 if(ccl_fetch(sd, object) == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
380                         P += camera_position(kg);
381
382                 Transform tfm = kernel_data.cam.worldtondc;
383                 return transform_perspective(&tfm, P);
384         }
385         else {
386                 /* panorama */
387                 Transform tfm = kernel_data.cam.worldtocamera;
388
389                 if(ccl_fetch(sd, object) != OBJECT_NONE)
390                         P = normalize(transform_point(&tfm, P));
391                 else
392                         P = normalize(transform_direction(&tfm, P));
393
394                 float2 uv = direction_to_panorama(kg, P);
395
396                 return make_float3(uv.x, uv.y, 0.0f);
397         }
398 }
399
400 CCL_NAMESPACE_END