Code refactor: add ProjectionTransform separate from regular Transform.
[blender.git] / intern / cycles / kernel / kernel_camera.h
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 CCL_NAMESPACE_BEGIN
18
19 /* Perspective Camera */
20
21 ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
22 {
23         float blades = cam->blades;
24         float2 bokeh;
25
26         if(blades == 0.0f) {
27                 /* sample disk */
28                 bokeh = concentric_sample_disk(u, v);
29         }
30         else {
31                 /* sample polygon */
32                 float rotation = cam->bladesrotation;
33                 bokeh = regular_polygon_sample(blades, rotation, u, v);
34         }
35
36         /* anamorphic lens bokeh */
37         bokeh.x *= cam->inv_aperture_ratio;
38
39         return bokeh;
40 }
41
/* Generate a camera ray for a perspective camera.
 *
 * raster_x/raster_y are the filtered pixel coordinates, lens_u/lens_v are
 * random numbers in [0, 1) used to sample the aperture for depth of field.
 * Writes origin, direction, differentials and clip range into ray.
 * NOTE(review): reads ray->time under __CAMERA_MOTION__ — assumes the
 * caller (camera_sample) set it beforehand. */
ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
	/* create ray from raster position */
	ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
	float3 raster = make_float3(raster_x, raster_y, 0.0f);
	float3 Pcamera = transform_perspective(&rastertocamera, raster);

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_perspective_motion) {
		/* TODO(sergey): Currently we interpolate projected coordinate which
		 * gives nice looking result and which is simple, but is in fact a bit
		 * different comparing to constructing projective matrix from an
		 * interpolated field of view.
		 */
		/* Interpolate between pre/center or center/post projections
		 * depending on which half of the shutter interval we are in. */
		if(ray->time < 0.5f) {
			ProjectionTransform rastertocamera_pre = kernel_data.cam.perspective_motion.pre;
			float3 Pcamera_pre =
				transform_perspective(&rastertocamera_pre, raster);
			Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
		}
		else {
			ProjectionTransform rastertocamera_post = kernel_data.cam.perspective_motion.post;
			float3 Pcamera_post =
				transform_perspective(&rastertocamera_post, raster);
			Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
		}
	}
#endif

	/* Ray starts at the camera origin, pointing through the raster position. */
	float3 P = make_float3(0.0f, 0.0f, 0.0f);
	float3 D = Pcamera;

	/* modify ray for depth of field */
	float aperturesize = kernel_data.cam.aperturesize;

	if(aperturesize > 0.0f) {
		/* sample point on aperture */
		float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;

		/* compute point on plane of focus */
		float ft = kernel_data.cam.focaldistance/D.z;
		float3 Pfocus = D*ft;

		/* update ray for effect of lens: shift origin onto the lens
		 * point, aim at the in-focus point so it stays sharp */
		P = make_float3(lensuv.x, lensuv.y, 0.0f);
		D = normalize(Pfocus - P);
	}

	/* transform ray from camera to world */
	Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_motion) {
		ccl_constant MotionTransform *motion = &kernel_data.cam.motion;
		transform_motion_interpolate_constant(&cameratoworld,
		                                      motion,
		                                      ray->time);
	}
#endif

	P = transform_point(&cameratoworld, P);
	D = normalize(transform_direction(&cameratoworld, D));

	bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
	if(!use_stereo) {
		/* No stereo */
		ray->P = P;
		ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
		/* Differentials from the center (no depth of field) direction,
		 * offset by one pixel in x and y respectively. */
		float3 Dcenter = transform_direction(&cameratoworld, Pcamera);

		ray->dP = differential3_zero();
		ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
		ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
#endif
	}
	else {
		/* Spherical stereo */
		spherical_stereo_transform(&kernel_data.cam, &P, &D);
		ray->P = P;
		ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
		/* Ray differentials, computed from scratch using the raster coordinates
		 * because we don't want to be affected by depth of field. We compute
		 * ray origin and direction for the center and two neighbouring pixels
		 * and simply take their differences. */
		float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));

		float3 Pcenter = Pnostereo;
		float3 Dcenter = Pcamera;
		Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
		spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);

		float3 Px = Pnostereo;
		float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
		Dx = normalize(transform_direction(&cameratoworld, Dx));
		spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);

		ray->dP.dx = Px - Pcenter;
		ray->dD.dx = Dx - Dcenter;

		float3 Py = Pnostereo;
		float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
		Dy = normalize(transform_direction(&cameratoworld, Dy));
		spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);

		ray->dP.dy = Py - Pcenter;
		ray->dD.dy = Dy - Dcenter;
#endif
	}

#ifdef __CAMERA_CLIPPING__
	/* clipping: advance the origin to the near clip plane; z_inv rescales
	 * the clip distances from view-axis depth to distance along the ray */
	float z_inv = 1.0f / normalize(Pcamera).z;
	float nearclip = kernel_data.cam.nearclip * z_inv;
	ray->P += nearclip * ray->D;
	ray->dP.dx += nearclip * ray->dD.dx;
	ray->dP.dy += nearclip * ray->dD.dy;
	ray->t = kernel_data.cam.cliplength * z_inv;
#else
	ray->t = FLT_MAX;
#endif
}
167
168 /* Orthographic Camera */
/* Generate a camera ray for an orthographic camera.
 *
 * All rays share the camera-space direction (0, 0, 1); the raster
 * position only offsets the ray origin. lens_u/lens_v sample the
 * aperture for depth of field. Writes origin, direction, differentials
 * and clip range into ray. */
ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
	/* create ray from raster position */
	ProjectionTransform rastertocamera = kernel_data.cam.rastertocamera;
	float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

	float3 P;
	float3 D = make_float3(0.0f, 0.0f, 1.0f);

	/* modify ray for depth of field */
	float aperturesize = kernel_data.cam.aperturesize;

	if(aperturesize > 0.0f) {
		/* sample point on aperture */
		float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;

		/* compute point on plane of focus */
		float3 Pfocus = D * kernel_data.cam.focaldistance;

		/* update ray for effect of lens: shift the origin by the lens
		 * sample and re-aim at the in-focus point */
		float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
		P = Pcamera + lensuvw;
		D = normalize(Pfocus - lensuvw);
	}
	else {
		P = Pcamera;
	}
	/* transform ray from camera to world */
	Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
	if(kernel_data.cam.have_motion) {
		ccl_constant MotionTransform *motion = &kernel_data.cam.motion;
		transform_motion_interpolate_constant(&cameratoworld,
		                                      motion,
		                                      ray->time);
	}
#endif

	ray->P = transform_point(&cameratoworld, P);
	ray->D = normalize(transform_direction(&cameratoworld, D));

#ifdef __RAY_DIFFERENTIALS__
	/* ray differential: origin shifts per pixel, direction is constant
	 * for an orthographic projection */
	ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
	ray->dP.dy = float4_to_float3(kernel_data.cam.dy);

	ray->dD = differential3_zero();
#endif

#ifdef __CAMERA_CLIPPING__
	/* clipping */
	ray->t = kernel_data.cam.cliplength;
#else
	ray->t = FLT_MAX;
#endif
}
226
227 /* Panorama Camera */
228
/* Generate a camera ray for a panorama camera.
 *
 * The raster position is mapped through panorama_to_direction to get the
 * ray direction; a zero direction means the raster position is outside
 * the lens and the ray is marked dead by setting ray->t = 0.
 * lens_u/lens_v sample the aperture for depth of field. */
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
                                              float raster_x, float raster_y,
                                              float lens_u, float lens_v,
                                              ccl_addr_space Ray *ray)
{
	ProjectionTransform rastertocamera = cam->rastertocamera;
	float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

	/* create ray from raster position */
	float3 P = make_float3(0.0f, 0.0f, 0.0f);
	float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);

	/* indicates ray should not receive any light, outside of the lens */
	if(is_zero(D)) {
		ray->t = 0.0f;
		return;
	}

	/* modify ray for depth of field */
	float aperturesize = cam->aperturesize;

	if(aperturesize > 0.0f) {
		/* sample point on aperture */
		float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v)*aperturesize;

		/* compute point on plane of focus */
		float3 Dfocus = normalize(D);
		float3 Pfocus = Dfocus * cam->focaldistance;

		/* calculate orthonormal coordinates perpendicular to Dfocus */
		float3 U, V;
		U = normalize(make_float3(1.0f, 0.0f, 0.0f) -  Dfocus.x * Dfocus);
		V = normalize(cross(Dfocus, U));

		/* update ray for effect of lens: place the origin on the lens
		 * plane spanned by U/V and re-aim at the in-focus point */
		P = U * lensuv.x + V * lensuv.y;
		D = normalize(Pfocus - P);
	}

	/* transform ray from camera to world */
	Transform cameratoworld = cam->cameratoworld;

#ifdef __CAMERA_MOTION__
	if(cam->have_motion) {
		ccl_constant MotionTransform *motion = &cam->motion;
		transform_motion_interpolate_constant(&cameratoworld,
		                                      motion,
		                                      ray->time);
	}
#endif

	P = transform_point(&cameratoworld, P);
	D = normalize(transform_direction(&cameratoworld, D));

	/* Stereo transform */
	bool use_stereo = cam->interocular_offset != 0.0f;
	if(use_stereo) {
		spherical_stereo_transform(cam, &P, &D);
	}

	ray->P = P;
	ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
	/* Ray differentials, computed from scratch using the raster coordinates
	 * because we don't want to be affected by depth of field. We compute
	 * ray origin and direction for the center and two neighbouring pixels
	 * and simply take their differences. */
	float3 Pcenter = Pcamera;
	float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
	Pcenter = transform_point(&cameratoworld, Pcenter);
	Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
	if(use_stereo) {
		spherical_stereo_transform(cam, &Pcenter, &Dcenter);
	}

	float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
	float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
	Px = transform_point(&cameratoworld, Px);
	Dx = normalize(transform_direction(&cameratoworld, Dx));
	if(use_stereo) {
		spherical_stereo_transform(cam, &Px, &Dx);
	}

	ray->dP.dx = Px - Pcenter;
	ray->dD.dx = Dx - Dcenter;

	float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
	float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
	Py = transform_point(&cameratoworld, Py);
	Dy = normalize(transform_direction(&cameratoworld, Dy));
	if(use_stereo) {
		spherical_stereo_transform(cam, &Py, &Dy);
	}

	ray->dP.dy = Py - Pcenter;
	ray->dD.dy = Dy - Dcenter;
#endif

#ifdef __CAMERA_CLIPPING__
	/* clipping: advance the origin to the near clip distance */
	float nearclip = cam->nearclip;
	ray->P += nearclip * ray->D;
	ray->dP.dx += nearclip * ray->dD.dx;
	ray->dP.dy += nearclip * ray->dD.dy;
	ray->t = cam->cliplength;
#else
	ray->t = FLT_MAX;
#endif
}
339
340 /* Common */
341
/* Sample a camera ray for pixel (x, y).
 *
 * filter_u/filter_v are random numbers used to importance sample the
 * pixel filter, lens_u/lens_v sample the aperture for depth of field,
 * and time samples the shutter interval. Sets ray->time (under
 * __CAMERA_MOTION__) and then dispatches to the perspective,
 * orthographic or panorama implementation. */
ccl_device_inline void camera_sample(KernelGlobals *kg,
                                     int x, int y,
                                     float filter_u, float filter_v,
                                     float lens_u, float lens_v,
                                     float time,
                                     ccl_addr_space Ray *ray)
{
	/* pixel filter: jitter the raster position by importance sampling
	 * the precomputed filter table */
	int filter_table_offset = kernel_data.film.filter_table_offset;
	float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
	float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);

#ifdef __CAMERA_MOTION__
	/* motion blur */
	if(kernel_data.cam.shuttertime == -1.0f) {
		/* Shutter disabled: every ray gets the mid-frame time. */
		ray->time = 0.5f;
	}
	else {
		/* TODO(sergey): Such lookup is unneeded when there's rolling shutter
		 * effect in use but rolling shutter duration is set to 0.0.
		 */
		const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
		ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
		/* TODO(sergey): Currently single rolling shutter effect type only
		 * where scanlines are acquired from top to bottom and whole scanline
		 * is acquired at once (no delay in acquisition happens between pixels
		 * of single scanline).
		 *
		 * Might want to support more models in the future.
		 */
		if(kernel_data.cam.rolling_shutter_type) {
			/* Time corresponding to a fully rolling shutter only effect:
			 * top of the frame is time 0.0, bottom of the frame is time 1.0.
			 */
			const float time = 1.0f - (float)y / kernel_data.cam.height;
			const float duration = kernel_data.cam.rolling_shutter_duration;
			if(duration != 0.0f) {
				/* This isn't fully physical correct, but lets us to have simple
				 * controls in the interface. The idea here is basically sort of
				 * linear interpolation between how much rolling shutter effect
				 * exist on the frame and how much of it is a motion blur effect.
				 */
				ray->time = (ray->time - 0.5f) * duration;
				ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
			}
			else {
				ray->time = time;
			}
		}
	}
#endif

	/* sample ray for the active camera model */
	if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
		camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
	else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
		camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
	else
		camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
}
402
403 /* Utilities */
404
405 ccl_device_inline float3 camera_position(KernelGlobals *kg)
406 {
407         Transform cameratoworld = kernel_data.cam.cameratoworld;
408         return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
409 }
410
411 ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
412 {
413         Transform cameratoworld = kernel_data.cam.cameratoworld;
414         float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
415
416         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
417                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
418                 return fabsf(dot((P - camP), camD));
419         }
420         else
421                 return len(P - camP);
422 }
423
424 ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
425 {
426         Transform cameratoworld = kernel_data.cam.cameratoworld;
427
428         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
429                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
430                 return -camD;
431         }
432         else {
433                 float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
434                 return normalize(camP - P);
435         }
436 }
437
438 ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
439 {
440         if(kernel_data.cam.type != CAMERA_PANORAMA) {
441                 /* perspective / ortho */
442                 if(sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
443                         P += camera_position(kg);
444
445                 ProjectionTransform tfm = kernel_data.cam.worldtondc;
446                 return transform_perspective(&tfm, P);
447         }
448         else {
449                 /* panorama */
450                 Transform tfm = kernel_data.cam.worldtocamera;
451
452                 if(sd->object != OBJECT_NONE)
453                         P = normalize(transform_point(&tfm, P));
454                 else
455                         P = normalize(transform_direction(&tfm, P));
456
457                 float2 uv = direction_to_panorama(&kernel_data.cam, P);
458
459                 return make_float3(uv.x, uv.y, 0.0f);
460         }
461 }
462
463 CCL_NAMESPACE_END