Cycles: don't count volume boundaries as transparent bounces.
[blender-staging.git] / intern / cycles / kernel / kernel_camera.h
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 CCL_NAMESPACE_BEGIN
18
19 /* Perspective Camera */
20
21 ccl_device float2 camera_sample_aperture(ccl_constant KernelCamera *cam, float u, float v)
22 {
23         float blades = cam->blades;
24         float2 bokeh;
25
26         if(blades == 0.0f) {
27                 /* sample disk */
28                 bokeh = concentric_sample_disk(u, v);
29         }
30         else {
31                 /* sample polygon */
32                 float rotation = cam->bladesrotation;
33                 bokeh = regular_polygon_sample(blades, rotation, u, v);
34         }
35
36         /* anamorphic lens bokeh */
37         bokeh.x *= cam->inv_aperture_ratio;
38
39         return bokeh;
40 }
41
/* Generate a camera ray for a perspective camera.
 *
 * raster_x/raster_y: sample position in raster (pixel) space.
 * lens_u/lens_v: random numbers in [0,1) used to sample the aperture for
 *                depth of field.
 * ray: output; ray->time must already be set by the caller since it drives
 *      the motion interpolation below. Fills in origin, direction,
 *      differentials and the clip range. */
ccl_device void camera_sample_perspective(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
{
        /* create ray from raster position */
        Transform rastertocamera = kernel_data.cam.rastertocamera;
        float3 raster = make_float3(raster_x, raster_y, 0.0f);
        float3 Pcamera = transform_perspective(&rastertocamera, raster);

#ifdef __CAMERA_MOTION__
        /* Animated projection (e.g. changing focal length): blend the projected
         * coordinate between the pre/center/post projection matrices. */
        if(kernel_data.cam.have_perspective_motion) {
                /* TODO(sergey): Currently we interpolate projected coordinate which
                 * gives nice looking result and which is simple, but is in fact a bit
                 * different comparing to constructing projective matrix from an
                 * interpolated field of view.
                 */
                if(ray->time < 0.5f) {
                        /* First shutter half: blend from pre-transform towards center. */
                        Transform rastertocamera_pre = kernel_data.cam.perspective_motion.pre;
                        float3 Pcamera_pre =
                                transform_perspective(&rastertocamera_pre, raster);
                        Pcamera = interp(Pcamera_pre, Pcamera, ray->time * 2.0f);
                }
                else {
                        /* Second shutter half: blend from center towards post-transform. */
                        Transform rastertocamera_post = kernel_data.cam.perspective_motion.post;
                        float3 Pcamera_post =
                                transform_perspective(&rastertocamera_post, raster);
                        Pcamera = interp(Pcamera, Pcamera_post, (ray->time - 0.5f) * 2.0f);
                }
        }
#endif

        /* Pinhole ray: origin at the camera, direction through the raster point. */
        float3 P = make_float3(0.0f, 0.0f, 0.0f);
        float3 D = Pcamera;

        /* modify ray for depth of field */
        float aperturesize = kernel_data.cam.aperturesize;

        if(aperturesize > 0.0f) {
                /* sample point on aperture */
                float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;

                /* compute point on plane of focus */
                float ft = kernel_data.cam.focaldistance/D.z;
                float3 Pfocus = D*ft;

                /* update ray for effect of lens: move origin onto the aperture and
                 * aim at the in-focus point so the focal plane stays sharp */
                P = make_float3(lensuv.x, lensuv.y, 0.0f);
                D = normalize(Pfocus - P);
        }

        /* transform ray from camera to world */
        Transform cameratoworld = kernel_data.cam.cameratoworld;

#ifdef __CAMERA_MOTION__
        /* Camera object motion blur: interpolate camera-to-world at ray->time. */
        if(kernel_data.cam.have_motion) {
#  ifdef __KERNEL_OPENCL__
                /* OpenCL path copies to a private variable first; presumably because
                 * the address of a __constant member can't be passed directly —
                 * NOTE(review): confirm against the OpenCL address-space rules. */
                const MotionTransform tfm = kernel_data.cam.motion;
                transform_motion_interpolate(&cameratoworld,
                                                                         &tfm,
                                             ray->time);
#  else
                transform_motion_interpolate(&cameratoworld,
                                             &kernel_data.cam.motion,
                                             ray->time);
#  endif
        }
#endif

        P = transform_point(&cameratoworld, P);
        D = normalize(transform_direction(&cameratoworld, D));

        bool use_stereo = kernel_data.cam.interocular_offset != 0.0f;
        if(!use_stereo) {
                /* No stereo */
                ray->P = P;
                ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
                /* Direction differentials from the one-pixel raster offsets stored in
                 * cam.dx/cam.dy; origin differentials are zero for a pinhole camera. */
                float3 Dcenter = transform_direction(&cameratoworld, Pcamera);

                ray->dP = differential3_zero();
                ray->dD.dx = normalize(Dcenter + float4_to_float3(kernel_data.cam.dx)) - normalize(Dcenter);
                ray->dD.dy = normalize(Dcenter + float4_to_float3(kernel_data.cam.dy)) - normalize(Dcenter);
#endif
        }
        else {
                /* Spherical stereo */
                spherical_stereo_transform(&kernel_data.cam, &P, &D);
                ray->P = P;
                ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
                /* Ray differentials, computed from scratch using the raster coordinates
                 * because we don't want to be affected by depth of field. We compute
                 * ray origin and direction for the center and two neighbouring pixels
                 * and simply take their differences. */
                float3 Pnostereo = transform_point(&cameratoworld, make_float3(0.0f, 0.0f, 0.0f));

                float3 Pcenter = Pnostereo;
                float3 Dcenter = Pcamera;
                Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
                spherical_stereo_transform(&kernel_data.cam, &Pcenter, &Dcenter);

                /* Neighbour pixel in x. */
                float3 Px = Pnostereo;
                float3 Dx = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
                Dx = normalize(transform_direction(&cameratoworld, Dx));
                spherical_stereo_transform(&kernel_data.cam, &Px, &Dx);

                ray->dP.dx = Px - Pcenter;
                ray->dD.dx = Dx - Dcenter;

                /* Neighbour pixel in y. */
                float3 Py = Pnostereo;
                float3 Dy = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
                Dy = normalize(transform_direction(&cameratoworld, Dy));
                spherical_stereo_transform(&kernel_data.cam, &Py, &Dy);

                ray->dP.dy = Py - Pcenter;
                ray->dD.dy = Dy - Dcenter;
#endif
        }

#ifdef __CAMERA_CLIPPING__
        /* clipping: advance the origin onto the near clip plane; z_inv converts
         * a distance along the view (z) axis into a distance along this ray. */
        float z_inv = 1.0f / normalize(Pcamera).z;
        float nearclip = kernel_data.cam.nearclip * z_inv;
        ray->P += nearclip * ray->D;
        ray->dP.dx += nearclip * ray->dD.dx;
        ray->dP.dy += nearclip * ray->dD.dy;
        ray->t = kernel_data.cam.cliplength * z_inv;
#else
        ray->t = FLT_MAX;
#endif
}
173
174 /* Orthographic Camera */
175 ccl_device void camera_sample_orthographic(KernelGlobals *kg, float raster_x, float raster_y, float lens_u, float lens_v, ccl_addr_space Ray *ray)
176 {
177         /* create ray form raster position */
178         Transform rastertocamera = kernel_data.cam.rastertocamera;
179         float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));
180
181         float3 P;
182         float3 D = make_float3(0.0f, 0.0f, 1.0f);
183
184         /* modify ray for depth of field */
185         float aperturesize = kernel_data.cam.aperturesize;
186
187         if(aperturesize > 0.0f) {
188                 /* sample point on aperture */
189                 float2 lensuv = camera_sample_aperture(&kernel_data.cam, lens_u, lens_v)*aperturesize;
190
191                 /* compute point on plane of focus */
192                 float3 Pfocus = D * kernel_data.cam.focaldistance;
193
194                 /* update ray for effect of lens */
195                 float3 lensuvw = make_float3(lensuv.x, lensuv.y, 0.0f);
196                 P = Pcamera + lensuvw;
197                 D = normalize(Pfocus - lensuvw);
198         }
199         else {
200                 P = Pcamera;
201         }
202         /* transform ray from camera to world */
203         Transform cameratoworld = kernel_data.cam.cameratoworld;
204
205 #ifdef __CAMERA_MOTION__
206         if(kernel_data.cam.have_motion) {
207 #  ifdef __KERNEL_OPENCL__
208                 const MotionTransform tfm = kernel_data.cam.motion;
209                 transform_motion_interpolate(&cameratoworld,
210                                              &tfm,
211                                              ray->time);
212 #  else
213                 transform_motion_interpolate(&cameratoworld,
214                                              &kernel_data.cam.motion,
215                                              ray->time);
216 #  endif
217         }
218 #endif
219
220         ray->P = transform_point(&cameratoworld, P);
221         ray->D = normalize(transform_direction(&cameratoworld, D));
222
223 #ifdef __RAY_DIFFERENTIALS__
224         /* ray differential */
225         ray->dP.dx = float4_to_float3(kernel_data.cam.dx);
226         ray->dP.dy = float4_to_float3(kernel_data.cam.dy);
227
228         ray->dD = differential3_zero();
229 #endif
230
231 #ifdef __CAMERA_CLIPPING__
232         /* clipping */
233         ray->t = kernel_data.cam.cliplength;
234 #else
235         ray->t = FLT_MAX;
236 #endif
237 }
238
239 /* Panorama Camera */
240
/* Generate a camera ray for a panoramic camera (equirectangular, fisheye,
 * mirror ball, ...).
 *
 * The raster position is mapped to a direction by panorama_to_direction();
 * a zero direction marks samples outside the lens, flagged with ray->t = 0
 * so they receive no light. Supports depth of field, camera motion blur and
 * spherical stereo. ray->time must already be set by the caller. */
ccl_device_inline void camera_sample_panorama(ccl_constant KernelCamera *cam,
                                              float raster_x, float raster_y,
                                              float lens_u, float lens_v,
                                              ccl_addr_space Ray *ray)
{
        Transform rastertocamera = cam->rastertocamera;
        float3 Pcamera = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y, 0.0f));

        /* create ray from raster position */
        float3 P = make_float3(0.0f, 0.0f, 0.0f);
        float3 D = panorama_to_direction(cam, Pcamera.x, Pcamera.y);

        /* indicates ray should not receive any light, outside of the lens */
        if(is_zero(D)) {
                ray->t = 0.0f;
                return;
        }

        /* modify ray for depth of field */
        float aperturesize = cam->aperturesize;

        if(aperturesize > 0.0f) {
                /* sample point on aperture */
                float2 lensuv = camera_sample_aperture(cam, lens_u, lens_v)*aperturesize;

                /* compute point on plane of focus */
                float3 Dfocus = normalize(D);
                float3 Pfocus = Dfocus * cam->focaldistance;

                /* calculate orthonormal coordinates perpendicular to Dfocus,
                 * spanning the aperture plane for this (varying) view direction */
                float3 U, V;
                U = normalize(make_float3(1.0f, 0.0f, 0.0f) -  Dfocus.x * Dfocus);
                V = normalize(cross(Dfocus, U));

                /* update ray for effect of lens: origin on the aperture plane,
                 * direction towards the in-focus point */
                P = U * lensuv.x + V * lensuv.y;
                D = normalize(Pfocus - P);
        }

        /* transform ray from camera to world */
        Transform cameratoworld = cam->cameratoworld;

#ifdef __CAMERA_MOTION__
        /* Camera object motion blur: interpolate camera-to-world at ray->time. */
        if(cam->have_motion) {
#  ifdef __KERNEL_OPENCL__
                /* OpenCL path copies to a private variable before taking its address. */
                const MotionTransform tfm = cam->motion;
                transform_motion_interpolate(&cameratoworld,
                                             &tfm,
                                             ray->time);
#  else
                transform_motion_interpolate(&cameratoworld,
                                             &cam->motion,
                                             ray->time);
#  endif
        }
#endif

        P = transform_point(&cameratoworld, P);
        D = normalize(transform_direction(&cameratoworld, D));

        /* Stereo transform */
        bool use_stereo = cam->interocular_offset != 0.0f;
        if(use_stereo) {
                spherical_stereo_transform(cam, &P, &D);
        }

        ray->P = P;
        ray->D = D;

#ifdef __RAY_DIFFERENTIALS__
        /* Ray differentials, computed from scratch using the raster coordinates
         * because we don't want to be affected by depth of field. We compute
         * ray origin and direction for the center and two neighbouring pixels
         * and simply take their differences. */
        float3 Pcenter = Pcamera;
        float3 Dcenter = panorama_to_direction(cam, Pcenter.x, Pcenter.y);
        Pcenter = transform_point(&cameratoworld, Pcenter);
        Dcenter = normalize(transform_direction(&cameratoworld, Dcenter));
        if(use_stereo) {
                spherical_stereo_transform(cam, &Pcenter, &Dcenter);
        }

        /* Neighbour pixel in x. */
        float3 Px = transform_perspective(&rastertocamera, make_float3(raster_x + 1.0f, raster_y, 0.0f));
        float3 Dx = panorama_to_direction(cam, Px.x, Px.y);
        Px = transform_point(&cameratoworld, Px);
        Dx = normalize(transform_direction(&cameratoworld, Dx));
        if(use_stereo) {
                spherical_stereo_transform(cam, &Px, &Dx);
        }

        ray->dP.dx = Px - Pcenter;
        ray->dD.dx = Dx - Dcenter;

        /* Neighbour pixel in y. */
        float3 Py = transform_perspective(&rastertocamera, make_float3(raster_x, raster_y + 1.0f, 0.0f));
        float3 Dy = panorama_to_direction(cam, Py.x, Py.y);
        Py = transform_point(&cameratoworld, Py);
        Dy = normalize(transform_direction(&cameratoworld, Dy));
        if(use_stereo) {
                spherical_stereo_transform(cam, &Py, &Dy);
        }

        ray->dP.dy = Py - Pcenter;
        ray->dD.dy = Dy - Dcenter;
#endif

#ifdef __CAMERA_CLIPPING__
        /* clipping: advance the origin (and differentials) onto the near plane */
        float nearclip = cam->nearclip;
        ray->P += nearclip * ray->D;
        ray->dP.dx += nearclip * ray->dD.dx;
        ray->dP.dy += nearclip * ray->dD.dy;
        ray->t = cam->cliplength;
#else
        ray->t = FLT_MAX;
#endif
}
357
358 /* Common */
359
360 ccl_device_inline void camera_sample(KernelGlobals *kg,
361                                      int x, int y,
362                                      float filter_u, float filter_v,
363                                      float lens_u, float lens_v,
364                                      float time,
365                                      ccl_addr_space Ray *ray)
366 {
367         /* pixel filter */
368         int filter_table_offset = kernel_data.film.filter_table_offset;
369         float raster_x = x + lookup_table_read(kg, filter_u, filter_table_offset, FILTER_TABLE_SIZE);
370         float raster_y = y + lookup_table_read(kg, filter_v, filter_table_offset, FILTER_TABLE_SIZE);
371
372 #ifdef __CAMERA_MOTION__
373         /* motion blur */
374         if(kernel_data.cam.shuttertime == -1.0f) {
375                 ray->time = 0.5f;
376         }
377         else {
378                 /* TODO(sergey): Such lookup is unneeded when there's rolling shutter
379                  * effect in use but rolling shutter duration is set to 0.0.
380                  */
381                 const int shutter_table_offset = kernel_data.cam.shutter_table_offset;
382                 ray->time = lookup_table_read(kg, time, shutter_table_offset, SHUTTER_TABLE_SIZE);
383                 /* TODO(sergey): Currently single rolling shutter effect type only
384                  * where scanlines are acquired from top to bottom and whole scanline
385                  * is acquired at once (no delay in acquisition happens between pixels
386                  * of single scanline).
387                  *
388                  * Might want to support more models in the future.
389                  */
390                 if(kernel_data.cam.rolling_shutter_type) {
391                         /* Time corresponding to a fully rolling shutter only effect:
392                          * top of the frame is time 0.0, bottom of the frame is time 1.0.
393                          */
394                         const float time = 1.0f - (float)y / kernel_data.cam.height;
395                         const float duration = kernel_data.cam.rolling_shutter_duration;
396                         if(duration != 0.0f) {
397                                 /* This isn't fully physical correct, but lets us to have simple
398                                  * controls in the interface. The idea here is basically sort of
399                                  * linear interpolation between how much rolling shutter effect
400                                  * exist on the frame and how much of it is a motion blur effect.
401                                  */
402                                 ray->time = (ray->time - 0.5f) * duration;
403                                 ray->time += (time - 0.5f) * (1.0f - duration) + 0.5f;
404                         }
405                         else {
406                                 ray->time = time;
407                         }
408                 }
409         }
410 #endif
411
412         /* sample */
413         if(kernel_data.cam.type == CAMERA_PERSPECTIVE)
414                 camera_sample_perspective(kg, raster_x, raster_y, lens_u, lens_v, ray);
415         else if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC)
416                 camera_sample_orthographic(kg, raster_x, raster_y, lens_u, lens_v, ray);
417         else
418                 camera_sample_panorama(&kernel_data.cam, raster_x, raster_y, lens_u, lens_v, ray);
419 }
420
421 /* Utilities */
422
423 ccl_device_inline float3 camera_position(KernelGlobals *kg)
424 {
425         Transform cameratoworld = kernel_data.cam.cameratoworld;
426         return make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
427 }
428
429 ccl_device_inline float camera_distance(KernelGlobals *kg, float3 P)
430 {
431         Transform cameratoworld = kernel_data.cam.cameratoworld;
432         float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
433
434         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
435                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
436                 return fabsf(dot((P - camP), camD));
437         }
438         else
439                 return len(P - camP);
440 }
441
442 ccl_device_inline float3 camera_direction_from_point(KernelGlobals *kg, float3 P)
443 {
444         Transform cameratoworld = kernel_data.cam.cameratoworld;
445
446         if(kernel_data.cam.type == CAMERA_ORTHOGRAPHIC) {
447                 float3 camD = make_float3(cameratoworld.x.z, cameratoworld.y.z, cameratoworld.z.z);
448                 return -camD;
449         }
450         else {
451                 float3 camP = make_float3(cameratoworld.x.w, cameratoworld.y.w, cameratoworld.z.w);
452                 return normalize(camP - P);
453         }
454 }
455
456 ccl_device_inline float3 camera_world_to_ndc(KernelGlobals *kg, ShaderData *sd, float3 P)
457 {
458         if(kernel_data.cam.type != CAMERA_PANORAMA) {
459                 /* perspective / ortho */
460                 if(sd->object == PRIM_NONE && kernel_data.cam.type == CAMERA_PERSPECTIVE)
461                         P += camera_position(kg);
462
463                 Transform tfm = kernel_data.cam.worldtondc;
464                 return transform_perspective(&tfm, P);
465         }
466         else {
467                 /* panorama */
468                 Transform tfm = kernel_data.cam.worldtocamera;
469
470                 if(sd->object != OBJECT_NONE)
471                         P = normalize(transform_point(&tfm, P));
472                 else
473                         P = normalize(transform_direction(&tfm, P));
474
475                 float2 uv = direction_to_panorama(&kernel_data.cam, P);
476
477                 return make_float3(uv.x, uv.y, 0.0f);
478         }
479 }
480
481 CCL_NAMESPACE_END