Fix build error on Windows 32 bit.
[blender-staging.git] / intern / cycles / render / camera.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "render/camera.h"
18 #include "render/mesh.h"
19 #include "render/object.h"
20 #include "render/scene.h"
21 #include "render/tables.h"
22
23 #include "device/device.h"
24
25 #include "util/util_foreach.h"
26 #include "util/util_function.h"
27 #include "util/util_math_cdf.h"
28 #include "util/util_vector.h"
29
30 /* needed for calculating differentials */
31 #include "kernel/kernel_compat_cpu.h"
32 #include "kernel/split/kernel_split_data.h"
33 #include "kernel/kernel_globals.h"
34 #include "kernel/kernel_projection.h"
35 #include "kernel/kernel_differential.h"
36 #include "kernel/kernel_montecarlo.h"
37 #include "kernel/kernel_camera.h"
38
39 CCL_NAMESPACE_BEGIN
40
41 static float shutter_curve_eval(float x,
42                                 array<float>& shutter_curve)
43 {
44         if(shutter_curve.size() == 0) {
45                 return 1.0f;
46         }
47
48         x *= shutter_curve.size();
49         int index = (int)x;
50         float frac = x - index;
51         if(index < shutter_curve.size() - 1) {
52                 return lerp(shutter_curve[index], shutter_curve[index + 1], frac);
53         }
54         else {
55                 return shutter_curve[shutter_curve.size() - 1];
56         }
57 }
58
/* Declare the Camera node type and all of its sockets (user visible
 * parameters) together with their default values. */
NODE_DEFINE(Camera)
{
	NodeType* type = NodeType::add("camera", create);

	/* Motion blur shutter. */
	SOCKET_FLOAT(shuttertime, "Shutter Time", 1.0f);

	static NodeEnum motion_position_enum;
	motion_position_enum.insert("start", MOTION_POSITION_START);
	motion_position_enum.insert("center", MOTION_POSITION_CENTER);
	motion_position_enum.insert("end", MOTION_POSITION_END);
	SOCKET_ENUM(motion_position, "Motion Position", motion_position_enum, MOTION_POSITION_CENTER);

	static NodeEnum rolling_shutter_type_enum;
	rolling_shutter_type_enum.insert("none", ROLLING_SHUTTER_NONE);
	rolling_shutter_type_enum.insert("top", ROLLING_SHUTTER_TOP);
	SOCKET_ENUM(rolling_shutter_type, "Rolling Shutter Type", rolling_shutter_type_enum,  ROLLING_SHUTTER_NONE);
	SOCKET_FLOAT(rolling_shutter_duration, "Rolling Shutter Duration", 0.1f);

	/* Shutter opening profile over the shutter interval; empty = fully open. */
	SOCKET_FLOAT_ARRAY(shutter_curve, "Shutter Curve", array<float>());

	/* Depth of field. */
	SOCKET_FLOAT(aperturesize, "Aperture Size", 0.0f);
	SOCKET_FLOAT(focaldistance, "Focal Distance", 10.0f);
	SOCKET_UINT(blades, "Blades", 0);
	SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);

	/* Camera to world transform and its motion steps. */
	SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());
	SOCKET_TRANSFORM_ARRAY(motion, "Motion", array<Transform>());

	SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);

	static NodeEnum type_enum;
	type_enum.insert("perspective", CAMERA_PERSPECTIVE);
	/* NOTE(review): the "orthograph" key spelling looks unintended, but
	 * renaming it could break scenes/APIs that refer to the enum by name --
	 * confirm before changing. */
	type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
	type_enum.insert("panorama", CAMERA_PANORAMA);
	SOCKET_ENUM(type, "Type", type_enum, CAMERA_PERSPECTIVE);

	/* Panorama projection settings. */
	static NodeEnum panorama_type_enum;
	panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
	panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
	panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
	panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
	SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);

	SOCKET_FLOAT(fisheye_fov, "Fisheye FOV", M_PI_F);
	SOCKET_FLOAT(fisheye_lens, "Fisheye Lens", 10.5f);
	SOCKET_FLOAT(latitude_min, "Latitude Min", -M_PI_2_F);
	SOCKET_FLOAT(latitude_max, "Latitude Max", M_PI_2_F);
	SOCKET_FLOAT(longitude_min, "Longitude Min", -M_PI_F);
	SOCKET_FLOAT(longitude_max, "Longitude Max", M_PI_F);
	/* Field of view, plus pre/post values for perspective motion blur. */
	SOCKET_FLOAT(fov, "FOV", M_PI_4_F);
	SOCKET_FLOAT(fov_pre, "FOV Pre", M_PI_4_F);
	SOCKET_FLOAT(fov_post, "FOV Post", M_PI_4_F);

	/* Stereoscopic rendering. */
	static NodeEnum stereo_eye_enum;
	stereo_eye_enum.insert("none", STEREO_NONE);
	stereo_eye_enum.insert("left", STEREO_LEFT);
	stereo_eye_enum.insert("right", STEREO_RIGHT);
	SOCKET_ENUM(stereo_eye, "Stereo Eye", stereo_eye_enum, STEREO_NONE);

	SOCKET_FLOAT(interocular_distance, "Interocular Distance", 0.065f);
	SOCKET_FLOAT(convergence_distance, "Convergence Distance", 30.0f * 0.065f);

	SOCKET_BOOLEAN(use_pole_merge, "Use Pole Merge", false);
	SOCKET_FLOAT(pole_merge_angle_from, "Pole Merge Angle From",  60.0f * M_PI_F / 180.0f);
	SOCKET_FLOAT(pole_merge_angle_to, "Pole Merge Angle To", 75.0f * M_PI_F / 180.0f);

	/* Physical sensor size in meters (defaults match a 36x24mm full frame). */
	SOCKET_FLOAT(sensorwidth, "Sensor Width", 0.036f);
	SOCKET_FLOAT(sensorheight, "Sensor Height", 0.024f);

	/* Clipping distances. */
	SOCKET_FLOAT(nearclip, "Near Clip", 1e-5f);
	SOCKET_FLOAT(farclip, "Far Clip", 1e5f);

	SOCKET_FLOAT(viewplane.left, "Viewplane Left", 0);
	SOCKET_FLOAT(viewplane.right, "Viewplane Right", 0);
	SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
	SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);

	/* Render border region. */
	SOCKET_FLOAT(border.left, "Border Left", 0);
	SOCKET_FLOAT(border.right, "Border Right", 0);
	SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
	SOCKET_FLOAT(border.top, "Border Top", 0);

	SOCKET_FLOAT(offscreen_dicing_scale, "Offscreen Dicing Scale", 1.0f);

	return type;
}
145
146 Camera::Camera()
147 : Node(node_type)
148 {
149         shutter_table_offset = TABLE_OFFSET_INVALID;
150
151         width = 1024;
152         height = 512;
153         resolution = 1;
154
155         use_perspective_motion = false;
156
157         shutter_curve.resize(RAMP_TABLE_SIZE);
158         for(int i = 0; i < shutter_curve.size(); ++i) {
159                 shutter_curve[i] = 1.0f;
160         }
161
162         compute_auto_viewplane();
163
164         screentoworld = projection_identity();
165         rastertoworld = projection_identity();
166         ndctoworld = projection_identity();
167         rastertocamera = projection_identity();
168         cameratoworld = transform_identity();
169         worldtoraster = projection_identity();
170
171         dx = make_float3(0.0f, 0.0f, 0.0f);
172         dy = make_float3(0.0f, 0.0f, 0.0f);
173
174         need_update = true;
175         need_device_update = true;
176         need_flags_update = true;
177         previous_need_motion = -1;
178
179         memset(&kernel_camera, 0, sizeof(kernel_camera));
180 }
181
Camera::~Camera()
{
	/* Nothing owned directly; device-side data is released via device_free(). */
}
185
186 void Camera::compute_auto_viewplane()
187 {
188         if(type == CAMERA_PANORAMA) {
189                 viewplane.left = 0.0f;
190                 viewplane.right = 1.0f;
191                 viewplane.bottom = 0.0f;
192                 viewplane.top = 1.0f;
193         }
194         else {
195                 float aspect = (float)width/(float)height;
196                 if(width >= height) {
197                         viewplane.left = -aspect;
198                         viewplane.right = aspect;
199                         viewplane.bottom = -1.0f;
200                         viewplane.top = 1.0f;
201                 }
202                 else {
203                         viewplane.left = -1.0f;
204                         viewplane.right = 1.0f;
205                         viewplane.bottom = -1.0f/aspect;
206                         viewplane.top = 1.0f/aspect;
207                 }
208         }
209 }
210
/* Recompute all derived camera transforms and differentials, and fill the
 * kernel camera data (kernel_camera / kernel_camera_motion) from the node
 * sockets. Early-outs when not tagged for update, but always refreshes the
 * device update flag when the scene's motion model changed. */
void Camera::update(Scene *scene)
{
	Scene::MotionType need_motion = scene->need_motion();

	if(previous_need_motion != need_motion) {
		/* scene's motion model could have been changed since previous device
		 * camera update this could happen for example in case when one render
		 * layer has got motion pass and another not */
		need_device_update = true;
	}

	if(!need_update)
		return;

	/* Full viewport to camera border in the viewport. */
	Transform fulltoborder = transform_from_viewplane(viewport_camera_border);
	Transform bordertofull = transform_inverse(fulltoborder);

	/* ndc to raster */
	Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
	Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;

	/* raster to screen */
	Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);

	Transform screentoraster = ndctoraster * screentondc;
	Transform rastertoscreen = transform_inverse(screentoraster);
	Transform full_screentoraster = full_ndctoraster * screentondc;
	Transform full_rastertoscreen = transform_inverse(full_screentoraster);

	/* screen to camera */
	ProjectionTransform cameratoscreen;
	if(type == CAMERA_PERSPECTIVE)
		cameratoscreen = projection_perspective(fov, nearclip, farclip);
	else if(type == CAMERA_ORTHOGRAPHIC)
		cameratoscreen = projection_orthographic(nearclip, farclip);
	else
		cameratoscreen = projection_identity();

	ProjectionTransform screentocamera = projection_inverse(cameratoscreen);

	rastertocamera = screentocamera * rastertoscreen;
	ProjectionTransform full_rastertocamera = screentocamera * full_rastertoscreen;
	cameratoraster = screentoraster * cameratoscreen;

	cameratoworld = matrix;
	screentoworld = cameratoworld * screentocamera;
	rastertoworld = cameratoworld * rastertocamera;
	ndctoworld = rastertoworld * ndctoraster;

	/* note we recompose matrices instead of taking inverses of the above, this
	 * is needed to avoid inverting near degenerate matrices that happen due to
	 * precision issues with large scenes */
	worldtocamera = transform_inverse(matrix);
	worldtoscreen = cameratoscreen * worldtocamera;
	worldtondc = screentondc * worldtoscreen;
	worldtoraster = ndctoraster * worldtondc;

	/* differentials */
	if(type == CAMERA_ORTHOGRAPHIC) {
		/* Orthographic rays are parallel: one raster step is a constant
		 * camera-space offset. */
		dx = transform_perspective_direction(&rastertocamera, make_float3(1, 0, 0));
		dy = transform_perspective_direction(&rastertocamera, make_float3(0, 1, 0));
		full_dx = transform_perspective_direction(&full_rastertocamera, make_float3(1, 0, 0));
		full_dy = transform_perspective_direction(&full_rastertocamera, make_float3(0, 1, 0));
	}
	else if(type == CAMERA_PERSPECTIVE) {
		/* Finite difference of a one-pixel step against the raster origin. */
		dx = transform_perspective(&rastertocamera, make_float3(1, 0, 0)) -
		     transform_perspective(&rastertocamera, make_float3(0, 0, 0));
		dy = transform_perspective(&rastertocamera, make_float3(0, 1, 0)) -
		     transform_perspective(&rastertocamera, make_float3(0, 0, 0));
		full_dx = transform_perspective(&full_rastertocamera, make_float3(1, 0, 0)) -
		     transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
		full_dy = transform_perspective(&full_rastertocamera, make_float3(0, 1, 0)) -
		     transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
	}
	else {
		/* Panorama and other types: no meaningful raster-space differentials. */
		dx = make_float3(0.0f, 0.0f, 0.0f);
		dy = make_float3(0.0f, 0.0f, 0.0f);
	}

	dx = transform_direction(&cameratoworld, dx);
	dy = transform_direction(&cameratoworld, dy);
	full_dx = transform_direction(&cameratoworld, full_dx);
	full_dy = transform_direction(&cameratoworld, full_dy);

	if(type == CAMERA_PERSPECTIVE) {
		/* Frustum side-plane normals, consumed by world_to_raster_size()
		 * for the offscreen dicing scale. */
		float3 v = transform_perspective(&full_rastertocamera, make_float3(full_width, full_height, 1.0f));

		frustum_right_normal = normalize(make_float3(v.z, 0.0f, -v.x));
		frustum_top_normal = normalize(make_float3(0.0f, v.z, -v.y));
	}

	/* Compute kernel camera data. */
	KernelCamera *kcam = &kernel_camera;

	/* store matrices */
	kcam->screentoworld = screentoworld;
	kcam->rastertoworld = rastertoworld;
	kcam->rastertocamera = rastertocamera;
	kcam->cameratoworld = cameratoworld;
	kcam->worldtocamera = worldtocamera;
	kcam->worldtoscreen = worldtoscreen;
	kcam->worldtoraster = worldtoraster;
	kcam->worldtondc = worldtondc;
	kcam->ndctoworld = ndctoworld;

	/* camera motion */
	kcam->num_motion_steps = 0;
	kcam->have_perspective_motion = 0;
	kernel_camera_motion.clear();

	/* Test if any of the transforms are actually different. */
	bool have_motion = false;
	for(size_t i = 0; i < motion.size(); i++) {
		have_motion = have_motion || motion[i] != matrix;
	}

	if(need_motion == Scene::MOTION_PASS) {
		/* TODO(sergey): Support perspective (zoom, fov) motion. */
		if(type == CAMERA_PANORAMA) {
			if(have_motion) {
				/* First and last motion step bracket the shutter interval. */
				kcam->motion_pass_pre = transform_inverse(motion[0]);
				kcam->motion_pass_post = transform_inverse(motion[motion.size()-1]);
			}
			else {
				kcam->motion_pass_pre = kcam->worldtocamera;
				kcam->motion_pass_post = kcam->worldtocamera;
			}
		}
		else {
			if(have_motion) {
				kcam->perspective_pre = cameratoraster * transform_inverse(motion[0]);
				kcam->perspective_post = cameratoraster * transform_inverse(motion[motion.size()-1]);
			}
			else {
				kcam->perspective_pre = worldtoraster;
				kcam->perspective_post = worldtoraster;
			}
		}
	}
	else if(need_motion == Scene::MOTION_BLUR) {
		if(have_motion) {
			/* Decompose motion transforms for interpolation in the kernel. */
			kernel_camera_motion.resize(motion.size());
			transform_motion_decompose(kernel_camera_motion.data(), motion.data(), motion.size());
			kcam->num_motion_steps = motion.size();
		}

		/* TODO(sergey): Support other types of camera. */
		if(use_perspective_motion && type == CAMERA_PERSPECTIVE) {
			/* TODO(sergey): Move to an utility function and de-duplicate with
			 * calculation above.
			 */
			ProjectionTransform screentocamera_pre =
			                projection_inverse(projection_perspective(fov_pre,
			                                                          nearclip,
			                                                          farclip));
			ProjectionTransform screentocamera_post =
			                projection_inverse(projection_perspective(fov_post,
			                                                          nearclip,
			                                                          farclip));

			kcam->perspective_pre = screentocamera_pre * rastertoscreen;
			kcam->perspective_post = screentocamera_post * rastertoscreen;
			kcam->have_perspective_motion = 1;
		}
	}

	/* depth of field */
	kcam->aperturesize = aperturesize;
	kcam->focaldistance = focaldistance;
	/* Fewer than 3 blades can not form a polygonal aperture; store 0. */
	kcam->blades = (blades < 3)? 0.0f: blades;
	kcam->bladesrotation = bladesrotation;

	/* motion blur; -1 signals motion blur is disabled */
	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: -1.0f;

	/* type */
	kcam->type = type;

	/* anamorphic lens bokeh */
	kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

	/* panorama */
	kcam->panorama_type = panorama_type;
	kcam->fisheye_fov = fisheye_fov;
	kcam->fisheye_lens = fisheye_lens;
	kcam->equirectangular_range = make_float4(longitude_min - longitude_max, -longitude_min,
	                                          latitude_min -  latitude_max, -latitude_min + M_PI_2_F);

	switch(stereo_eye) {
		case STEREO_LEFT:
			kcam->interocular_offset = -interocular_distance * 0.5f;
			break;
		case STEREO_RIGHT:
			kcam->interocular_offset = interocular_distance * 0.5f;
			break;
		case STEREO_NONE:
		default:
			kcam->interocular_offset = 0.0f;
			break;
	}

	kcam->convergence_distance = convergence_distance;
	if(use_pole_merge) {
		kcam->pole_merge_angle_from = pole_merge_angle_from;
		kcam->pole_merge_angle_to = pole_merge_angle_to;
	}
	else {
		/* Pole merge disabled. */
		kcam->pole_merge_angle_from = -1.0f;
		kcam->pole_merge_angle_to = -1.0f;
	}

	/* sensor size */
	kcam->sensorwidth = sensorwidth;
	kcam->sensorheight = sensorheight;

	/* render size */
	kcam->width = width;
	kcam->height = height;
	kcam->resolution = resolution;

	/* store differentials */
	kcam->dx = float3_to_float4(dx);
	kcam->dy = float3_to_float4(dy);

	/* clipping */
	kcam->nearclip = nearclip;
	kcam->cliplength = (farclip == FLT_MAX)? FLT_MAX: farclip - nearclip;

	/* Camera in volume. */
	kcam->is_inside_volume = 0;

	/* Rolling shutter effect */
	kcam->rolling_shutter_type = rolling_shutter_type;
	kcam->rolling_shutter_duration = rolling_shutter_duration;

	/* Set further update flags */
	need_update = false;
	need_device_update = true;
	need_flags_update = true;
	previous_need_motion = need_motion;
}
453
/* Sync camera data to the device: rebuild the inverted shutter-curve CDF
 * lookup table (used for sampling time within the shutter interval), copy
 * the kernel camera struct, and upload decomposed camera motion transforms
 * when present. */
void Camera::device_update(Device * /* device */,
                           DeviceScene *dscene,
                           Scene *scene)
{
	update(scene);

	if(!need_device_update)
		return;

	/* shuttertime == -1 means motion blur is disabled (see update()),
	 * in which case no shutter table is needed. */
	scene->lookup_tables->remove_table(&shutter_table_offset);
	if(kernel_camera.shuttertime != -1.0f) {
		vector<float> shutter_table;
		util_cdf_inverted(SHUTTER_TABLE_SIZE,
		                  0.0f,
		                  1.0f,
		                  function_bind(shutter_curve_eval, _1, shutter_curve),
		                  false,
		                  shutter_table);
		shutter_table_offset = scene->lookup_tables->add_table(dscene,
		                                                       shutter_table);
		kernel_camera.shutter_table_offset = (int)shutter_table_offset;
	}

	dscene->data.cam = kernel_camera;

	size_t num_motion_steps = kernel_camera_motion.size();
	if(num_motion_steps) {
		/* Upload decomposed motion transforms for kernel interpolation. */
		DecomposedTransform *camera_motion = dscene->camera_motion.alloc(num_motion_steps);
		memcpy(camera_motion, kernel_camera_motion.data(), sizeof(*camera_motion) * num_motion_steps);
		dscene->camera_motion.copy_to_device();
	}
	else {
		dscene->camera_motion.free();
	}
}
489
490 void Camera::device_update_volume(Device * /*device*/,
491                                   DeviceScene *dscene,
492                                   Scene *scene)
493 {
494         if(!need_device_update && !need_flags_update) {
495                 return;
496         }
497         KernelCamera *kcam = &dscene->data.cam;
498         BoundBox viewplane_boundbox = viewplane_bounds_get();
499         for(size_t i = 0; i < scene->objects.size(); ++i) {
500                 Object *object = scene->objects[i];
501                 if(object->mesh->has_volume &&
502                    viewplane_boundbox.intersects(object->bounds))
503                 {
504                         /* TODO(sergey): Consider adding more grained check. */
505                         kcam->is_inside_volume = 1;
506                         break;
507                 }
508         }
509         need_device_update = false;
510         need_flags_update = false;
511 }
512
/* Release all device-side camera data: the shutter lookup table entry and
 * the decomposed camera motion buffer. */
void Camera::device_free(Device * /*device*/,
                         DeviceScene *dscene,
                         Scene *scene)
{
	scene->lookup_tables->remove_table(&shutter_table_offset);
	dscene->camera_motion.free();
}
520
/* Return true when any socket value differs from the given camera. */
bool Camera::modified(const Camera& cam)
{
	return !Node::equals(cam);
}
525
526 bool Camera::motion_modified(const Camera& cam)
527 {
528         return !((motion == cam.motion) &&
529                  (use_perspective_motion == cam.use_perspective_motion));
530 }
531
/* Mark the camera as needing a recompute on the next update(). */
void Camera::tag_update()
{
	need_update = true;
}
536
/* Map a raster-space (pixel) position to a world-space point on the near
 * clipping plane; used by viewplane_bounds_get() for the camera-in-volume
 * check. Only perspective and orthographic cameras are supported. */
float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
{
	float3 D, P;
	if(type == CAMERA_PERSPECTIVE) {
		D = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		float3 Pclip = normalize(D);
		P = make_float3(0.0f, 0.0f, 0.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
		/* TODO(sergey): Clipping is conditional in kernel, and hence it could
		 * be mistakes in here, currently leading to wrong camera-in-volume
		 * detection.
		 */
		/* Advance from the camera origin to the near clip plane along D. */
		P += nearclip * D / Pclip.z;
	}
	else if(type == CAMERA_ORTHOGRAPHIC) {
		D = make_float3(0.0f, 0.0f, 1.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
	}
	else {
		/* Panorama cameras are handled directly in viewplane_bounds_get(). */
		assert(!"unsupported camera type");
	}
	return P;
}
567
568 BoundBox Camera::viewplane_bounds_get()
569 {
570         /* TODO(sergey): This is all rather stupid, but is there a way to perform
571          * checks we need in a more clear and smart fasion?
572          */
573         BoundBox bounds = BoundBox::empty;
574
575         if(type == CAMERA_PANORAMA) {
576                 if(use_spherical_stereo == false) {
577                         bounds.grow(make_float3(cameratoworld.x.w,
578                                                 cameratoworld.y.w,
579                                                 cameratoworld.z.w));
580                 }
581                 else {
582                         float half_eye_distance = interocular_distance * 0.5f;
583
584                         bounds.grow(make_float3(cameratoworld.x.w + half_eye_distance,
585                                                 cameratoworld.y.w,
586                                                 cameratoworld.z.w));
587
588                         bounds.grow(make_float3(cameratoworld.z.w,
589                                                 cameratoworld.y.w + half_eye_distance,
590                                                 cameratoworld.z.w));
591
592                         bounds.grow(make_float3(cameratoworld.x.w - half_eye_distance,
593                                                 cameratoworld.y.w,
594                                                 cameratoworld.z.w));
595
596                         bounds.grow(make_float3(cameratoworld.x.w,
597                                                 cameratoworld.y.w - half_eye_distance,
598                                                 cameratoworld.z.w));
599                 }
600         }
601         else {
602                 bounds.grow(transform_raster_to_world(0.0f, 0.0f));
603                 bounds.grow(transform_raster_to_world(0.0f, (float)height));
604                 bounds.grow(transform_raster_to_world((float)width, (float)height));
605                 bounds.grow(transform_raster_to_world((float)width, 0.0f));
606                 if(type == CAMERA_PERSPECTIVE) {
607                         /* Center point has the most distance in local Z axis,
608                          * use it to construct bounding box/
609                          */
610                         bounds.grow(transform_raster_to_world(0.5f*width, 0.5f*height));
611                 }
612         }
613         return bounds;
614 }
615
/* Approximate the world-space size that one raster pixel covers at world
 * position P. For points outside the view frustum the result is scaled up
 * by offscreen_dicing_scale (presumably to reduce subdivision dicing rate
 * offscreen -- verify against callers). */
float Camera::world_to_raster_size(float3 P)
{
	float res = 1.0f;

	if(type == CAMERA_ORTHOGRAPHIC) {
		/* Parallel rays: pixel footprint is constant over the image. */
		res = min(len(full_dx), len(full_dy));

		if(offscreen_dicing_scale > 1.0f) {
			float3 p = transform_point(&worldtocamera, P);
			float3 v = transform_perspective(&rastertocamera, make_float3(width, height, 0.0f));

			/* Create point clamped to frustum */
			float3 c;
			c.x = max(-v.x, min(v.x, p.x));
			c.y = max(-v.y, min(v.y, p.y));
			c.z = max(0.0f, p.z);

			/* Normalized distance from the clamped point. */
			float f_dist = len(p - c) / sqrtf((v.x*v.x+v.y*v.y)*0.5f);

			if(f_dist > 0.0f) {
				res += res * f_dist * (offscreen_dicing_scale - 1.0f);
			}
		}
	}
	else if(type == CAMERA_PERSPECTIVE) {
		/* Calculate as if point is directly ahead of the camera. */
		float3 raster = make_float3(0.5f*width, 0.5f*height, 0.0f);
		float3 Pcamera = transform_perspective(&rastertocamera, raster);

		/* dDdx */
		float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
		float3 dx = len_squared(full_dx) < len_squared(full_dy) ? full_dx : full_dy;
		float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);

		/* dPdx */
		float dist = len(transform_point(&worldtocamera, P));
		float3 D = normalize(Ddiff);
		res = len(dist*dDdx - dot(dist*dDdx, D)*D);

		/* Decent approx distance to frustum (doesn't handle corners correctly, but not that big of a deal) */
		float f_dist = 0.0f;

		if(offscreen_dicing_scale > 1.0f) {
			float3 p = transform_point(&worldtocamera, P);

			/* Distance from the four planes */
			float r = dot(p, frustum_right_normal);
			float t = dot(p, frustum_top_normal);
			/* Mirror p in X and Y to reuse the right/top normals for the
			 * left/bottom planes. */
			p = make_float3(-p.x, -p.y, p.z);
			float l = dot(p, frustum_right_normal);
			float b = dot(p, frustum_top_normal);
			p = make_float3(-p.x, -p.y, p.z);

			if(r <= 0.0f && l <= 0.0f && t <= 0.0f && b <= 0.0f) {
				/* Point is inside frustum */
				f_dist = 0.0f;
			}
			else if(r > 0.0f && l > 0.0f && t > 0.0f && b > 0.0f) {
				/* Point is behind frustum */
				f_dist = len(p);
			}
			else {
				/* Point may be behind or off to the side, need to check */
				float3 along_right = make_float3(-frustum_right_normal.z, 0.0f, frustum_right_normal.x);
				float3 along_left = make_float3(frustum_right_normal.z, 0.0f, frustum_right_normal.x);
				float3 along_top = make_float3(0.0f, -frustum_top_normal.z, frustum_top_normal.y);
				float3 along_bottom = make_float3(0.0f, frustum_top_normal.z, frustum_top_normal.y);

				float dist[] = {r, l, t, b};
				float3 along[] = {along_right, along_left, along_top, along_bottom};

				bool test_o = false;

				float *d = dist;
				float3 *a = along;
				for(int i = 0; i < 4; i++, d++, a++) {
					/* Test if we should check this side at all */
					if(*d > 0.0f) {
						if(dot(p, *a) >= 0.0f) {
							/* We are in front of the back edge of this side of the frustum */
							f_dist = max(f_dist, *d);
						}
						else {
							/* Possibly far enough behind the frustum to use distance to origin instead of edge */
							test_o = true;
						}
					}
				}

				if(test_o) {
					f_dist = (f_dist > 0) ? min(f_dist, len(p)) : len(p);
				}
			}

			if(f_dist > 0.0f) {
				res += len(dDdx - dot(dDdx, D)*D) * f_dist * (offscreen_dicing_scale - 1.0f);
			}
		}
	}
	else if(type == CAMERA_PANORAMA) {
		float3 D = transform_point(&worldtocamera, P);
		float dist = len(D);

		Ray ray;

		/* Distortion can become so great that the results become meaningless, there
		 * may be a better way to do this, but calculating differentials from the
		 * point directly ahead seems to produce good enough results. */
#if 0
		float2 dir = direction_to_panorama(&kernel_camera, kernel_camera_motion.data(), normalize(D));
		float3 raster = transform_perspective(&cameratoraster, make_float3(dir.x, dir.y, 0.0f));

		ray.t = 1.0f;
		camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), raster.x, raster.y, 0.0f, 0.0f, &ray);
		if(ray.t == 0.0f) {
			/* No differentials, just use from directly ahead. */
			camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
		}
#else
		camera_sample_panorama(&kernel_camera, kernel_camera_motion.data(), 0.5f*width, 0.5f*height, 0.0f, 0.0f, &ray);
#endif

		/* Transfer ray origin differentials to the hit distance. */
		differential_transfer(&ray.dP, ray.dP, ray.D, ray.dD, ray.D, dist);

		return max(len(ray.dP.dx) * (float(width)/float(full_width)),
		           len(ray.dP.dy) * (float(height)/float(full_height)));
	}

	return res;
}
746
/* Camera motion blur is only used with more than one motion transform
 * (a single step is equivalent to a static camera). */
bool Camera::use_motion() const
{
	return motion.size() > 1;
}
751
752 float Camera::motion_time(int step) const
753 {
754         return (use_motion()) ? 2.0f * step / (motion.size() - 1) - 1.0f : 0.0f;
755 }
756
757 int Camera::motion_step(float time) const
758 {
759         if(use_motion()) {
760                 for(int step = 0; step < motion.size(); step++) {
761                         if(time == motion_time(step)) {
762                                 return step;
763                         }
764                 }
765         }
766
767         return -1;
768 }
769
770 CCL_NAMESPACE_END