Fix T48604: Crash on undo due to bad drawing code.
[blender.git] / intern / cycles / render / camera.cpp
1 /*
2  * Copyright 2011-2013 Blender Foundation
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16
17 #include "camera.h"
18 #include "mesh.h"
19 #include "object.h"
20 #include "scene.h"
21 #include "tables.h"
22
23 #include "device.h"
24
25 #include "util_foreach.h"
26 #include "util_function.h"
27 #include "util_math_cdf.h"
28 #include "util_vector.h"
29
30 CCL_NAMESPACE_BEGIN
31
32 static float shutter_curve_eval(float x,
33                                 array<float>& shutter_curve)
34 {
35         if (shutter_curve.size() == 0)
36                 return 1.0f;
37
38         x *= shutter_curve.size();
39         int index = (int)x;
40         float frac = x - index;
41         if(index < shutter_curve.size() - 1) {
42                 return lerp(shutter_curve[index], shutter_curve[index + 1], frac);
43         }
44         else {
45                 return shutter_curve[shutter_curve.size() - 1];
46         }
47 }
48
/* Register the "camera" node type and declare all of its sockets together
 * with their default values. */
NODE_DEFINE(Camera)
{
	NodeType* type = NodeType::add("camera", create);

	SOCKET_FLOAT(shuttertime, "Shutter Time", 1.0f);

	/* Position of the shutter interval relative to the current frame. */
	static NodeEnum motion_position_enum;
	motion_position_enum.insert("start", MOTION_POSITION_START);
	motion_position_enum.insert("center", MOTION_POSITION_CENTER);
	motion_position_enum.insert("end", MOTION_POSITION_END);
	SOCKET_ENUM(motion_position, "Motion Position", motion_position_enum, MOTION_POSITION_CENTER);

	/* Rolling shutter simulation. */
	static NodeEnum rolling_shutter_type_enum;
	rolling_shutter_type_enum.insert("none", ROLLING_SHUTTER_NONE);
	rolling_shutter_type_enum.insert("top", ROLLING_SHUTTER_TOP);
	SOCKET_ENUM(rolling_shutter_type, "Rolling Shutter Type", rolling_shutter_type_enum,  ROLLING_SHUTTER_NONE);
	SOCKET_FLOAT(rolling_shutter_duration, "Rolling Shutter Duration", 0.1f);

	SOCKET_FLOAT_ARRAY(shutter_curve, "Shutter Curve", array<float>());

	/* Depth of field. */
	SOCKET_FLOAT(aperturesize, "Aperture Size", 0.0f);
	SOCKET_FLOAT(focaldistance, "Focal Distance", 10.0f);
	SOCKET_INT(blades, "Blades", 0);
	SOCKET_FLOAT(bladesrotation, "Blades Rotation", 0.0f);

	SOCKET_TRANSFORM(matrix, "Matrix", transform_identity());

	SOCKET_FLOAT(aperture_ratio, "Aperture Ratio", 1.0f);

	/* Projection type.
	 * NOTE(review): the enum key "orthograph" (not "orthographic") looks
	 * like a typo, but renaming it could break existing users of the node
	 * API — confirm before changing. */
	static NodeEnum type_enum;
	type_enum.insert("perspective", CAMERA_PERSPECTIVE);
	type_enum.insert("orthograph", CAMERA_ORTHOGRAPHIC);
	type_enum.insert("panorama", CAMERA_PANORAMA);
	SOCKET_ENUM(type, "Type", type_enum, CAMERA_PERSPECTIVE);

	/* Panorama projection sub-type. */
	static NodeEnum panorama_type_enum;
	panorama_type_enum.insert("equirectangular", PANORAMA_EQUIRECTANGULAR);
	panorama_type_enum.insert("mirrorball", PANORAMA_MIRRORBALL);
	panorama_type_enum.insert("fisheye_equidistant", PANORAMA_FISHEYE_EQUIDISTANT);
	panorama_type_enum.insert("fisheye_equisolid", PANORAMA_FISHEYE_EQUISOLID);
	SOCKET_ENUM(panorama_type, "Panorama Type", panorama_type_enum, PANORAMA_EQUIRECTANGULAR);

	/* Angular ranges, in radians. */
	SOCKET_FLOAT(fisheye_fov, "Fisheye FOV", M_PI_F);
	SOCKET_FLOAT(fisheye_lens, "Fisheye Lens", 10.5f);
	SOCKET_FLOAT(latitude_min, "Latitude Min", -M_PI_2_F);
	SOCKET_FLOAT(latitude_max, "Latitude Max", M_PI_2_F);
	SOCKET_FLOAT(longitude_min, "Longitude Min", -M_PI_F);
	SOCKET_FLOAT(longitude_max, "Longitude Max", M_PI_F);
	SOCKET_FLOAT(fov, "FOV", M_PI_4_F);
	SOCKET_FLOAT(fov_pre, "FOV Pre", M_PI_4_F);
	SOCKET_FLOAT(fov_post, "FOV Post", M_PI_4_F);

	/* Stereoscopic rendering. */
	static NodeEnum stereo_eye_enum;
	stereo_eye_enum.insert("none", STEREO_NONE);
	stereo_eye_enum.insert("left", STEREO_LEFT);
	stereo_eye_enum.insert("right", STEREO_RIGHT);
	SOCKET_ENUM(stereo_eye, "Stereo Eye", stereo_eye_enum, STEREO_NONE);

	SOCKET_FLOAT(interocular_distance, "Interocular Distance", 0.065f);
	SOCKET_FLOAT(convergence_distance, "Convergence Distance", 30.0f * 0.065f);

	/* Pole merge reduces the stereo effect toward the poles; angles are
	 * stored in radians. */
	SOCKET_BOOLEAN(use_pole_merge, "Use Pole Merge", false);
	SOCKET_FLOAT(pole_merge_angle_from, "Pole Merge Angle From",  60.0f * M_PI_F / 180.0f);
	SOCKET_FLOAT(pole_merge_angle_to, "Pole Merge Angle To", 75.0f * M_PI_F / 180.0f);

	/* Sensor size, in meters (defaults match a 36x24mm full frame). */
	SOCKET_FLOAT(sensorwidth, "Sensor Width", 0.036f);
	SOCKET_FLOAT(sensorheight, "Sensor Height", 0.024f);

	/* Clipping distances. */
	SOCKET_FLOAT(nearclip, "Near Clip", 1e-5f);
	SOCKET_FLOAT(farclip, "Far Clip", 1e5f);

	/* Viewplane and render border rectangles. */
	SOCKET_FLOAT(viewplane.left, "Viewplane Left", 0);
	SOCKET_FLOAT(viewplane.right, "Viewplane Right", 0);
	SOCKET_FLOAT(viewplane.bottom, "Viewplane Bottom", 0);
	SOCKET_FLOAT(viewplane.top, "Viewplane Top", 0);

	SOCKET_FLOAT(border.left, "Border Left", 0);
	SOCKET_FLOAT(border.right, "Border Right", 0);
	SOCKET_FLOAT(border.bottom, "Border Bottom", 0);
	SOCKET_FLOAT(border.top, "Border Top", 0);

	return type;
}
132
133 Camera::Camera()
134 : Node(node_type)
135 {
136         shutter_table_offset = TABLE_OFFSET_INVALID;
137
138         width = 1024;
139         height = 512;
140         resolution = 1;
141
142         motion.pre = transform_identity();
143         motion.post = transform_identity();
144         use_motion = false;
145         use_perspective_motion = false;
146
147         shutter_curve.resize(RAMP_TABLE_SIZE);
148         for(int i = 0; i < shutter_curve.size(); ++i) {
149                 shutter_curve[i] = 1.0f;
150         }
151
152         compute_auto_viewplane();
153
154         screentoworld = transform_identity();
155         rastertoworld = transform_identity();
156         ndctoworld = transform_identity();
157         rastertocamera = transform_identity();
158         cameratoworld = transform_identity();
159         worldtoraster = transform_identity();
160
161         dx = make_float3(0.0f, 0.0f, 0.0f);
162         dy = make_float3(0.0f, 0.0f, 0.0f);
163
164         need_update = true;
165         need_device_update = true;
166         need_flags_update = true;
167         previous_need_motion = -1;
168 }
169
/* Nothing to release here; the shutter lookup-table entry is freed via
 * device_free(). */
Camera::~Camera()
{
}
173
174 void Camera::compute_auto_viewplane()
175 {
176         if(type == CAMERA_PANORAMA) {
177                 viewplane.left = 0.0f;
178                 viewplane.right = 1.0f;
179                 viewplane.bottom = 0.0f;
180                 viewplane.top = 1.0f;
181         }
182         else {
183                 float aspect = (float)width/(float)height;
184                 if(width >= height) {
185                         viewplane.left = -aspect;
186                         viewplane.right = aspect;
187                         viewplane.bottom = -1.0f;
188                         viewplane.top = 1.0f;
189                 }
190                 else {
191                         viewplane.left = -1.0f;
192                         viewplane.right = 1.0f;
193                         viewplane.bottom = -1.0f/aspect;
194                         viewplane.top = 1.0f/aspect;
195                 }
196         }
197 }
198
/* Recompute all derived transforms and pixel differentials from the
 * current camera parameters. No-op when nothing has changed since the
 * last update. */
void Camera::update()
{
	if(!need_update)
		return;

	/* Full viewport to camera border in the viewport. */
	Transform fulltoborder = transform_from_viewplane(viewport_camera_border);
	Transform bordertofull = transform_inverse(fulltoborder);

	/* ndc to raster */
	Transform ndctoraster = transform_scale(width, height, 1.0f) * bordertofull;
	Transform full_ndctoraster = transform_scale(full_width, full_height, 1.0f) * bordertofull;

	/* raster to screen */
	Transform screentondc = fulltoborder * transform_from_viewplane(viewplane);

	Transform screentoraster = ndctoraster * screentondc;
	Transform rastertoscreen = transform_inverse(screentoraster);
	Transform full_screentoraster = full_ndctoraster * screentondc;
	Transform full_rastertoscreen = transform_inverse(full_screentoraster);

	/* screen to camera: projection depends on the camera type; panoramas
	 * use identity because projection happens in the kernel instead. */
	Transform cameratoscreen;
	if(type == CAMERA_PERSPECTIVE)
		cameratoscreen = transform_perspective(fov, nearclip, farclip);
	else if(type == CAMERA_ORTHOGRAPHIC)
		cameratoscreen = transform_orthographic(nearclip, farclip);
	else
		cameratoscreen = transform_identity();

	Transform screentocamera = transform_inverse(cameratoscreen);

	/* Compose the forward chains raster -> camera -> world. */
	rastertocamera = screentocamera * rastertoscreen;
	Transform full_rastertocamera = screentocamera * full_rastertoscreen;
	cameratoraster = screentoraster * cameratoscreen;

	cameratoworld = matrix;
	screentoworld = cameratoworld * screentocamera;
	rastertoworld = cameratoworld * rastertocamera;
	ndctoworld = rastertoworld * ndctoraster;

	/* note we recompose matrices instead of taking inverses of the above, this
	 * is needed to avoid inverting near degenerate matrices that happen due to
	 * precision issues with large scenes */
	worldtocamera = transform_inverse(matrix);
	worldtoscreen = cameratoscreen * worldtocamera;
	worldtondc = screentondc * worldtoscreen;
	worldtoraster = ndctoraster * worldtondc;

	/* differentials: camera-space step per one raster pixel, for both the
	 * border-cropped and the full resolution. */
	if(type == CAMERA_ORTHOGRAPHIC) {
		dx = transform_direction(&rastertocamera, make_float3(1, 0, 0));
		dy = transform_direction(&rastertocamera, make_float3(0, 1, 0));
		full_dx = transform_direction(&full_rastertocamera, make_float3(1, 0, 0));
		full_dy = transform_direction(&full_rastertocamera, make_float3(0, 1, 0));
	}
	else if(type == CAMERA_PERSPECTIVE) {
		/* Perspective needs a finite difference against the origin pixel
		 * because the projection is not linear in raster space. */
		dx = transform_perspective(&rastertocamera, make_float3(1, 0, 0)) -
		     transform_perspective(&rastertocamera, make_float3(0, 0, 0));
		dy = transform_perspective(&rastertocamera, make_float3(0, 1, 0)) -
		     transform_perspective(&rastertocamera, make_float3(0, 0, 0));
		full_dx = transform_perspective(&full_rastertocamera, make_float3(1, 0, 0)) -
		     transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
		full_dy = transform_perspective(&full_rastertocamera, make_float3(0, 1, 0)) -
		     transform_perspective(&full_rastertocamera, make_float3(0, 0, 0));
	}
	else {
		dx = make_float3(0.0f, 0.0f, 0.0f);
		dy = make_float3(0.0f, 0.0f, 0.0f);
	}

	/* Bring the differentials into world space. */
	dx = transform_direction(&cameratoworld, dx);
	dy = transform_direction(&cameratoworld, dy);
	full_dx = transform_direction(&cameratoworld, full_dx);
	full_dy = transform_direction(&cameratoworld, full_dy);

	/* TODO(sergey): Support other types of camera. */
	if(type == CAMERA_PERSPECTIVE) {
		/* TODO(sergey): Move to an utility function and de-duplicate with
		 * calculation above.
		 */
		Transform screentocamera_pre =
		        transform_inverse(transform_perspective(fov_pre,
		                                                nearclip,
		                                                farclip));
		Transform screentocamera_post =
		        transform_inverse(transform_perspective(fov_post,
		                                                nearclip,
		                                                farclip));
		perspective_motion.pre = screentocamera_pre * rastertoscreen;
		perspective_motion.post = screentocamera_post * rastertoscreen;
	}

	/* Derived data is current; the device copy still needs a refresh. */
	need_update = false;
	need_device_update = true;
	need_flags_update = true;
}
296
/* Copy camera state into the kernel camera data for rendering, including
 * motion transforms and the shutter-curve lookup table. No-op unless a
 * device update is needed. */
void Camera::device_update(Device *device, DeviceScene *dscene, Scene *scene)
{
	Scene::MotionType need_motion = scene->need_motion(device->info.advanced_shading);

	update();

	if(previous_need_motion != need_motion) {
		/* scene's motion model could have been changed since previous device
		 * camera update this could happen for example in case when one render
		 * layer has got motion pass and another not */
		need_device_update = true;
	}

	if(!need_device_update)
		return;

	KernelCamera *kcam = &dscene->data.cam;

	/* store matrices */
	kcam->screentoworld = screentoworld;
	kcam->rastertoworld = rastertoworld;
	kcam->rastertocamera = rastertocamera;
	kcam->cameratoworld = cameratoworld;
	kcam->worldtocamera = worldtocamera;
	kcam->worldtoscreen = worldtoscreen;
	kcam->worldtoraster = worldtoraster;
	kcam->worldtondc = worldtondc;

	/* camera motion */
	kcam->have_motion = 0;
	kcam->have_perspective_motion = 0;

	if(need_motion == Scene::MOTION_PASS) {
		/* TODO(sergey): Support perspective (zoom, fov) motion. */
		if(type == CAMERA_PANORAMA) {
			/* Panoramas store world-to-camera motion; projection is done
			 * in the kernel. */
			if(use_motion) {
				kcam->motion.pre = transform_inverse(motion.pre);
				kcam->motion.post = transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = kcam->worldtocamera;
				kcam->motion.post = kcam->worldtocamera;
			}
		}
		else {
			/* Other types store full world-to-raster motion. */
			if(use_motion) {
				kcam->motion.pre = cameratoraster * transform_inverse(motion.pre);
				kcam->motion.post = cameratoraster * transform_inverse(motion.post);
			}
			else {
				kcam->motion.pre = worldtoraster;
				kcam->motion.post = worldtoraster;
			}
		}
	}
#ifdef __CAMERA_MOTION__
	else if(need_motion == Scene::MOTION_BLUR) {
		if(use_motion) {
			transform_motion_decompose((DecompMotionTransform*)&kcam->motion, &motion, &matrix);
			kcam->have_motion = 1;
		}
		if(use_perspective_motion) {
			kcam->perspective_motion = perspective_motion;
			kcam->have_perspective_motion = 1;
		}
	}
#endif

	/* depth of field; blade counts below 3 are stored as 0 */
	kcam->aperturesize = aperturesize;
	kcam->focaldistance = focaldistance;
	kcam->blades = (blades < 3)? 0.0f: blades;
	kcam->bladesrotation = bladesrotation;

	/* motion blur: -1 shutter time signals "no motion blur" */
#ifdef __CAMERA_MOTION__
	kcam->shuttertime = (need_motion == Scene::MOTION_BLUR) ? shuttertime: -1.0f;

	/* Rebuild the inverted-CDF table from the shutter curve; the old
	 * table entry is always released first. */
	scene->lookup_tables->remove_table(&shutter_table_offset);
	if(need_motion == Scene::MOTION_BLUR) {
		vector<float> shutter_table;
		util_cdf_inverted(SHUTTER_TABLE_SIZE,
		                  0.0f,
		                  1.0f,
		                  function_bind(shutter_curve_eval, _1, shutter_curve),
		                  false,
		                  shutter_table);
		shutter_table_offset = scene->lookup_tables->add_table(dscene,
		                                                       shutter_table);
		kcam->shutter_table_offset = (int)shutter_table_offset;
	}
#else
	kcam->shuttertime = -1.0f;
#endif

	/* type */
	kcam->type = type;

	/* anamorphic lens bokeh */
	kcam->inv_aperture_ratio = 1.0f / aperture_ratio;

	/* panorama */
	kcam->panorama_type = panorama_type;
	kcam->fisheye_fov = fisheye_fov;
	kcam->fisheye_lens = fisheye_lens;
	kcam->equirectangular_range = make_float4(longitude_min - longitude_max, -longitude_min,
	                                          latitude_min -  latitude_max, -latitude_min + M_PI_2_F);

	/* Offset each stereo eye by half the interocular distance. */
	switch(stereo_eye) {
		case STEREO_LEFT:
			kcam->interocular_offset = -interocular_distance * 0.5f;
			break;
		case STEREO_RIGHT:
			kcam->interocular_offset = interocular_distance * 0.5f;
			break;
		case STEREO_NONE:
		default:
			kcam->interocular_offset = 0.0f;
			break;
	}

	kcam->convergence_distance = convergence_distance;
	/* Negative angles mark pole merge as disabled. */
	if(use_pole_merge) {
		kcam->pole_merge_angle_from = pole_merge_angle_from;
		kcam->pole_merge_angle_to = pole_merge_angle_to;
	}
	else {
		kcam->pole_merge_angle_from = -1.0f;
		kcam->pole_merge_angle_to = -1.0f;
	}

	/* sensor size */
	kcam->sensorwidth = sensorwidth;
	kcam->sensorheight = sensorheight;

	/* render size */
	kcam->width = width;
	kcam->height = height;
	kcam->resolution = resolution;

	/* store differentials */
	kcam->dx = float3_to_float4(dx);
	kcam->dy = float3_to_float4(dy);

	/* clipping */
	kcam->nearclip = nearclip;
	kcam->cliplength = (farclip == FLT_MAX)? FLT_MAX: farclip - nearclip;

	/* Camera in volume; refined later by device_update_volume(). */
	kcam->is_inside_volume = 0;

	/* Rolling shutter effect */
	kcam->rolling_shutter_type = rolling_shutter_type;
	kcam->rolling_shutter_duration = rolling_shutter_duration;

	previous_need_motion = need_motion;
}
454
455 void Camera::device_update_volume(Device * /*device*/,
456                                   DeviceScene *dscene,
457                                   Scene *scene)
458 {
459         if(!need_device_update && !need_flags_update) {
460                 return;
461         }
462         KernelCamera *kcam = &dscene->data.cam;
463         BoundBox viewplane_boundbox = viewplane_bounds_get();
464         for(size_t i = 0; i < scene->objects.size(); ++i) {
465                 Object *object = scene->objects[i];
466                 if(object->mesh->has_volume &&
467                    viewplane_boundbox.intersects(object->bounds))
468                 {
469                         /* TODO(sergey): Consider adding more grained check. */
470                         kcam->is_inside_volume = 1;
471                         break;
472                 }
473         }
474         need_device_update = false;
475         need_flags_update = false;
476 }
477
/* Release device-side camera resources: currently only the shutter-curve
 * lookup table entry, if one was allocated. */
void Camera::device_free(Device * /*device*/,
                         DeviceScene * /*dscene*/,
                         Scene *scene)
{
	scene->lookup_tables->remove_table(&shutter_table_offset);
}
484
/* Return true when any node socket value differs from the given camera.
 * Motion members are compared separately by motion_modified(). */
bool Camera::modified(const Camera& cam)
{
	return !Node::equals(cam);
}
489
490 bool Camera::motion_modified(const Camera& cam)
491 {
492         return !((motion == cam.motion) &&
493                  (use_motion == cam.use_motion) &&
494                  (use_perspective_motion == cam.use_perspective_motion));
495 }
496
/* Mark the camera so the next update() recomputes derived data. */
void Camera::tag_update()
{
	need_update = true;
}
501
/* Return the world-space point on the camera ray through the given raster
 * position, offset to the near clip distance. Only perspective and
 * orthographic cameras are handled; other types assert and return an
 * uninitialized point. */
float3 Camera::transform_raster_to_world(float raster_x, float raster_y)
{
	float3 D, P;
	if(type == CAMERA_PERSPECTIVE) {
		D = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		/* Pclip.z scales the near-clip offset along the unnormalized
		 * camera-space direction below. */
		float3 Pclip = normalize(D);
		P = make_float3(0.0f, 0.0f, 0.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
		/* TODO(sergey): Clipping is conditional in kernel, and hence it could
		 * be mistakes in here, currently leading to wrong camera-in-volume
		 * detection.
		 */
		P += nearclip * D / Pclip.z;
	}
	else if(type == CAMERA_ORTHOGRAPHIC) {
		/* Orthographic rays all point along +Z in camera space. */
		D = make_float3(0.0f, 0.0f, 1.0f);
		/* TODO(sergey): Aperture support? */
		P = transform_perspective(&rastertocamera,
		                          make_float3(raster_x, raster_y, 0.0f));
		P = transform_point(&cameratoworld, P);
		D = normalize(transform_direction(&cameratoworld, D));
	}
	else {
		assert(!"unsupported camera type");
	}
	return P;
}
532
533 BoundBox Camera::viewplane_bounds_get()
534 {
535         /* TODO(sergey): This is all rather stupid, but is there a way to perform
536          * checks we need in a more clear and smart fasion?
537          */
538         BoundBox bounds = BoundBox::empty;
539
540         if(type == CAMERA_PANORAMA) {
541                 if(use_spherical_stereo == false) {
542                         bounds.grow(make_float3(cameratoworld.x.w,
543                                                 cameratoworld.y.w,
544                                                 cameratoworld.z.w));
545                 }
546                 else {
547                         float half_eye_distance = interocular_distance * 0.5f;
548
549                         bounds.grow(make_float3(cameratoworld.x.w + half_eye_distance,
550                                                 cameratoworld.y.w,
551                                                 cameratoworld.z.w));
552
553                         bounds.grow(make_float3(cameratoworld.z.w,
554                                                 cameratoworld.y.w + half_eye_distance,
555                                                 cameratoworld.z.w));
556
557                         bounds.grow(make_float3(cameratoworld.x.w - half_eye_distance,
558                                                 cameratoworld.y.w,
559                                                 cameratoworld.z.w));
560
561                         bounds.grow(make_float3(cameratoworld.x.w,
562                                                 cameratoworld.y.w - half_eye_distance,
563                                                 cameratoworld.z.w));
564                 }
565         }
566         else {
567                 bounds.grow(transform_raster_to_world(0.0f, 0.0f));
568                 bounds.grow(transform_raster_to_world(0.0f, (float)height));
569                 bounds.grow(transform_raster_to_world((float)width, (float)height));
570                 bounds.grow(transform_raster_to_world((float)width, 0.0f));
571                 if(type == CAMERA_PERSPECTIVE) {
572                         /* Center point has the most distance in local Z axis,
573                          * use it to construct bounding box/
574                          */
575                         bounds.grow(transform_raster_to_world(0.5f*width, 0.5f*height));
576                 }
577         }
578         return bounds;
579 }
580
581 float Camera::world_to_raster_size(float3 P)
582 {
583         if(type == CAMERA_ORTHOGRAPHIC) {
584                 return min(len(full_dx), len(full_dy));
585         }
586         else if(type == CAMERA_PERSPECTIVE) {
587                 /* Calculate as if point is directly ahead of the camera. */
588                 float3 raster = make_float3(0.5f*width, 0.5f*height, 0.0f);
589                 float3 Pcamera = transform_perspective(&rastertocamera, raster);
590
591                 /* dDdx */
592                 float3 Ddiff = transform_direction(&cameratoworld, Pcamera);
593                 float3 dx = len_squared(full_dx) < len_squared(full_dy) ? full_dx : full_dy;
594                 float3 dDdx = normalize(Ddiff + dx) - normalize(Ddiff);
595
596                 /* dPdx */
597                 float dist = len(transform_point(&worldtocamera, P));
598                 float3 D = normalize(Ddiff);
599                 return len(dist*dDdx - dot(dist*dDdx, D)*D);
600         }
601         else {
602                 // TODO(mai): implement for CAMERA_PANORAMA
603                 assert(!"pixel width calculation for panoramic projection not implemented yet");
604         }
605
606         return 1.0f;
607 }
608
609 CCL_NAMESPACE_END