Code cleanup: spelling fix, Tesselection->tesselation
[blender.git] / release / scripts / startup / bl_operators / clip.py
1 # ##### BEGIN GPL LICENSE BLOCK #####
2 #
3 #  This program is free software; you can redistribute it and/or
4 #  modify it under the terms of the GNU General Public License
5 #  as published by the Free Software Foundation; either version 2
6 #  of the License, or (at your option) any later version.
7 #
8 #  This program is distributed in the hope that it will be useful,
9 #  but WITHOUT ANY WARRANTY; without even the implied warranty of
10 #  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11 #  GNU General Public License for more details.
12 #
13 #  You should have received a copy of the GNU General Public License
14 #  along with this program; if not, write to the Free Software Foundation,
15 #  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 #
17 # ##### END GPL LICENSE BLOCK #####
18
19 # <pep8 compliant>
20 import bpy
21 import os
22 from bpy.types import Operator
23
24 from mathutils import Vector, Matrix
25
26
def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
    """Invoke *callback* (with *args* appended) on every space of type
    *tspace* found inside areas of type *tarea*, scanning either every
    screen in the file or only the currently active one.
    """
    if all_screens:
        screens = bpy.data.screens
    else:
        screens = [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type != tarea:
                continue
            for space in area.spaces:
                if space.type == tspace:
                    callback(space, *args)
36
37
def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
    """Show *clip* as an undistorted camera background image in every 3D
    viewport, reusing an existing movie-clip background slot when present.
    """

    def set_background(space_v3d, clip, user):
        # Look for a background slot already configured for a movie clip.
        bgpic = None
        for image in space_v3d.background_images:
            if image.source == 'MOVIE_CLIP':
                bgpic = image
                break

        if not bgpic:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     set_background, clip, clip_user)
61
62
def CLIP_camera_for_clip(context, clip):
    """Return the camera object whose Camera Solver constraint uses
    *clip*; fall back to the scene's active camera when none matches.
    """
    scene = context.scene

    for ob in scene.objects:
        if ob.type != 'CAMERA':
            continue
        for con in ob.constraints:
            if con.type != 'CAMERA_SOLVER':
                continue
            # The constraint may point at the scene's active clip
            # instead of carrying its own clip reference.
            if con.use_active_clip:
                cur_clip = scene.active_clip
            else:
                cur_clip = con.clip
            if cur_clip == clip:
                return ob

    return scene.camera
78
79
def CLIP_track_view_selected(sc, track):
    """Return True when any visible part of *track* (its anchor, or the
    pattern/search areas while they are displayed) is selected in the
    clip editor space *sc*.
    """
    return bool(track.select_anchor or
                (sc.show_marker_pattern and track.select_pattern) or
                (sc.show_marker_search and track.select_search))
91
92
def CLIP_default_settings_from_track(clip, track):
    """Copy tracking options from *track* into *clip*'s default tracking
    settings, so newly created tracks start out configured the same way.

    Pattern and search areas are stored in normalized clip coordinates on
    the track; they are converted to pixel sizes here before being stored
    as defaults.
    """
    settings = clip.tracking.settings

    # Previously these were computed but clip.size was re-read instead;
    # use the unpacked dimensions consistently.
    width, height = clip.size

    pattern = track.pattern_max - track.pattern_min
    search = track.search_max - track.search_min

    # Convert from normalized clip space to pixels.
    pattern[0] *= width
    pattern[1] *= height

    search[0] *= width
    search[1] *= height

    settings.default_tracker = track.tracker
    settings.default_pyramid_levels = track.pyramid_levels
    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
119
120
class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will be copying movement of active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    def _link_track(self, context, clip, tracking_object, track):
        """Create an empty named after *track*, select/activate it, and
        attach a Follow Track constraint bound to the track.
        """
        sc = context.space_data

        # Create a fresh empty and make it the selected, active object.
        # (The dead "ob = None" pre-initialization has been removed.)
        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select = True
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        # Reuse a Follow Track constraint if one is somehow present;
        # a freshly created empty normally has none.
        constraint = None
        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        # Link one empty per track that is selected in the clip editor.
        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}
162
163
class CLIP_OT_bundles_to_mesh(Operator):
    """Create vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Requires a clip editor with a loaded clip.
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        tracking_object = sc.clip.tracking.objects.active

        mesh = bpy.data.meshes.new(name="Tracks")

        # One vertex per track that has a reconstructed 3D bundle.
        bundle_positions = [track.bundle
                            for track in tracking_object.tracks
                            if track.has_bundle]

        if bundle_positions:
            mesh.vertices.add(len(bundle_positions))
            mesh.vertices.foreach_set("co", unpack_list(bundle_positions))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)
        context.scene.objects.link(ob)

        return {'FINISHED'}
199
200
class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Only available in a clip editor that has a clip loaded.
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        # Deleting files from disk is destructive -- ask for
        # confirmation before running execute().
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    def _rmproxy(self, abspath):
        """Remove the proxy file or directory at *abspath*; paths that
        do not exist are silently ignored.
        """
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        # Proxies live either in a user-chosen custom directory or in a
        # "BL_proxy" directory next to the clip file.
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # Remove proxy_<quality>[_undistorted] directories and AVIs for
        # every proxy quality level.
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        # Timecode index files built for the clip.
        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove proxy per-clip directory (only succeeds when empty)
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove [custom] proxy directory if empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}
275
276
class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D viewport \
(works only when a 3D viewport is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Requires a clip editor with a loaded clip.
        sc = context.space_data
        if sc.type != 'CLIP_EDITOR':
            return False

        return sc.clip

    def execute(self, context):
        sc = context.space_data
        # Only touch viewports on the current screen (all_screens=False).
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)

        return {'FINISHED'}
299
300
class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for object which will copy \
object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
        """Bake the motion produced by *ob*'s tracking constraint into
        location/rotation keyframes, then remove the constraint.
        """
        con = None
        clip = None
        sfra = None  # earliest frame covered by any track
        efra = None  # latest frame covered by any track
        frame_current = scene.frame_current  # restored after baking
        matrices = []

        # Find constraint which would be converting
        # TODO: several camera solvers and track followers would fail,
        #       but can't think about real workflow where it'll be useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        # A Follow Track constraint using 3D position is a static offset:
        # applying the current world matrix and dropping the constraint
        # is enough, no per-frame baking needed.
        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        # No tracks with markers -- nothing to bake.
        if sfra is None or efra is None:
            return

        # Store object matrices
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on object and insert keyframes
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        # Restore the frame the user was on before baking.
        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in hidden layer!
        for ob in scene.objects:
            if ob.select:
                self._bake_object(scene, ob)

        return {'FINISHED'}
399
400
class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Needs a clip editor whose clip has a valid camera
        # reconstruction.
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        # Make the clip shown in the editor the scene's active clip.
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        """Configure world lighting (AO + environment light); creates a
        world datablock when the scene has none.
        """
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005

    @staticmethod
    def _findOrCreateCamera(context):
        """Return the scene camera, creating and linking one at a default
        position when the scene has none.
        """
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)

        scene.camera = camob

        # Default camera placement (matches Blender's startup camera).
        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
            Matrix.Rotation(0.815, 4, 'Z') *
            Matrix.Rotation(0.011, 4, 'Y') *
            Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        """Attach a Camera Solver constraint to the scene camera and copy
        the solved camera intrinsics onto it.
        """
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        # Show the clip as camera background in 3D viewports of all
        # screens (all_screens=True).
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        """Create "Foreground" and "Background" render layers with the
        layer/zmask/pass configuration the compositing nodes expect.
        """
        scene = context.scene
        rlayers = scene.render.layers

        if not scene.render.layers.get("Foreground"):
            # Rename the sole existing layer rather than adding a second.
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render.layers.new('Foreground')

            fg.use_sky = False
            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render.layers.get("Background"):
            bg = scene.render.layers.new('Background')
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _findNode(tree, type):
        """Return the first node of the given type in *tree*, or None."""
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        """Return an existing node of the given type, creating one when
        *tree* has none.
        """
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
        """Check whether the compositor node setup should be created:
        True when there is no tree yet, or the tree has no movie-clip /
        movie-distortion nodes.
        """
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
        # Nudge any node that exactly overlaps another so nothing hides
        # behind an existing node.
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
        """Build the compositing node graph that layers the foreground
        render over the (shadow/AO-multiplied) footage.
        """
        if not self._needSetupNodes(context):
            # Compositor nodes already exist (or were changed by the
            # user) -- do nothing to avoid damaging them.
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                          setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        # 2D stabilization branch is currently disabled.
        need_stabilization = False

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'R_LAYERS')
        rlayer_bg = tree.nodes.new(type='R_LAYERS')
        composite = self._findOrCreateNode(tree, 'COMPOSITE')

        movieclip = tree.nodes.new(type='MOVIECLIP')
        distortion = tree.nodes.new(type='MOVIEDISTORTION')

        if need_stabilization:
            stabilize = tree.nodes.new(type='STABILIZE2D')

        scale = tree.nodes.new(type='SCALE')
        invert = tree.nodes.new(type='INVERT')
        add_ao = tree.nodes.new(type='MIX_RGB')
        add_shadow = tree.nodes.new(type='MIX_RGB')
        mul_shadow = tree.nodes.new(type='MIX_RGB')
        mul_image = tree.nodes.new(type='MIX_RGB')
        vector_blur = tree.nodes.new(type='VECBLUR')
        alphaover = tree.nodes.new(type='ALPHAOVER')
        viewer = tree.nodes.new(type='VIEWER')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_shadow.blend_type = 'ADD'

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs['Fac'].default_value = 0.8

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs['Fac'].default_value = 0.8

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs['Image'], distortion.inputs['Image'])

        if need_stabilization:
            tree.links.new(distortion.outputs['Image'],
                stabilize.inputs['Image'])
            tree.links.new(stabilize.outputs['Image'], scale.inputs['Image'])
        else:
            tree.links.new(distortion.outputs['Image'], scale.inputs['Image'])

        # Inverted background alpha masks where shadow/AO apply.
        tree.links.new(rlayer_bg.outputs['Alpha'], invert.inputs['Color'])

        tree.links.new(invert.outputs['Color'], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs['Shadow'], add_shadow.inputs[2])

        tree.links.new(invert.outputs['Color'], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs['AO'], add_ao.inputs[2])

        tree.links.new(add_ao.outputs['Image'], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs['Image'], mul_shadow.inputs[2])

        tree.links.new(scale.outputs['Image'], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs['Image'], mul_image.inputs[2])

        # Foreground goes through vector blur for motion blur.
        tree.links.new(rlayer_fg.outputs['Image'], vector_blur.inputs['Image'])
        tree.links.new(rlayer_fg.outputs['Z'], vector_blur.inputs['Z'])
        tree.links.new(rlayer_fg.outputs['Speed'], vector_blur.inputs['Speed'])

        tree.links.new(mul_image.outputs['Image'], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs['Image'], alphaover.inputs[2])

        tree.links.new(alphaover.outputs['Image'], composite.inputs['Image'])
        tree.links.new(alphaover.outputs['Image'], viewer.inputs['Image'])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        # NOTE(review): this moves the composite node after its position
        # was copied to the viewer; presumably the viewer was meant to be
        # offset instead -- confirm intent.
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were created on position of existing node
        self._offsetNodes(tree)

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
        """Create a mesh object from *vertices*/*faces*, link it into
        *scene* and return the new object.
        """
        from bpy_extras.io_utils import unpack_list, unpack_face_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        mesh.faces.add(len(faces))
        mesh.faces.foreach_set("vertices_raw", unpack_face_list(faces))

        mesh.update(calc_edges=True)

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):
        # Four corners of an axis-aligned square of the given half size
        # lying at height z.

        return [(-half_size, -half_size, z),
                (-half_size, half_size, z),
                (half_size, half_size, z),
                (half_size, -half_size, z)]

    def _createGround(self, scene):
        """Create the shadow-catcher ground plane, tagged with a custom
        "is_ground" property so it can be found again later.
        """
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [(0, 1, 2, 3)]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        """Return the first mesh object tagged as ground, or None."""
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):
        # Element-wise OR of two layer-visibility lists.

        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        """Create and link a point lamp with ray shadows enabled."""
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.distance = 30
        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        """Create a sample cube from two stacked plane vertex rings."""
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = ((0, 1, 2, 3),
                 (4, 7, 6, 5),
                 (0, 4, 5, 1),
                 (1, 5, 6, 2),
                 (2, 6, 7, 3),
                 (3, 7, 4, 0))

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
        """Make sure the scene has lamps, a sample object and a ground
        plane placed on the render layers the node setup expects.
        """
        scene = context.scene

        fg = scene.render.layers.get("Foreground")
        bg = scene.render.layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create sample lamp if there's no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create sample object if there's no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure ground is available on Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        # Run every setup stage in order; each stage is idempotent
        # enough to be re-run on an already-prepared scene.
        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        return {'FINISHED'}
836
837
class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Requires a clip editor with a clip that has an active track.
        sc = context.space_data
        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        clip = context.space_data.clip
        CLIP_default_settings_from_track(clip, clip.tracking.tracks.active)

        return {'FINISHED'}