Merged revision(s) 57423-57498 from trunk/blender into soc-2013-dingto
[blender-staging.git] / release / scripts / startup / bl_operators / clip.py
1 # ##### BEGIN GPL LICENSE BLOCK #####
2 #
3 #  This program is free software; you can redistribute it and/or
4 #  modify it under the terms of the GNU General Public License
5 #  as published by the Free Software Foundation; either version 2
6 #  of the License, or (at your option) any later version.
7 #
8 #  This program is distributed in the hope that it will be useful,
9 #  but WITHOUT ANY WARRANTY; without even the implied warranty of
10 #  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11 #  GNU General Public License for more details.
12 #
13 #  You should have received a copy of the GNU General Public License
14 #  along with this program; if not, write to the Free Software Foundation,
15 #  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 #
17 # ##### END GPL LICENSE BLOCK #####
18
19 # <pep8 compliant>
20 import bpy
21 import os
22 from bpy.types import Operator
23
24 from mathutils import Vector, Matrix
25
26
def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
    """Invoke *callback* (with *args* appended) on every space of type
    *tspace* inside areas of type *tarea*, scanning either every screen
    in the file or only the current one.
    """
    if all_screens:
        screens = bpy.data.screens
    else:
        screens = [context.screen]

    matching_spaces = (space
                       for screen in screens
                       for area in screen.areas
                       if area.type == tarea
                       for space in area.spaces
                       if space.type == tspace)

    for space in matching_spaces:
        callback(space, *args)
36
37
def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
    """Show *clip* as the camera background image of every visible 3D
    view-port (across all screens when *all_screens* is set).
    """

    def apply_background(space_v3d, clip, user):
        # Reuse an existing movie-clip background slot when one is
        # present, otherwise add a fresh one.
        background = None
        for image in space_v3d.background_images:
            if image.source == 'MOVIE_CLIP':
                background = image
                break

        if background is None:
            background = space_v3d.background_images.new()

        background.source = 'MOVIE_CLIP'
        background.clip = clip
        background.clip_user.proxy_render_size = user.proxy_render_size
        background.clip_user.use_render_undistorted = True
        background.use_camera_clip = False
        background.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     apply_background, clip, clip_user)
61
62
def CLIP_camera_for_clip(context, clip):
    """Return the camera object whose Camera Solver constraint uses
    *clip*, falling back to the scene's active camera when none matches.
    """
    scene = context.scene

    for ob in scene.objects:
        if ob.type != 'CAMERA':
            continue
        for con in ob.constraints:
            if con.type != 'CAMERA_SOLVER':
                continue
            cur_clip = scene.active_clip if con.use_active_clip else con.clip
            if cur_clip == clip:
                return ob

    return scene.camera
78
79
def CLIP_track_view_selected(sc, track):
    """Report whether *track* counts as selected in clip editor *sc*.

    The pattern/search selection flags only count when the corresponding
    marker area is visible in the editor.
    """
    return bool(track.select_anchor or
                (sc.show_marker_pattern and track.select_pattern) or
                (sc.show_marker_search and track.select_search))
91
92
def CLIP_default_settings_from_track(clip, track, framenr):
    """Copy the tracking settings of *track* (at clip frame *framenr*)
    into the clip's default tracking settings, converting the marker's
    normalized pattern/search sizes to pixels.
    """
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    marker = track.markers.find_frame(framenr, False)
    pattern_bb = marker.pattern_bound_box

    # Marker coordinates are normalized to the frame; scale each axis
    # to pixels and keep the larger dimension as the default size.
    pattern_w = (pattern_bb[1][0] - pattern_bb[0][0]) * width
    pattern_h = (pattern_bb[1][1] - pattern_bb[0][1]) * height

    search_w = (marker.search_max[0] - marker.search_min[0]) * width
    search_h = (marker.search_max[1] - marker.search_min[1]) * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern_w, pattern_h)
    settings.default_search_size = max(search_w, search_h)
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
124
125
class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will be copying movement of active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    def _link_track(self, context, clip, tracking_object, track):
        """Create an empty named after *track*, link it into the scene
        and give it a FOLLOW_TRACK constraint bound to the track.
        """
        sc = context.space_data

        # Fix: the original initialized `ob = None` (and `constraint = None`
        # far from its use) only to overwrite it immediately — dead locals
        # removed, behavior unchanged.
        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select = True
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        # Reuse an existing FOLLOW_TRACK constraint if one is present
        # (defensive; a freshly created empty normally has none).
        constraint = None
        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        # One empty per track that is selected in the clip editor.
        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}
168
169
class CLIP_OT_bundles_to_mesh(Operator):
    """Create vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        # Collect the 3D position of every reconstructed (bundled) track.
        bundle_positions = [track.bundle
                            for track in tracking_object.tracks
                            if track.has_bundle]

        mesh = bpy.data.meshes.new(name="Tracks")
        if bundle_positions:
            mesh.vertices.add(len(bundle_positions))
            mesh.vertices.foreach_set("co", unpack_list(bundle_positions))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)
        context.scene.objects.link(ob)

        return {'FINISHED'}
205
206
class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def invoke(self, context, event):
        # Ask for confirmation before touching files on disk.
        return context.window_manager.invoke_confirm(self, event)

    def _rmproxy(self, abspath):
        """Remove a proxy file or directory; missing paths are ignored."""
        import shutil

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        elif os.path.exists(abspath):
            os.remove(abspath)

    def execute(self, context):
        clip = context.space_data.clip

        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        absproxy = bpy.path.abspath(os.path.join(proxydir, clipfile))

        # Image proxies: proxy_<quality>[_undistorted] directories plus
        # the proxy_<quality>.avi movies.
        for quality in (25, 50, 75, 100):
            proxy_path = os.path.join(absproxy, "proxy_%d" % quality)
            self._rmproxy(proxy_path)
            self._rmproxy(proxy_path + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % quality))

        # Time-code index files.
        for tc_name in ("free_run.blen_tc",
                        "interp_free_run.blen_tc",
                        "record_run.blen_tc"):
            self._rmproxy(os.path.join(absproxy, tc_name))

        # Remove the per-clip proxy directory when it is left empty.
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # Same for the (possibly custom) proxy root directory.
        try:
            os.rmdir(bpy.path.abspath(proxydir))
        except OSError:
            pass

        return {'FINISHED'}
281
282
class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
    """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        # Apply to the current screen's 3D view-ports only.
        sc = context.space_data
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)
        return {'FINISHED'}
305
306
class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for object which will copy \
object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
        # Bake the motion produced by a tracking constraint on `ob` into
        # location/rotation key-frames, then remove the constraint.
        # NOTE: steps the scene frame as a side effect; the original
        # current frame is restored before returning.
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find constraint which would be converting
        # TODO: several camera solvers and track followers would fail,
        #       but can't think about real work-flow where it'll be useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        # A 3D-position Follow Track constraint is a static transform:
        # apply the current world matrix and drop the constraint, no
        # per-frame baking required.
        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames across all tracks of the clip.
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        # No tracks with markers: nothing to bake.
        if sfra is None or efra is None:
            return

        # Store object matrices first for the whole frame range, so the
        # key-framing pass below does not evaluate matrices already
        # affected by inserted keys.
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on object and insert key-frames
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            # Key the rotation channel matching the object's rotation mode.
            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        # Restore the frame the user was on.
        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in hidden layer!
        for ob in scene.objects:
            if ob.select:
                self._bake_object(scene, ob)

        return {'FINISHED'}
405
406
class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        # Only makes sense once camera motion has been solved.
        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        # Make the clip being edited the scene's active clip.
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        # Create a world if the scene has none and configure ambient
        # occlusion / environment lighting used by the shadow-catching
        # composite below.
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005

    @staticmethod
    def _findOrCreateCamera(context):
        # Return the scene camera, creating and linking one when the
        # scene has none.
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)

        scene.camera = camob

        # Place the new camera at Blender's default startup viewpoint.
        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
                              Matrix.Rotation(0.815, 4, 'Z') *
                              Matrix.Rotation(0.011, 4, 'Y') *
                              Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        # Give the scene camera a Camera Solver constraint driven by the
        # active clip and copy the solved camera intrinsics.
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        # Show the clip as camera background in every 3D view-port on
        # every screen.
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        # Create "Foreground" and "Background" render layers configured
        # for the compositing setup (zmask on foreground, shadow and AO
        # passes on background). Existing layers with those names are
        # left untouched.
        scene = context.scene
        rlayers = scene.render.layers

        if not scene.render.layers.get("Foreground"):
            # Reuse a sole existing layer instead of adding a second one.
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render.layers.new("Foreground")

            fg.use_sky = False
            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render.layers.get("Background"):
            bg = scene.render.layers.new("Background")
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _findNode(tree, type):
        # Return the first node of the given type, or None when absent.
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        # Return an existing node of the given type, creating one when
        # the tree has none.
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
        # Decide whether the compositor tree still needs building: any
        # movie-clip related node is taken as evidence of an existing
        # (possibly user-modified) setup that must not be damaged.
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
        # Nudge nodes apart when two ended up at exactly the same spot.
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
        # Build the compositing tree: undistorted footage multiplied by
        # background shadows/AO, with the vector-blurred foreground layer
        # alpha-overed on top.
        if not self._needSetupNodes(context):
            # compositor nodes were already setup or even changes already
            # do nothing to prevent nodes damage
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        # Stabilization path is scaffolded but currently disabled.
        need_stabilization = False

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers')
        rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers')
        composite = self._findOrCreateNode(tree, 'CompositorNodeComposite')

        movieclip = tree.nodes.new(type='CompositorNodeMovieClip')
        distortion = tree.nodes.new(type='CompositorNodeMovieDistortion')

        if need_stabilization:
            stabilize = tree.nodes.new(type='CompositorNodeStabilize2D')

        scale = tree.nodes.new(type='CompositorNodeScale')
        invert = tree.nodes.new(type='CompositorNodeInvert')
        add_ao = tree.nodes.new(type='CompositorNodeMixRGB')
        add_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_image = tree.nodes.new(type='CompositorNodeMixRGB')
        vector_blur = tree.nodes.new(type='CompositorNodeVecBlur')
        alphaover = tree.nodes.new(type='CompositorNodeAlphaOver')
        viewer = tree.nodes.new(type='CompositorNodeViewer')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_ao.show_preview = False
        add_shadow.blend_type = 'ADD'
        add_shadow.show_preview = False

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8
        mul_shadow.show_preview = False

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8
        mul_image.show_preview = False

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"])

        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        # NOTE(review): viewer takes composite's current spot, then
        # composite is moved 200 up — confirm this is the intended layout
        # (one might expect the viewer to be the node that is offset).
        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were created on position of existing node
        self._offsetNodes(tree)

        scene.render.alpha_mode = 'TRANSPARENT'
        # NOTE(review): assumes the Cycles add-on is registered so that
        # `scene.cycles` exists — confirm, otherwise this raises.
        if scene.cycles:
            scene.cycles.film_transparent = True

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
        # Build a quad-only mesh object from flat vertex/face data and
        # link it into the scene.
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        # `faces` is a flat list of vertex indices, four per polygon.
        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):
        # Four corners of an axis-aligned square of the given half-size
        # lying at height z.
        return [(-half_size, -half_size, z),
                (-half_size, half_size, z),
                (half_size, half_size, z),
                (half_size, -half_size, z)]

    def _createGround(self, scene):
        # Create the shadow-catching ground plane, tagged via a custom
        # property so it can be found again later.
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        # Find a previously created ground plane by its custom property.
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):
        # Element-wise OR of two layer-visibility bool sequences.
        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        # Create a default point lamp with ray shadows for the composite.
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.distance = 30
        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        # Create a placeholder cube (two stacked quads plus four sides).
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = (0, 1, 2, 3,
                 4, 7, 6, 5,
                 0, 4, 5, 1,
                 1, 5, 6, 2,
                 2, 6, 7, 3,
                 3, 7, 4, 0)

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
        # Distribute lamps, a sample object and the ground plane over the
        # foreground/background render layers created earlier.
        scene = context.scene

        fg = scene.render.layers.get("Foreground")
        bg = scene.render.layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create sample lamp if there's no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create sample object if there's no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure ground is available on Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        # Run the full setup pipeline; each step is idempotent enough to
        # be re-run on an already prepared scene.
        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        return {'FINISHED'}
856
857
class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        if sc.type != 'CLIP_EDITOR':
            return False
        clip = sc.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        clip = context.space_data.clip
        active_track = clip.tracking.tracks.active

        # Convert the scene frame to a clip-relative frame number (1-based).
        clip_framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, active_track, clip_framenr)

        return {'FINISHED'}