Fix for [#32016] Tracking Settings Presets not working
[blender.git] release/scripts/startup/bl_operators/clip.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# <pep8 compliant>
import bpy
import os
from bpy.types import Operator

from mathutils import Vector, Matrix


def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
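    """Call callback(space, *args) for each space of type tspace found in
    areas of type tarea, walking all screens or only the current one.
    """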
    screens = bpy.data.screens if all_screens else [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type == tarea:
                for space in area.spaces:
                    if space.type == tspace:
                        callback(space, *args)


def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
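    """Set clip as the camera background image of every matching 3D View,
    using the proxy settings of the given clip user.
    """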
    def set_background(space_v3d, clip, user):
        bgpic = None

        for x in space_v3d.background_images:
            if x.source == 'MOVIE_CLIP':
                bgpic = x
                break

        if not bgpic:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     set_background, clip, clip_user)


def CLIP_camera_for_clip(context, clip):
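    """Return the camera object whose Camera Solver constraint uses clip,
    falling back to the scene camera if there is no such object.
    """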
    scene = context.scene

    camera = scene.camera

    for ob in scene.objects:
        if ob.type == 'CAMERA':
            for con in ob.constraints:
                if con.type == 'CAMERA_SOLVER':
                    if con.use_active_clip:
                        cur_clip = scene.active_clip
                    else:
                        cur_clip = con.clip

                    if cur_clip == clip:
                        return ob

    return camera


def CLIP_track_view_selected(sc, track):
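    """Return True when any visible part of the track (anchor, pattern or
    search area) is selected in the clip editor.
    """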
    if track.select_anchor:
        return True

    if sc.show_marker_pattern and track.select_pattern:
        return True

    if sc.show_marker_search and track.select_search:
        return True

    return False


def CLIP_default_settings_from_track(clip, track):
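    """Make the settings of track the default for newly created tracks,
    converting the normalized pattern/search sizes into pixels.
    """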
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    pattern = track.pattern_max - track.pattern_min
    search = track.search_max - track.search_min

    pattern[0] = pattern[0] * width
    pattern[1] = pattern[1] * height

    search[0] = search[0] * width
    search[1] = search[1] * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel


class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will copy the active track's movement"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    def _link_track(self, context, clip, tracking_object, track):
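        """Create an empty object driven by track through a Follow Track
        constraint and make it the active object.
        """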
        sc = context.space_data
        constraint = None
        ob = None

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select = True
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}


class CLIP_OT_bundles_to_mesh(Operator):
    """Create a vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        mesh = bpy.data.meshes.new(name="Tracks")
        for track in tracking_object.tracks:
            if track.has_bundle:
                new_verts.append(track.bundle)

        if new_verts:
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)

        context.scene.objects.link(ob)

        return {'FINISHED'}


class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    def _rmproxy(self, abspath):
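        """Remove the proxy file or directory at abspath if it exists."""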
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove proxy per-clip directory
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove [custom] proxy directory if empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}


class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
    """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def execute(self, context):
        sc = context.space_data
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)

        return {'FINISHED'}


class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for the object which will copy \
the object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
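        """Bake the motion caused by a tracking constraint on ob into
        location/rotation keyframes and remove the constraint afterwards.
        """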
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find the constraint which will be converted
        # TODO: several camera solvers and track followers would fail,
        #       but can't think of a real work-flow where that would be useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        if sfra is None or efra is None:
            return

        # Store object matrices
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on object and insert key-frames
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in hidden layer!
        for ob in scene.objects:
            if ob.select:
                self._bake_object(scene, ob)

        return {'FINISHED'}


class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005

    @staticmethod
    def _findOrCreateCamera(context):
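        """Return the scene camera, creating and placing a new one when
        the scene does not have a camera yet.
        """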
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)

        scene.camera = camob

        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
            Matrix.Rotation(0.815, 4, 'Z') *
            Matrix.Rotation(0.011, 4, 'Y') *
            Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        scene = context.scene
        rlayers = scene.render.layers

        if not scene.render.layers.get("Foreground"):
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render.layers.new("Foreground")

            fg.use_sky = False
            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render.layers.get("Background"):
            bg = scene.render.layers.new("Background")
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _findNode(tree, type):
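        """Return the first node of the given type in tree, or None."""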
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
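        """Nodes need to be set up unless the tree already contains
        movie clip or movie distortion nodes.
        """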
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
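        """Nudge nodes apart when two of them share the same location."""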
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
        if not self._needSetupNodes(context):
            # compositor nodes were already set up or changed manually,
            # do nothing to avoid damaging them
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        need_stabilization = False

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'R_LAYERS')
        rlayer_bg = tree.nodes.new(type='R_LAYERS')
        composite = self._findOrCreateNode(tree, 'COMPOSITE')

        movieclip = tree.nodes.new(type='MOVIECLIP')
        distortion = tree.nodes.new(type='MOVIEDISTORTION')

        if need_stabilization:
            stabilize = tree.nodes.new(type='STABILIZE2D')

        scale = tree.nodes.new(type='SCALE')
        invert = tree.nodes.new(type='INVERT')
        add_ao = tree.nodes.new(type='MIX_RGB')
        add_shadow = tree.nodes.new(type='MIX_RGB')
        mul_shadow = tree.nodes.new(type='MIX_RGB')
        mul_image = tree.nodes.new(type='MIX_RGB')
        vector_blur = tree.nodes.new(type='VECBLUR')
        alphaover = tree.nodes.new(type='ALPHAOVER')
        viewer = tree.nodes.new(type='VIEWER')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_shadow.blend_type = 'ADD'

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"])

        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were created on top of an existing node
        self._offsetNodes(tree)

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
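        """Create a quad-only mesh object from flat vertex and face index
        lists and link it to the scene.
        """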
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):
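        """Return the four corners of a square of the given half size
        lying at height z.
        """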

        return [(-half_size, -half_size, z),
                (-half_size, half_size, z),
                (half_size, half_size, z),
                (half_size, -half_size, z)]

    def _createGround(self, scene):
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):
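        """Return the element-wise union of two layer visibility lists."""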

        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.distance = 30
        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = (0, 1, 2, 3,
                 4, 7, 6, 5,
                 0, 4, 5, 1,
                 1, 5, 6, 2,
                 2, 6, 7, 3,
                 3, 7, 4, 0)

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
        scene = context.scene

        fg = scene.render.layers.get("Foreground")
        bg = scene.render.layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create a sample lamp if there are no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create a sample object if there are no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure ground is available on Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        return {'FINISHED'}


class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.tracks.active

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip

        CLIP_default_settings_from_track(clip, clip.tracking.tracks.active)

        return {'FINISHED'}