cleanup
[blender.git] / release / scripts / startup / bl_operators / clip.py
1 # ##### BEGIN GPL LICENSE BLOCK #####
2 #
3 #  This program is free software; you can redistribute it and/or
4 #  modify it under the terms of the GNU General Public License
5 #  as published by the Free Software Foundation; either version 2
6 #  of the License, or (at your option) any later version.
7 #
8 #  This program is distributed in the hope that it will be useful,
9 #  but WITHOUT ANY WARRANTY; without even the implied warranty of
10 #  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11 #  GNU General Public License for more details.
12 #
13 #  You should have received a copy of the GNU General Public License
14 #  along with this program; if not, write to the Free Software Foundation,
15 #  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
16 #
17 # ##### END GPL LICENSE BLOCK #####
18
19 # <pep8 compliant>
20 import bpy
21 import os
22 from bpy.types import Operator
23 from bpy.props import FloatProperty
24 from mathutils import Vector, Matrix
25
26
def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
    """Invoke *callback* (with *args*) on every space of type *tspace*
    inside areas of type *tarea*, either across all screens or only the
    current one."""
    if all_screens:
        screens = bpy.data.screens
    else:
        screens = [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type != tarea:
                continue
            for space in area.spaces:
                if space.type == tspace:
                    callback(space, *args)
36
37
def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
    """Show *clip* as an undistorted camera background image in every
    visible 3D view-port (in all screens when *all_screens* is set)."""

    def _apply(space_v3d, clip, user):
        # Reuse an existing movie-clip background slot when one is
        # present; otherwise create a fresh slot.
        bgpic = None
        for candidate in space_v3d.background_images:
            if candidate.source == 'MOVIE_CLIP':
                bgpic = candidate
                break
        if bgpic is None:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     _apply, clip, clip_user)
61
62
def CLIP_camera_for_clip(context, clip):
    """Return the camera object whose Camera Solver constraint uses
    *clip*; fall back to the scene camera when no solver matches."""
    scene = context.scene

    for ob in scene.objects:
        if ob.type != 'CAMERA':
            continue
        for con in ob.constraints:
            if con.type != 'CAMERA_SOLVER':
                continue
            cur_clip = (scene.active_clip if con.use_active_clip
                        else con.clip)
            if cur_clip == clip:
                return ob

    return scene.camera
78
79
def CLIP_track_view_selected(sc, track):
    """Tell whether *track* counts as selected in the clip editor *sc*:
    the anchor always counts, pattern/search areas only when their
    display is enabled."""
    return bool(track.select_anchor or
                (sc.show_marker_pattern and track.select_pattern) or
                (sc.show_marker_search and track.select_search))
91
92
def CLIP_default_settings_from_track(clip, track, framenr):
    """Copy the per-track tracking parameters of *track* (sampled at
    frame *framenr*) into the clip's default settings, so newly created
    tracks start out configured the same way."""
    settings = clip.tracking.settings

    width, height = clip.size[0], clip.size[1]

    marker = track.markers.find_frame(framenr, False)
    pattern_bb = marker.pattern_bound_box

    # Marker sizes are stored normalized; convert to pixel units.
    pattern = Vector(pattern_bb[1]) - Vector(pattern_bb[0])
    search = marker.search_max - marker.search_min

    pattern[0] *= width
    pattern[1] *= height

    search[0] *= width
    search[1] *= height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
    settings.default_weight = track.weight
125
126
class CLIP_OT_filter_tracks(bpy.types.Operator):
    # Select tracks whose per-frame velocity deviates strongly from the
    # average track velocity -- these are likely tracking failures.
    bl_label = "Filter Tracks"
    bl_idname = "clip.filter_tracks"
    bl_options = {'UNDO', 'REGISTER'}

    track_threshold = FloatProperty(
            name="Track Threshold",
            description="Filter Threshold to select problematic track",
            default=5.0,
            )

    @staticmethod
    def _filter_values(context, threshold):
        """Deselect every track, then select the ones whose velocity
        differs from the per-frame average by more than *threshold*
        pixels.  Returns the number of problematic tracks found."""

        def get_marker_coordinates_in_pixels(clip_size, track, frame_number):
            marker = track.markers.find_frame(frame_number)
            return Vector((marker.co[0] * clip_size[0], marker.co[1] * clip_size[1]))

        def marker_velocity(clip_size, track, frame):
            # Pixel-space displacement between this frame and the previous one.
            marker_a = get_marker_coordinates_in_pixels(clip_size, track, frame)
            marker_b = get_marker_coordinates_in_pixels(clip_size, track, frame - 1)
            return marker_a - marker_b

        scene = context.scene
        frame_start = scene.frame_start
        frame_end = scene.frame_end
        clip = context.space_data.clip
        clip_size = clip.size[:]

        # Very short tracks are noise for this analysis; drop them first.
        bpy.ops.clip.clean_tracks(frames=10, action='DELETE_TRACK')

        tracks_to_clean = set()

        for frame in range(frame_start, frame_end + 1):

            # Find tracks with markers in both this frame and the previous one.
            relevant_tracks = [
                    track for track in clip.tracking.tracks
                    if (track.markers.find_frame(frame) and
                        track.markers.find_frame(frame - 1))]

            if not relevant_tracks:
                continue

            # Get average velocity and deselect track.
            average_velocity = Vector((0.0, 0.0))
            for track in relevant_tracks:
                track.select = False
                average_velocity += marker_velocity(clip_size, track, frame)
            # relevant_tracks is guaranteed non-empty here (the `continue`
            # above), so divide unconditionally.
            average_velocity = average_velocity / len(relevant_tracks)

            # Then find all markers that behave differently than the average.
            for track in relevant_tracks:
                track_velocity = marker_velocity(clip_size, track, frame)
                distance = (average_velocity - track_velocity).length

                if distance > threshold:
                    tracks_to_clean.add(track)

        for track in tracks_to_clean:
            track.select = True
        return len(tracks_to_clean)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return (space.type == 'CLIP_EDITOR') and space.clip

    def execute(self, context):
        num_tracks = self._filter_values(context, self.track_threshold)
        self.report({'INFO'}, "Identified %d problematic tracks" % num_tracks)
        return {'FINISHED'}
200
201
class CLIP_OT_set_active_clip(bpy.types.Operator):
    bl_label = "Set Active Clip"
    bl_idname = "clip.set_active_clip"

    @classmethod
    def poll(cls, context):
        # Only meaningful from within a movie clip editor.
        return context.space_data.type == 'CLIP_EDITOR'

    def execute(self, context):
        # Make the edited clip the scene's active clip and match the
        # render resolution to the footage.
        clip = context.space_data.clip
        scene = context.scene
        scene.active_clip = clip
        scene.render.resolution_x = clip.size[0]
        scene.render.resolution_y = clip.size[1]
        return {'FINISHED'}
218
219
class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will be copying movement of active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    def _link_track(self, context, clip, tracking_object, track):
        """Create an empty named after *track* and drive it with a
        Follow Track constraint bound to the clip's tracking data."""
        sc = context.space_data

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select = True
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        # The object was created just above, so it cannot already carry a
        # Follow Track constraint -- add one directly.  (The original code
        # searched the fresh object's empty constraint list first.)
        constraint = ob.constraints.new(type='FOLLOW_TRACK')
        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        # Link one empty per track that is visibly selected in the editor.
        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}
262
263
class CLIP_OT_bundles_to_mesh(Operator):
    """Create vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Requires a clip editor with a loaded clip.
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        scene = context.scene
        camera = scene.camera
        matrix = Matrix.Identity(4)
        if camera:
            # Place the vertex cloud in world space by combining the camera's
            # world matrix with the inverse of the reconstructed camera
            # matrix at the current clip frame.
            reconstruction = tracking_object.reconstruction
            framenr = scene.frame_current - clip.frame_start + 1
            reconstructed_matrix = reconstruction.cameras.matrix_from_frame(framenr)
            matrix = camera.matrix_world * reconstructed_matrix.inverted()

        # One vertex per solved track; tracks without a 3D bundle are skipped.
        mesh = bpy.data.meshes.new(name="Tracks")
        for track in tracking_object.tracks:
            if track.has_bundle:
                new_verts.append(track.bundle)

        if new_verts:
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)

        ob.matrix_world = matrix

        context.scene.objects.link(ob)

        return {'FINISHED'}
310
311
class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        # Destructive file-system operation -- ask for confirmation first.
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    def _rmproxy(self, abspath):
        # Remove a proxy file or directory, silently ignoring missing paths.
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        # Proxies live either in a user-chosen directory or in a
        # "BL_proxy" folder next to the footage.
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        # Time-code index files generated alongside the proxies.
        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove proxy per-clip directory
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove [custom] proxy directory if empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}
386
387
class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
    """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Need a clip editor with a clip loaded.
        sc = context.space_data
        if sc.type != 'CLIP_EDITOR':
            return False
        return sc.clip

    def execute(self, context):
        space = context.space_data
        CLIP_set_viewport_background(context, False, space.clip,
                                     space.clip_user)
        return {'FINISHED'}
410
411
class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for object which will copy \
object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
        """Bake the motion produced by *ob*'s tracking constraint into
        location/rotation key-frames over the tracked frame range, then
        remove the constraint.  Restores the original current frame."""
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find constraint which would be converting
        # TODO: several camera solvers and track followers would fail,
        #       but can't think about real work-flow where it'll be useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        # A Follow Track constraint in 3D-position mode contributes a static
        # transform only, so applying the current matrix is enough.
        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        # No tracks with markers -- nothing to bake.
        if sfra is None or efra is None:
            return

        # Store object matrices (evaluated with the constraint still active)
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on object and insert key-frames
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in hidden layer!
        for ob in scene.objects:
            if ob.select:
                self._bake_object(scene, ob)

        return {'FINISHED'}
510
511
512 class CLIP_OT_setup_tracking_scene(Operator):
513     """Prepare scene for compositing 3D objects into this footage"""
514
515     bl_idname = "clip.setup_tracking_scene"
516     bl_label = "Setup Tracking Scene"
517     bl_options = {'UNDO', 'REGISTER'}
518
519     @classmethod
520     def poll(cls, context):
521         sc = context.space_data
522
523         if sc.type != 'CLIP_EDITOR':
524             return False
525
526         clip = sc.clip
527
528         return clip and clip.tracking.reconstruction.is_valid
529
530     @staticmethod
531     def _setupScene(context):
532         scene = context.scene
533         scene.active_clip = context.space_data.clip
534
535     @staticmethod
536     def _setupWorld(context):
537         scene = context.scene
538         world = scene.world
539
540         if not world:
541             world = bpy.data.worlds.new(name="World")
542             scene.world = world
543
544         world.light_settings.use_ambient_occlusion = True
545         world.light_settings.ao_blend_type = 'MULTIPLY'
546
547         world.light_settings.use_environment_light = True
548         world.light_settings.environment_energy = 0.1
549
550         world.light_settings.distance = 1.0
551         world.light_settings.sample_method = 'ADAPTIVE_QMC'
552         world.light_settings.samples = 7
553         world.light_settings.threshold = 0.005
554         if hasattr(scene, 'cycles'):
555                 world.light_settings.ao_factor = 0.05
556
557     @staticmethod
558     def _findOrCreateCamera(context):
559         scene = context.scene
560
561         if scene.camera:
562             return scene.camera
563
564         cam = bpy.data.cameras.new(name="Camera")
565         camob = bpy.data.objects.new(name="Camera", object_data=cam)
566         scene.objects.link(camob)
567
568         scene.camera = camob
569
570         camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
571                               Matrix.Rotation(0.815, 4, 'Z') *
572                               Matrix.Rotation(0.011, 4, 'Y') *
573                               Matrix.Rotation(1.109, 4, 'X'))
574
575         return camob
576
577     @staticmethod
578     def _setupCamera(context):
579         sc = context.space_data
580         clip = sc.clip
581         tracking = clip.tracking
582
583         camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
584         cam = camob.data
585
586         # Remove all constraints to be sure motion is fine
587         camob.constraints.clear()
588
589         # Append camera solver constraint
590         con = camob.constraints.new(type='CAMERA_SOLVER')
591         con.use_active_clip = True
592         con.influence = 1.0
593
594         cam.sensor_width = tracking.camera.sensor_width
595         cam.lens = tracking.camera.focal_length
596
597     @staticmethod
598     def _setupViewport(context):
599         sc = context.space_data
600         CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)
601
602     @staticmethod
603     def _setupRenderLayers(context):
604         scene = context.scene
605         rlayers = scene.render.layers
606
607         if not scene.render.layers.get("Foreground"):
608             if len(rlayers) == 1:
609                 fg = rlayers[0]
610                 fg.name = 'Foreground'
611             else:
612                 fg = scene.render.layers.new("Foreground")
613
614             fg.use_sky = True
615             fg.layers = [True] + [False] * 19
616             fg.layers_zmask = [False] * 10 + [True] + [False] * 9
617             fg.use_pass_vector = True
618
619         if not scene.render.layers.get("Background"):
620             bg = scene.render.layers.new("Background")
621             bg.use_pass_shadow = True
622             bg.use_pass_ambient_occlusion = True
623             bg.layers = [False] * 10 + [True] + [False] * 9
624
625     @staticmethod
626     def _wipeDefaultNodes(tree):
627         if len(tree.nodes) != 2:
628             return False
629         types = [node.type for node in tree.nodes]
630         types.sort()
631
632         if types[0] == 'COMPOSITE' and types[1] == 'R_LAYERS':
633             while tree.nodes:
634                 tree.nodes.remove(tree.nodes[0])
635
636     @staticmethod
637     def _findNode(tree, type):
638         for node in tree.nodes:
639             if node.type == type:
640                 return node
641
642         return None
643
644     @staticmethod
645     def _findOrCreateNode(tree, type):
646         node = CLIP_OT_setup_tracking_scene._findNode(tree, type)
647
648         if not node:
649             node = tree.nodes.new(type=type)
650
651         return node
652
653     @staticmethod
654     def _needSetupNodes(context):
655         scene = context.scene
656         tree = scene.node_tree
657
658         if not tree:
659             # No compositor node tree found, time to create it!
660             return True
661
662         for node in tree.nodes:
663             if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
664                 return False
665
666         return True
667
668     @staticmethod
669     def _offsetNodes(tree):
670         for a in tree.nodes:
671             for b in tree.nodes:
672                 if a != b and a.location == b.location:
673                     b.location += Vector((40.0, 20.0))
674
    def _setupNodes(self, context):
        """Build the full compositing node graph that layers the rendered
        foreground (with vector blur) over the undistorted footage mixed
        with the background layer's shadow and AO passes."""
        if not self._needSetupNodes(context):
            # compositor nodes were already setup or even changes already
            # do nothing to prevent nodes damage
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        need_stabilization = False

        # Remove all the nodes if they came from default node setup.
        # This is simplest way to make it so final node setup is correct.
        self._wipeDefaultNodes(tree)

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers')
        rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers')
        composite = self._findOrCreateNode(tree, 'CompositorNodeComposite')

        movieclip = tree.nodes.new(type='CompositorNodeMovieClip')
        distortion = tree.nodes.new(type='CompositorNodeMovieDistortion')

        if need_stabilization:
            stabilize = tree.nodes.new(type='CompositorNodeStabilize2D')

        scale = tree.nodes.new(type='CompositorNodeScale')
        invert = tree.nodes.new(type='CompositorNodeInvert')
        add_ao = tree.nodes.new(type='CompositorNodeMixRGB')
        add_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_image = tree.nodes.new(type='CompositorNodeMixRGB')
        vector_blur = tree.nodes.new(type='CompositorNodeVecBlur')
        alphaover = tree.nodes.new(type='CompositorNodeAlphaOver')
        viewer = tree.nodes.new(type='CompositorNodeViewer')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_ao.show_preview = False
        add_shadow.blend_type = 'ADD'
        add_shadow.show_preview = False

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8
        mul_shadow.show_preview = False

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8
        mul_image.show_preview = False

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        # Inverted background alpha masks AO/shadow to uncovered areas.
        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        # Darken the footage by the combined AO/shadow mask.
        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"])

        # Composite the blurred foreground over the prepared footage.
        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were creates on position of existing node
        self._offsetNodes(tree)

        # Render with transparent sky so the footage shows through.
        scene.render.alpha_mode = 'TRANSPARENT'
        if hasattr(scene, 'cycles'):
            scene.cycles.film_transparent = True
845
    @staticmethod
    def _createMesh(scene, name, vertices, faces):
        """Build a quad-only mesh object from a list of vertex coordinates
        and a flat list of vertex indices (four per polygon), link it into
        *scene* and return the new object."""
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        # Faces arrive as a flat index list; every 4 indices form one quad.
        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob
871
872     @staticmethod
873     def _getPlaneVertices(half_size, z):
874
875         return [(-half_size, -half_size, z),
876                 (half_size, -half_size, z),
877                 (half_size, half_size, z),
878                 (-half_size, half_size, z)]
879
880     def _createGround(self, scene):
881         vertices = self._getPlaneVertices(4.0, 0.0)
882         faces = [0, 1, 2, 3]
883
884         ob = self._createMesh(scene, "Ground", vertices, faces)
885         ob["is_ground"] = True
886
887         return ob
888
889     @staticmethod
890     def _findGround(context):
891         scene = context.scene
892
893         for ob in scene.objects:
894             if ob.type == 'MESH' and "is_ground" in ob:
895                 return ob
896
897         return None
898
899     @staticmethod
900     def _mergeLayers(layers_a, layers_b):
901
902         return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]
903
904     @staticmethod
905     def _createLamp(scene):
906         lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
907         lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
908         scene.objects.link(lampob)
909
910         lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))
911
912         lamp.distance = 30
913         lamp.shadow_method = 'RAY_SHADOW'
914
915         return lampob
916
917     def _createSampleObject(self, scene):
918         vertices = self._getPlaneVertices(1.0, -1.0) + \
919             self._getPlaneVertices(1.0, 1.0)
920         faces = (0, 1, 2, 3,
921                  4, 7, 6, 5,
922                  0, 4, 5, 1,
923                  1, 5, 6, 2,
924                  2, 6, 7, 3,
925                  3, 7, 4, 0)
926
927         return self._createMesh(scene, "Cube", vertices, faces)
928
929     def _setupObjects(self, context):
930         scene = context.scene
931
932         fg = scene.render.layers.get("Foreground")
933         bg = scene.render.layers.get("Background")
934
935         all_layers = self._mergeLayers(fg.layers, bg.layers)
936
937         # ensure all lamps are active on foreground and background
938         has_lamp = False
939         has_mesh = False
940         for ob in scene.objects:
941             if ob.type == 'LAMP':
942                 ob.layers = all_layers
943                 has_lamp = True
944             elif ob.type == 'MESH' and "is_ground" not in ob:
945                 has_mesh = True
946
947         # create sample lamp if there's no lamps in the scene
948         if not has_lamp:
949             lamp = self._createLamp(scene)
950             lamp.layers = all_layers
951
952         # create sample object if there's no meshes in the scene
953         if not has_mesh:
954             ob = self._createSampleObject(scene)
955             ob.layers = fg.layers
956
957         # create ground object if needed
958         ground = self._findGround(context)
959         if not ground:
960             ground = self._createGround(scene)
961             ground.layers = bg.layers
962         else:
963             # make sure ground is available on Background layer
964             ground.layers = self._mergeLayers(ground.layers, bg.layers)
965
966         # layers with background and foreground should be rendered
967         scene.layers = self._mergeLayers(scene.layers, all_layers)
968
969     def execute(self, context):
970         scene = context.scene
971         current_active_layer = scene.active_layer
972
973         self._setupScene(context)
974         self._setupWorld(context)
975         self._setupCamera(context)
976         self._setupViewport(context)
977         self._setupRenderLayers(context)
978         self._setupNodes(context)
979         self._setupObjects(context)
980
981         # Active layer has probably changed, set it back to the original value.
982         # NOTE: The active layer is always true.
983         scene.layers[current_active_layer] = True
984
985         return {'FINISHED'}
986
987
class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        # Only available in the clip editor with an active track.
        space = context.space_data

        if space.type == 'CLIP_EDITOR':
            clip = space.clip
            return clip and clip.tracking.tracks.active

        return False

    def execute(self, context):
        clip = context.space_data.clip
        active_track = clip.tracking.tracks.active

        # Convert the scene frame into a clip-local frame number.
        clip_framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, active_track, clip_framenr)

        return {'FINISHED'}
1016
1017
class CLIP_OT_track_settings_to_track(Operator):
    """Copy tracking settings from active track to selected tracks"""

    bl_label = "Copy Track Settings"
    bl_idname = "clip.track_settings_to_track"
    bl_options = {'UNDO', 'REGISTER'}

    # Per-track settings copied from the active track.
    _attrs_track = (
        "correlation_min",
        "frames_limit",
        "pattern_match",
        "margin",
        "motion_model",
        "use_brute",
        "use_normalization",
        "use_mask",
        "use_red_channel",
        "use_green_channel",
        "use_blue_channel",
        "weight",
        )

    # Per-marker settings copied from the active track's marker at the
    # current frame.
    _attrs_marker = (
        "pattern_corners",
        "search_min",
        "search_max",
        )

    @classmethod
    def poll(cls, context):
        space = context.space_data
        if space.type != 'CLIP_EDITOR':
            return False
        clip = space.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        space = context.space_data
        clip = space.clip
        track = clip.tracking.tracks.active

        # Clip-local frame number of the current scene frame.
        framenr = context.scene.frame_current - clip.frame_start + 1
        marker = track.markers.find_frame(framenr, False)

        for t in clip.tracking.tracks:
            if t.select and t != track:
                for attr in self._attrs_track:
                    setattr(t, attr, getattr(track, attr))
                # find_frame() returns None when a track has no marker at
                # (or before) the requested frame; skip the marker
                # attributes in that case instead of raising.
                marker_selected = t.markers.find_frame(framenr, False)
                if marker is not None and marker_selected is not None:
                    for attr in self._attrs_marker:
                        setattr(marker_selected, attr, getattr(marker, attr))

        return {'FINISHED'}