# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# <pep8 compliant>
import bpy
import os
from bpy.types import Operator
from bpy.props import FloatProperty
from mathutils import (
    Vector,
    Matrix,
)


def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
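    """Call callback(space, *args) for every space of type tspace found in
    areas of type tarea, in the current screen or in all screens.
    """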
    screens = bpy.data.screens if all_screens else [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type == tarea:
                for space in area.spaces:
                    if space.type == tspace:
                        callback(space, *args)


def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
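    """Set the given movie clip as the camera background image of 3D Views,
    copying the proxy render size from clip_user.
    """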
    def set_background(space_v3d, clip, user):
        bgpic = None

        for x in space_v3d.background_images:
            if x.source == 'MOVIE_CLIP':
                bgpic = x
                break

        if not bgpic:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     set_background, clip, clip_user)


def CLIP_camera_for_clip(context, clip):
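    """Return the camera object whose Camera Solver constraint uses the given
    clip, falling back to the scene camera when no such camera exists.
    """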
    scene = context.scene

    camera = scene.camera

    for ob in scene.objects:
        if ob.type == 'CAMERA':
            for con in ob.constraints:
                if con.type == 'CAMERA_SOLVER':
                    cur_clip = scene.active_clip if con.use_active_clip else con.clip

                    if cur_clip == clip:
                        return ob

    return camera


def CLIP_track_view_selected(sc, track):
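    """Return True if any visible part of the track (anchor, pattern or
    search area) is selected in the given clip editor space.
    """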
    if track.select_anchor:
        return True

    if sc.show_marker_pattern and track.select_pattern:
        return True

    if sc.show_marker_search and track.select_search:
        return True

    return False


def CLIP_default_settings_from_track(clip, track, framenr):
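    """Copy the settings of the given track (and of its marker at framenr)
    into the clip's default tracking settings.
    """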
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    marker = track.markers.find_frame(framenr, False)
    pattern_bb = marker.pattern_bound_box

    pattern = Vector(pattern_bb[1]) - Vector(pattern_bb[0])
    search = marker.search_max - marker.search_min

    pattern[0] = pattern[0] * width
    pattern[1] = pattern[1] * height

    search[0] = search[0] * width
    search[1] = search[1] * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
    settings.default_weight = track.weight


class CLIP_OT_filter_tracks(bpy.types.Operator):
    """Filter tracks which have weird-looking spikes in their motion curves"""
    bl_label = "Filter Tracks"
    bl_idname = "clip.filter_tracks"
    bl_options = {'UNDO', 'REGISTER'}

    track_threshold = FloatProperty(
            name="Track Threshold",
            description="Filter threshold to select problematic tracks",
            default=5.0,
            )

    @staticmethod
    def _filter_values(context, threshold):
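        """Delete short tracks, then select tracks whose per-frame velocity
        deviates from the average of all tracks by more than threshold
        pixels; returns the number of tracks selected for cleaning.
        """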

        def get_marker_coordinates_in_pixels(clip_size, track, frame_number):
            marker = track.markers.find_frame(frame_number)
            return Vector((marker.co[0] * clip_size[0], marker.co[1] * clip_size[1]))

        def marker_velocity(clip_size, track, frame):
            marker_a = get_marker_coordinates_in_pixels(clip_size, track, frame)
            marker_b = get_marker_coordinates_in_pixels(clip_size, track, frame - 1)
            return marker_a - marker_b

        scene = context.scene
        frame_start = scene.frame_start
        frame_end = scene.frame_end
        clip = context.space_data.clip
        clip_size = clip.size[:]

        bpy.ops.clip.clean_tracks(frames=10, action='DELETE_TRACK')

        tracks_to_clean = set()

        for frame in range(frame_start, frame_end + 1):

            # Find tracks with markers in both this frame and the previous one.
            relevant_tracks = [
                    track for track in clip.tracking.tracks
                    if (track.markers.find_frame(frame) and
                        track.markers.find_frame(frame - 1))]

            if not relevant_tracks:
                continue

            # Get the average velocity and deselect the tracks.
            average_velocity = Vector((0.0, 0.0))
            for track in relevant_tracks:
                track.select = False
                average_velocity += marker_velocity(clip_size, track, frame)
            if len(relevant_tracks) >= 1:
                average_velocity = average_velocity / len(relevant_tracks)

            # Then find all markers that behave differently from the average.
            for track in relevant_tracks:
                track_velocity = marker_velocity(clip_size, track, frame)
                distance = (average_velocity - track_velocity).length

                if distance > threshold:
                    tracks_to_clean.add(track)

        for track in tracks_to_clean:
            track.select = True
        return len(tracks_to_clean)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return (space.type == 'CLIP_EDITOR') and space.clip

    def execute(self, context):
        num_tracks = self._filter_values(context, self.track_threshold)
        self.report({'INFO'}, "Identified %d problematic tracks" % num_tracks)
        return {'FINISHED'}


class CLIP_OT_set_active_clip(bpy.types.Operator):
    bl_label = "Set Active Clip"
    bl_idname = "clip.set_active_clip"

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'CLIP_EDITOR' and space.clip

    def execute(self, context):
        clip = context.space_data.clip
        scene = context.scene
        scene.active_clip = clip
        scene.render.resolution_x = clip.size[0]
        scene.render.resolution_y = clip.size[1]
        return {'FINISHED'}


class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will copy the movement of the active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    @staticmethod
    def _link_track(context, clip, tracking_object, track):
        sc = context.space_data
        constraint = None
        ob = None

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select_set(action='SELECT')
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'CLIP_EDITOR' and space.clip

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}


class CLIP_OT_bundles_to_mesh(Operator):
    """Create a vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        scene = context.scene
        camera = scene.camera
        matrix = Matrix.Identity(4)
        if camera:
            reconstruction = tracking_object.reconstruction
            framenr = scene.frame_current - clip.frame_start + 1
            reconstructed_matrix = reconstruction.cameras.matrix_from_frame(framenr)
            matrix = camera.matrix_world * reconstructed_matrix.inverted()

        mesh = bpy.data.meshes.new(name="Tracks")
        for track in tracking_object.tracks:
            if track.has_bundle and track.select:
                new_verts.append(track.bundle)

        if new_verts:
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)

        ob.matrix_world = matrix

        context.scene.objects.link(ob)

        return {'FINISHED'}


class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    @staticmethod
    def _rmproxy(abspath):
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove the per-clip proxy directory
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove the (custom) proxy directory if it is empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}


class CLIP_OT_set_viewport_background(Operator):
    """Set the current movie clip as the camera background in the 3D viewport """ \
    """(works only when a 3D viewport is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def execute(self, context):
        sc = context.space_data
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)

        return {'FINISHED'}


class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for the object which will copy \
the object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
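        """Bake the motion created by the object's motion tracking constraint
        into location/rotation keyframes over the frame range covered by the
        clip's tracks, then remove the constraint.
        """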
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find the constraint which is to be converted.
        # TODO: several camera solvers and track followers would fail,
        #       but can't think of a real workflow where that would be useful.
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get the clip used for parenting.
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames.
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        if sfra is None or efra is None:
            return

        # Store object matrices.
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on the object and insert keyframes.
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a library or on a hidden layer!
        for ob in scene.objects:
            # Read the selection state (select_set() would assign it instead).
            if ob.select_get():
                self._bake_object(scene, ob)

        return {'FINISHED'}


class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005
        if hasattr(scene, "cycles"):
            world.light_settings.ao_factor = 0.05

    @staticmethod
    def _findOrCreateCamera(context):
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)

        scene.camera = camob

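        # Give the new camera a sensible default transform (roughly the
        # camera placement of the default startup scene).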
        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
                              Matrix.Rotation(0.815, 4, 'Z') *
                              Matrix.Rotation(0.011, 4, 'Y') *
                              Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        scene = context.scene
        rlayers = scene.render_layers

        if not scene.render_layers.get("Foreground"):
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render_layers.new("Foreground")

            fg.use_sky = True
            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render_layers.get("Background"):
            bg = scene.render_layers.new("Background")
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _wipeDefaultNodes(tree):
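        """If the tree still contains only the default Composite and Render
        Layers nodes, remove them so the generated setup starts from an
        empty tree.
        """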
        if len(tree.nodes) != 2:
            return False
        types = [node.type for node in tree.nodes]
        types.sort()

        if types[0] == 'COMPOSITE' and types[1] == 'R_LAYERS':
            while tree.nodes:
                tree.nodes.remove(tree.nodes[0])

    @staticmethod
    def _findNode(tree, type):
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
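        """Build the compositing tree which overlays the vector-blurred
        Foreground render layer on top of the undistorted footage, darkened
        by the Background layer's shadow and AO passes.
        """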
        if not self._needSetupNodes(context):
            # Compositor nodes are already set up (or were changed by the
            # user); do nothing to avoid damaging the existing node tree.
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        need_stabilization = False

        # Remove all the nodes if they came from the default node setup.
        # This is the simplest way to make sure the final node setup is
        # correct.
        self._wipeDefaultNodes(tree)

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers')
        rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers')
        composite = self._findOrCreateNode(tree, 'CompositorNodeComposite')

        movieclip = tree.nodes.new(type='CompositorNodeMovieClip')
        distortion = tree.nodes.new(type='CompositorNodeMovieDistortion')

        if need_stabilization:
            stabilize = tree.nodes.new(type='CompositorNodeStabilize2D')

        scale = tree.nodes.new(type='CompositorNodeScale')
        invert = tree.nodes.new(type='CompositorNodeInvert')
        add_ao = tree.nodes.new(type='CompositorNodeMixRGB')
        add_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_image = tree.nodes.new(type='CompositorNodeMixRGB')
        vector_blur = tree.nodes.new(type='CompositorNodeVecBlur')
        alphaover = tree.nodes.new(type='CompositorNodeAlphaOver')
        viewer = tree.nodes.new(type='CompositorNodeViewer')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_ao.show_preview = False
        add_shadow.blend_type = 'ADD'
        add_shadow.show_preview = False

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8
        mul_shadow.show_preview = False

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8
        mul_image.show_preview = False

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Depth"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Vector"], vector_blur.inputs["Speed"])

        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # Ensure no node was created on top of an existing node.
        self._offsetNodes(tree)

        scene.render.alpha_mode = 'TRANSPARENT'
        if hasattr(scene, "cycles"):
            scene.cycles.film_transparent = True

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
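        """Create a quad-only mesh object from a list of vertex coordinates
        and a flat sequence of vertex indices (four per polygon), link it to
        the scene and return the new object.
        """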
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):

        return [(-half_size, -half_size, z),
                (half_size, -half_size, z),
                (half_size, half_size, z),
                (-half_size, half_size, z)]

    def _createGround(self, scene):
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):

        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.distance = 30
        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = (0, 1, 2, 3,
                 4, 7, 6, 5,
                 0, 4, 5, 1,
                 1, 5, 6, 2,
                 2, 6, 7, 3,
                 3, 7, 4, 0)

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
        scene = context.scene

        fg = scene.render_layers.get("Foreground")
        bg = scene.render_layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create a sample lamp if there are no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create a sample object if there are no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure ground is available on the Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        scene = context.scene
        current_active_layer = scene.active_layer

        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        # The active layer has probably changed, set it back to the original
        # value. NOTE: the active layer must always be enabled.
        scene.layers[current_active_layer] = True

        return {'FINISHED'}


class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.tracks.active

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip

        track = clip.tracking.tracks.active
        framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, track, framenr)

        return {'FINISHED'}


class CLIP_OT_track_settings_to_track(bpy.types.Operator):
    """Copy tracking settings from active track to selected tracks"""

    bl_label = "Copy Track Settings"
    bl_idname = "clip.track_settings_to_track"
    bl_options = {'UNDO', 'REGISTER'}

    _attrs_track = (
        "correlation_min",
        "frames_limit",
        "pattern_match",
        "margin",
        "motion_model",
        "use_brute",
        "use_normalization",
        "use_mask",
        "use_red_channel",
        "use_green_channel",
        "use_blue_channel",
        "weight"
        )

    _attrs_marker = (
        "pattern_corners",
        "search_min",
        "search_max",
        )

    @classmethod
    def poll(cls, context):
        space = context.space_data
        if space.type != 'CLIP_EDITOR':
            return False
        clip = space.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        space = context.space_data
        clip = space.clip
        track = clip.tracking.tracks.active

        framenr = context.scene.frame_current - clip.frame_start + 1
        marker = track.markers.find_frame(framenr, False)

        for t in clip.tracking.tracks:
            if t.select and t != track:
                marker_selected = t.markers.find_frame(framenr, False)
                for attr in self._attrs_track:
                    setattr(t, attr, getattr(track, attr))
                for attr in self._attrs_marker:
                    setattr(marker_selected, attr, getattr(marker, attr))

        return {'FINISHED'}


classes = (
    CLIP_OT_bundles_to_mesh,
    CLIP_OT_constraint_to_fcurve,
    CLIP_OT_delete_proxy,
    CLIP_OT_filter_tracks,
    CLIP_OT_set_active_clip,
    CLIP_OT_set_viewport_background,
    CLIP_OT_setup_tracking_scene,
    CLIP_OT_track_settings_as_default,
    CLIP_OT_track_settings_to_track,
    CLIP_OT_track_to_empty,
)
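
# Registration is handled by the enclosing bl_operators package; a minimal
# sketch of what its register()/unregister() are expected to do with this
# "classes" tuple (assuming the stock startup layout) is:
#
#     from bpy.utils import register_class, unregister_class
#
#     def register():
#         for cls in classes:
#             register_class(cls)
#
#     def unregister():
#         for cls in reversed(classes):
#             unregister_class(cls)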