release/scripts/startup/bl_operators/clip.py
# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

# <pep8 compliant>
import bpy
import os
from bpy.types import Operator
from bpy.props import FloatProperty
from mathutils import (
    Vector,
    Matrix,
)


def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
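    """Call ``callback(space, *args)`` for every space of type ``tspace``
    inside areas of type ``tarea``, on all screens or only the current one.
    """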
    screens = bpy.data.screens if all_screens else [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type == tarea:
                for space in area.spaces:
                    if space.type == tspace:
                        callback(space, *args)


def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
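    """Show the given movie clip as the camera background image in every
    3D View space, walking all screens when requested.
    """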
    def set_background(space_v3d, clip, user):
        bgpic = None

        for x in space_v3d.background_images:
            if x.source == 'MOVIE_CLIP':
                bgpic = x
                break

        if not bgpic:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     set_background, clip, clip_user)


def CLIP_camera_for_clip(context, clip):
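    """Return the camera object whose Camera Solver constraint uses the given
    clip; fall back to the scene camera if no such camera exists.
    """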
    scene = context.scene

    camera = scene.camera

    for ob in scene.objects:
        if ob.type == 'CAMERA':
            for con in ob.constraints:
                if con.type == 'CAMERA_SOLVER':
                    cur_clip = scene.active_clip if con.use_active_clip else con.clip

                    if cur_clip == clip:
                        return ob

    return camera


def CLIP_track_view_selected(sc, track):
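    """Check whether the track is selected through any of the marker areas
    which are currently visible in the clip editor.
    """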
    if track.select_anchor:
        return True

    if sc.show_marker_pattern and track.select_pattern:
        return True

    if sc.show_marker_search and track.select_search:
        return True

    return False


def CLIP_default_settings_from_track(clip, track, framenr):
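    """Copy the settings of the given track (and of its marker at ``framenr``)
    into the clip's default tracking settings.
    """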
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    marker = track.markers.find_frame(framenr, False)
    pattern_bb = marker.pattern_bound_box

    pattern = Vector(pattern_bb[1]) - Vector(pattern_bb[0])
    search = marker.search_max - marker.search_min

    pattern[0] = pattern[0] * width
    pattern[1] = pattern[1] * height

    search[0] = search[0] * width
    search[1] = search[1] * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel
    settings.default_weight = track.weight


class CLIP_OT_filter_tracks(bpy.types.Operator):
    """Filter tracks which have weird-looking spikes in their motion curves"""
    bl_label = "Filter Tracks"
    bl_idname = "clip.filter_tracks"
    bl_options = {'UNDO', 'REGISTER'}

    track_threshold = FloatProperty(
            name="Track Threshold",
            description="Filter threshold to select problematic tracks",
            default=5.0,
            )

    @staticmethod
    def _filter_values(context, threshold):
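        """Select tracks whose per-frame velocity deviates from the average
        velocity by more than ``threshold`` pixels, and return their count.
        """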

        def get_marker_coordinates_in_pixels(clip_size, track, frame_number):
            marker = track.markers.find_frame(frame_number)
            return Vector((marker.co[0] * clip_size[0], marker.co[1] * clip_size[1]))

        def marker_velocity(clip_size, track, frame):
            marker_a = get_marker_coordinates_in_pixels(clip_size, track, frame)
            marker_b = get_marker_coordinates_in_pixels(clip_size, track, frame - 1)
            return marker_a - marker_b

        scene = context.scene
        frame_start = scene.frame_start
        frame_end = scene.frame_end
        clip = context.space_data.clip
        clip_size = clip.size[:]

        bpy.ops.clip.clean_tracks(frames=10, action='DELETE_TRACK')

        tracks_to_clean = set()

        for frame in range(frame_start, frame_end + 1):

            # Find tracks with markers in both this frame and the previous one.
            relevant_tracks = [
                    track for track in clip.tracking.tracks
                    if (track.markers.find_frame(frame) and
                        track.markers.find_frame(frame - 1))]

            if not relevant_tracks:
                continue

            # Get the average velocity and deselect the tracks.
            average_velocity = Vector((0.0, 0.0))
            for track in relevant_tracks:
                track.select = False
                average_velocity += marker_velocity(clip_size, track, frame)
            if len(relevant_tracks) >= 1:
                average_velocity = average_velocity / len(relevant_tracks)

            # Then find all markers that behave differently from the average.
            for track in relevant_tracks:
                track_velocity = marker_velocity(clip_size, track, frame)
                distance = (average_velocity - track_velocity).length

                if distance > threshold:
                    tracks_to_clean.add(track)

        for track in tracks_to_clean:
            track.select = True
        return len(tracks_to_clean)

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return (space.type == 'CLIP_EDITOR') and space.clip

    def execute(self, context):
        num_tracks = self._filter_values(context, self.track_threshold)
        self.report({'INFO'}, "Identified %d problematic tracks" % num_tracks)
        return {'FINISHED'}


class CLIP_OT_set_active_clip(bpy.types.Operator):
    bl_label = "Set Active Clip"
    bl_idname = "clip.set_active_clip"

    @classmethod
    def poll(cls, context):
        space = context.space_data
        return space.type == 'CLIP_EDITOR'

    def execute(self, context):
        clip = context.space_data.clip
        scene = context.scene
        scene.active_clip = clip
        scene.render.resolution_x = clip.size[0]
        scene.render.resolution_y = clip.size[1]
        return {'FINISHED'}


class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will copy the movement of the active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    @staticmethod
    def _link_track(context, clip, tracking_object, track):
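        """Create an Empty object and attach it to the given track by means of
        a Follow Track constraint.
        """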
        sc = context.space_data
        constraint = None
        ob = None

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        ob.select_set(action='SELECT')
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}


class CLIP_OT_bundles_to_mesh(Operator):
    """Create vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        scene = context.scene
        camera = scene.camera
        matrix = Matrix.Identity(4)
        if camera:
            reconstruction = tracking_object.reconstruction
            framenr = scene.frame_current - clip.frame_start + 1
            reconstructed_matrix = reconstruction.cameras.matrix_from_frame(framenr)
            matrix = camera.matrix_world * reconstructed_matrix.inverted()

        mesh = bpy.data.meshes.new(name="Tracks")
        for track in tracking_object.tracks:
            if track.has_bundle:
                new_verts.append(track.bundle)

        if new_verts:
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)

        ob.matrix_world = matrix

        context.scene.objects.link(ob)

        return {'FINISHED'}


class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def invoke(self, context, event):
        wm = context.window_manager

        return wm.invoke_confirm(self, event)

    @staticmethod
    def _rmproxy(abspath):
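        """Remove a proxy file or an entire proxy directory if it exists."""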
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)

            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove proxy per-clip directory
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove [custom] proxy directory if empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}


class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
    """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data

        return sc.clip

    def execute(self, context):
        sc = context.space_data
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)

        return {'FINISHED'}


class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for the object which will copy \
the object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
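        """Bake the motion created by a tracking constraint on ``ob`` into
        location/rotation key-frames and remove the constraint afterwards.
        """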
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find the constraint to be converted
        # TODO: several camera solvers and track followers would fail,
        #       but it is hard to think of a real work-flow where that is useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")

            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")

            return {'CANCELLED'}

        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat

            return {'FINISHED'}

        # Find start and end frames
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        if sfra is None or efra is None:
            return

        # Store object matrices
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices to the object and insert key-frames
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a library or on a hidden layer!
        for ob in scene.objects:
            # Only bake selected objects: query the selection state,
            # do not change it.
            if ob.select_get():
                self._bake_object(scene, ob)

        return {'FINISHED'}


class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005
        if hasattr(scene, "cycles"):
            world.light_settings.ao_factor = 0.05

    @staticmethod
    def _findOrCreateCamera(context):
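        """Return the scene camera, creating one at a default viewpoint if the
        scene has no camera yet.
        """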
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)

        scene.camera = camob

        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
                              Matrix.Rotation(0.815, 4, 'Z') *
                              Matrix.Rotation(0.011, 4, 'Y') *
                              Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure the motion is driven only by the solver
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True
        con.influence = 1.0

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        scene = context.scene
        rlayers = scene.render.layers

        if not scene.render.layers.get("Foreground"):
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render.layers.new("Foreground")

            fg.use_sky = True
            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render.layers.get("Background"):
            bg = scene.render.layers.new("Background")
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _wipeDefaultNodes(tree):
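        """Remove the default Render Layers and Composite nodes so the
        tracking-specific node setup starts from an empty tree.
        """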
        if len(tree.nodes) != 2:
            return False
        types = [node.type for node in tree.nodes]
        types.sort()

        if types[0] == 'COMPOSITE' and types[1] == 'R_LAYERS':
            while tree.nodes:
                tree.nodes.remove(tree.nodes[0])

    @staticmethod
    def _findNode(tree, type):
        for node in tree.nodes:
            if node.type == type:
                return node

        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)

        if not node:
            node = tree.nodes.new(type=type)

        return node

    @staticmethod
    def _needSetupNodes(context):
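        """Check whether compositor nodes still have to be created: either
        there is no node tree yet, or it contains no movie-clip related nodes.
        """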
        scene = context.scene
        tree = scene.node_tree

        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False

        return True

    @staticmethod
    def _offsetNodes(tree):
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
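        """Create the compositor node setup which combines the undistorted
        footage with the Foreground and Background render layers.
        """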
        if not self._needSetupNodes(context):
            # Compositor nodes were already set up or even changed already,
            # do nothing to avoid damaging the existing node setup.
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree
        clip = sc.clip

        need_stabilization = False

        # Remove all the nodes if they came from the default node setup.
        # This is the simplest way to make sure the final node setup
        # is correct.
        self._wipeDefaultNodes(tree)

        # create nodes
        rlayer_fg = self._findOrCreateNode(tree, 'CompositorNodeRLayers')
        rlayer_bg = tree.nodes.new(type='CompositorNodeRLayers')
        composite = self._findOrCreateNode(tree, 'CompositorNodeComposite')

        movieclip = tree.nodes.new(type='CompositorNodeMovieClip')
        distortion = tree.nodes.new(type='CompositorNodeMovieDistortion')

        if need_stabilization:
            stabilize = tree.nodes.new(type='CompositorNodeStabilize2D')

        scale = tree.nodes.new(type='CompositorNodeScale')
        invert = tree.nodes.new(type='CompositorNodeInvert')
        add_ao = tree.nodes.new(type='CompositorNodeMixRGB')
        add_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_shadow = tree.nodes.new(type='CompositorNodeMixRGB')
        mul_image = tree.nodes.new(type='CompositorNodeMixRGB')
        vector_blur = tree.nodes.new(type='CompositorNodeVecBlur')
        alphaover = tree.nodes.new(type='CompositorNodeAlphaOver')
        viewer = tree.nodes.new(type='CompositorNodeViewer')

        # setup nodes
        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_ao.show_preview = False
        add_shadow.blend_type = 'ADD'
        add_shadow.show_preview = False

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8
        mul_shadow.show_preview = False

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8
        mul_image.show_preview = False

        vector_blur.factor = 0.75

        # create links
        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"])

        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        # place nodes
        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were created on the position of an existing node
        self._offsetNodes(tree)

        scene.render.alpha_mode = 'TRANSPARENT'
        if hasattr(scene, "cycles"):
            scene.cycles.film_transparent = True

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
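        """Create a quad-only mesh from a list of vertices and a flat list of
        loop vertex indices, link the resulting object to the scene and return it.
        """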
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)

        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):

        return [(-half_size, -half_size, z),
                (half_size, -half_size, z),
                (half_size, half_size, z),
                (-half_size, half_size, z)]

    def _createGround(self, scene):
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        scene = context.scene

        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob

        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):

        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.distance = 30
        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        faces = (0, 1, 2, 3,
                 4, 7, 6, 5,
                 0, 4, 5, 1,
                 1, 5, 6, 2,
                 2, 6, 7, 3,
                 3, 7, 4, 0)

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
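        """Make sure the scene contains a lamp, a sample object and a ground
        plane, each assigned to the appropriate Foreground/Background layers.
        """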
        scene = context.scene

        fg = scene.render.layers.get("Foreground")
        bg = scene.render.layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create a sample lamp if there are no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create a sample object if there are no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create a ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure the ground is available on the Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        scene = context.scene
        current_active_layer = scene.active_layer

        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        # The visible layers have probably changed; make sure the original
        # active layer is still enabled (the active layer must always be visible).
        scene.layers[current_active_layer] = True

        return {'FINISHED'}


class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.tracks.active

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip

        track = clip.tracking.tracks.active
        framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, track, framenr)

        return {'FINISHED'}


class CLIP_OT_track_settings_to_track(bpy.types.Operator):
    """Copy tracking settings from active track to selected tracks"""

    bl_label = "Copy Track Settings"
    bl_idname = "clip.track_settings_to_track"
    bl_options = {'UNDO', 'REGISTER'}

    _attrs_track = (
        "correlation_min",
        "frames_limit",
        "pattern_match",
        "margin",
        "motion_model",
        "use_brute",
        "use_normalization",
        "use_mask",
        "use_red_channel",
        "use_green_channel",
        "use_blue_channel",
        "weight"
        )

    _attrs_marker = (
        "pattern_corners",
        "search_min",
        "search_max",
        )

    @classmethod
    def poll(cls, context):
        space = context.space_data
        if space.type != 'CLIP_EDITOR':
            return False
        clip = space.clip
        return clip and clip.tracking.tracks.active

    def execute(self, context):
        space = context.space_data
        clip = space.clip
        track = clip.tracking.tracks.active

        framenr = context.scene.frame_current - clip.frame_start + 1
        marker = track.markers.find_frame(framenr, False)

        for t in clip.tracking.tracks:
            if t.select and t != track:
                marker_selected = t.markers.find_frame(framenr, False)
                for attr in self._attrs_track:
                    setattr(t, attr, getattr(track, attr))
                for attr in self._attrs_marker:
                    setattr(marker_selected, attr, getattr(marker, attr))

        return {'FINISHED'}


classes = (
    CLIP_OT_bundles_to_mesh,
    CLIP_OT_constraint_to_fcurve,
    CLIP_OT_delete_proxy,
    CLIP_OT_filter_tracks,
    CLIP_OT_set_active_clip,
    CLIP_OT_set_viewport_background,
    CLIP_OT_setup_tracking_scene,
    CLIP_OT_track_settings_as_default,
    CLIP_OT_track_settings_to_track,
    CLIP_OT_track_to_empty,
)
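

# NOTE: this module only collects its classes; registration is expected to be
# handled by the package that imports it.  A minimal standalone sketch, assuming
# plain bpy.utils registration (not how the bundled startup scripts do it):
#
#     register, unregister = bpy.utils.register_classes_factory(classes)
#
#     if __name__ == "__main__":
#         register()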