# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

import bpy
import os
from bpy.types import Operator
from mathutils import Vector, Matrix


def CLIP_spaces_walk(context, all_screens, tarea, tspace, callback, *args):
    screens = bpy.data.screens if all_screens else [context.screen]

    for screen in screens:
        for area in screen.areas:
            if area.type == tarea:
                for space in area.spaces:
                    if space.type == tspace:
                        callback(space, *args)


def CLIP_set_viewport_background(context, all_screens, clip, clip_user):
    def set_background(space_v3d, clip, user):
        bgpic = None

        # Reuse an existing movie-clip background image if there is one.
        for x in space_v3d.background_images:
            if x.source == 'MOVIE_CLIP':
                bgpic = x
                break

        if not bgpic:
            bgpic = space_v3d.background_images.new()

        bgpic.source = 'MOVIE_CLIP'
        bgpic.clip = clip
        bgpic.clip_user.proxy_render_size = user.proxy_render_size
        bgpic.clip_user.use_render_undistorted = True
        bgpic.use_camera_clip = False
        bgpic.view_axis = 'CAMERA'

        space_v3d.show_background_images = True

    CLIP_spaces_walk(context, all_screens, 'VIEW_3D', 'VIEW_3D',
                     set_background, clip, clip_user)


def CLIP_camera_for_clip(context, clip):
    scene = context.scene
    camera = scene.camera

    # Prefer a camera whose solver constraint uses this clip.
    for ob in scene.objects:
        if ob.type == 'CAMERA':
            for con in ob.constraints:
                if con.type == 'CAMERA_SOLVER':
                    cur_clip = scene.active_clip if con.use_active_clip else con.clip
                    if cur_clip == clip:
                        return ob
    return camera


def CLIP_track_view_selected(sc, track):
    if track.select_anchor:
        return True
    if sc.show_marker_pattern and track.select_pattern:
        return True
    if sc.show_marker_search and track.select_search:
        return True
    return False


def CLIP_default_settings_from_track(clip, track, framenr):
    settings = clip.tracking.settings

    width = clip.size[0]
    height = clip.size[1]

    marker = track.markers.find_frame(framenr, False)
    pattern_bb = marker.pattern_bound_box

    pattern = Vector(pattern_bb[1]) - Vector(pattern_bb[0])
    search = marker.search_max - marker.search_min

    pattern[0] = pattern[0] * width
    pattern[1] = pattern[1] * height

    search[0] = search[0] * width
    search[1] = search[1] * height

    settings.default_correlation_min = track.correlation_min
    settings.default_pattern_size = max(pattern[0], pattern[1])
    settings.default_search_size = max(search[0], search[1])
    settings.default_frames_limit = track.frames_limit
    settings.default_pattern_match = track.pattern_match
    settings.default_margin = track.margin
    settings.default_motion_model = track.motion_model
    settings.use_default_brute = track.use_brute
    settings.use_default_normalization = track.use_normalization
    settings.use_default_mask = track.use_mask
    settings.use_default_red_channel = track.use_red_channel
    settings.use_default_green_channel = track.use_green_channel
    settings.use_default_blue_channel = track.use_blue_channel


class CLIP_OT_track_to_empty(Operator):
    """Create an Empty object which will copy the movement of the active track"""

    bl_idname = "clip.track_to_empty"
    bl_label = "Link Empty to Track"
    bl_options = {'UNDO', 'REGISTER'}

    def _link_track(self, context, clip, tracking_object, track):
        sc = context.space_data
        constraint = None

        ob = bpy.data.objects.new(name=track.name, object_data=None)
        context.scene.objects.link(ob)
        context.scene.objects.active = ob

        # Reuse an existing Follow Track constraint if the object has one.
        for con in ob.constraints:
            if con.type == 'FOLLOW_TRACK':
                constraint = con
                break

        if constraint is None:
            constraint = ob.constraints.new(type='FOLLOW_TRACK')

        constraint.use_active_clip = False
        constraint.clip = sc.clip
        constraint.track = track.name
        constraint.use_3d_position = False
        constraint.object = tracking_object.name
        constraint.camera = CLIP_camera_for_clip(context, clip)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        for track in tracking_object.tracks:
            if CLIP_track_view_selected(sc, track):
                self._link_track(context, clip, tracking_object, track)

        return {'FINISHED'}


class CLIP_OT_bundles_to_mesh(Operator):
    """Create a vertex cloud using coordinates of reconstructed tracks"""

    bl_idname = "clip.bundles_to_mesh"
    bl_label = "3D Markers to Mesh"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data
        return (sc.type == 'CLIP_EDITOR') and sc.clip

    def execute(self, context):
        from bpy_extras.io_utils import unpack_list

        sc = context.space_data
        clip = sc.clip
        tracking_object = clip.tracking.objects.active

        new_verts = []

        # Collect one vertex per reconstructed (bundled) track.
        mesh = bpy.data.meshes.new(name="Tracks")
        for track in tracking_object.tracks:
            if track.has_bundle:
                new_verts.append(track.bundle)

        if new_verts:
            mesh.vertices.add(len(new_verts))
            mesh.vertices.foreach_set("co", unpack_list(new_verts))

        ob = bpy.data.objects.new(name="Tracks", object_data=mesh)

        context.scene.objects.link(ob)

        return {'FINISHED'}


class CLIP_OT_delete_proxy(Operator):
    """Delete movie clip proxy files from the hard drive"""

    bl_idname = "clip.delete_proxy"
    bl_label = "Delete Proxy"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data
        return sc.clip

    def invoke(self, context, event):
        wm = context.window_manager
        return wm.invoke_confirm(self, event)

    def _rmproxy(self, abspath):
        import shutil

        if not os.path.exists(abspath):
            return

        if os.path.isdir(abspath):
            shutil.rmtree(abspath)
        else:
            os.remove(abspath)

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip

        if clip.use_proxy_custom_directory:
            proxydir = clip.proxy.directory
        else:
            clipdir = os.path.dirname(clip.filepath)
            proxydir = os.path.join(clipdir, "BL_proxy")

        clipfile = os.path.basename(clip.filepath)
        proxy = os.path.join(proxydir, clipfile)
        absproxy = bpy.path.abspath(proxy)

        # proxy_<quality>[_undistorted]
        for x in (25, 50, 75, 100):
            d = os.path.join(absproxy, "proxy_%d" % x)
            self._rmproxy(d)
            self._rmproxy(d + "_undistorted")
            self._rmproxy(os.path.join(absproxy, "proxy_%d.avi" % x))

        tc = ("free_run.blen_tc",
              "interp_free_run.blen_tc",
              "record_run.blen_tc")

        for x in tc:
            self._rmproxy(os.path.join(absproxy, x))

        # remove proxy per-clip directory
        try:
            os.rmdir(absproxy)
        except OSError:
            pass

        # remove [custom] proxy directory if empty
        try:
            absdir = bpy.path.abspath(proxydir)
            os.rmdir(absdir)
        except OSError:
            pass

        return {'FINISHED'}


class CLIP_OT_set_viewport_background(Operator):
    """Set current movie clip as a camera background in 3D view-port """ \
    """(works only when a 3D view-port is visible)"""

    bl_idname = "clip.set_viewport_background"
    bl_label = "Set as Background"
    bl_options = {'REGISTER'}

    @classmethod
    def poll(cls, context):
        if context.space_data.type != 'CLIP_EDITOR':
            return False

        sc = context.space_data
        return sc.clip

    def execute(self, context):
        sc = context.space_data
        CLIP_set_viewport_background(context, False, sc.clip, sc.clip_user)

        return {'FINISHED'}


class CLIP_OT_constraint_to_fcurve(Operator):
    """Create F-Curves for object which will copy \
object's movement caused by this constraint"""

    bl_idname = "clip.constraint_to_fcurve"
    bl_label = "Constraint to F-Curve"
    bl_options = {'UNDO', 'REGISTER'}

    def _bake_object(self, scene, ob):
        con = None
        clip = None
        sfra = None
        efra = None
        frame_current = scene.frame_current
        matrices = []

        # Find constraint which would be converting
        # TODO: several camera solvers and track followers would fail,
        #       but can't think about real work-flow where it'll be useful
        for x in ob.constraints:
            if x.type in {'CAMERA_SOLVER', 'FOLLOW_TRACK', 'OBJECT_SOLVER'}:
                con = x

        if not con:
            self.report({'ERROR'},
                        "Motion Tracking constraint to be converted not found")
            return {'CANCELLED'}

        # Get clip used for parenting
        if con.use_active_clip:
            clip = scene.active_clip
        else:
            clip = con.clip

        if not clip:
            self.report({'ERROR'},
                        "Movie clip to use tracking data from isn't set")
            return {'CANCELLED'}

        if con.type == 'FOLLOW_TRACK' and con.use_3d_position:
            mat = ob.matrix_world.copy()
            ob.constraints.remove(con)
            ob.matrix_world = mat
            return {'FINISHED'}

        # Find start and end frames
        for track in clip.tracking.tracks:
            if sfra is None:
                sfra = track.markers[0].frame
            else:
                sfra = min(sfra, track.markers[0].frame)

            if efra is None:
                efra = track.markers[-1].frame
            else:
                efra = max(efra, track.markers[-1].frame)

        if sfra is None or efra is None:
            return

        # Store object matrices
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            matrices.append(ob.matrix_world.copy())

        ob.animation_data_create()

        # Apply matrices on object and insert key-frames
        i = 0
        for x in range(sfra, efra + 1):
            scene.frame_set(x)
            ob.matrix_world = matrices[i]

            ob.keyframe_insert("location")

            if ob.rotation_mode == 'QUATERNION':
                ob.keyframe_insert("rotation_quaternion")
            else:
                ob.keyframe_insert("rotation_euler")

            i += 1

        ob.constraints.remove(con)

        scene.frame_set(frame_current)

    def execute(self, context):
        scene = context.scene
        # XXX, should probably use context.selected_editable_objects
        # since selected objects can be from a lib or in hidden layer!
        for ob in scene.objects:
            if ob.select:
                self._bake_object(scene, ob)

        return {'FINISHED'}


class CLIP_OT_setup_tracking_scene(Operator):
    """Prepare scene for compositing 3D objects into this footage"""

    bl_idname = "clip.setup_tracking_scene"
    bl_label = "Setup Tracking Scene"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.reconstruction.is_valid

    @staticmethod
    def _setupScene(context):
        scene = context.scene
        scene.active_clip = context.space_data.clip

    @staticmethod
    def _setupWorld(context):
        scene = context.scene
        world = scene.world

        if not world:
            world = bpy.data.worlds.new(name="World")
            scene.world = world

        world.light_settings.use_ambient_occlusion = True
        world.light_settings.ao_blend_type = 'MULTIPLY'

        world.light_settings.use_environment_light = True
        world.light_settings.environment_energy = 0.1

        world.light_settings.distance = 1.0
        world.light_settings.sample_method = 'ADAPTIVE_QMC'
        world.light_settings.samples = 7
        world.light_settings.threshold = 0.005

    @staticmethod
    def _findOrCreateCamera(context):
        scene = context.scene

        if scene.camera:
            return scene.camera

        cam = bpy.data.cameras.new(name="Camera")
        camob = bpy.data.objects.new(name="Camera", object_data=cam)
        scene.objects.link(camob)
        scene.camera = camob

        camob.matrix_local = (Matrix.Translation((7.481, -6.508, 5.344)) *
                              Matrix.Rotation(0.815, 4, 'Z') *
                              Matrix.Rotation(0.011, 4, 'Y') *
                              Matrix.Rotation(1.109, 4, 'X'))

        return camob

    @staticmethod
    def _setupCamera(context):
        sc = context.space_data
        clip = sc.clip
        tracking = clip.tracking

        camob = CLIP_OT_setup_tracking_scene._findOrCreateCamera(context)
        cam = camob.data

        # Remove all constraints to be sure motion is fine
        camob.constraints.clear()

        # Append camera solver constraint
        con = camob.constraints.new(type='CAMERA_SOLVER')
        con.use_active_clip = True

        cam.sensor_width = tracking.camera.sensor_width
        cam.lens = tracking.camera.focal_length

    @staticmethod
    def _setupViewport(context):
        sc = context.space_data
        CLIP_set_viewport_background(context, True, sc.clip, sc.clip_user)

    @staticmethod
    def _setupRenderLayers(context):
        scene = context.scene
        rlayers = scene.render.layers

        if not scene.render.layers.get("Foreground"):
            if len(rlayers) == 1:
                fg = rlayers[0]
                fg.name = 'Foreground'
            else:
                fg = scene.render.layers.new("Foreground")

            fg.layers = [True] + [False] * 19
            fg.layers_zmask = [False] * 10 + [True] + [False] * 9
            fg.use_pass_vector = True

        if not scene.render.layers.get("Background"):
            bg = scene.render.layers.new("Background")
            bg.use_pass_shadow = True
            bg.use_pass_ambient_occlusion = True
            bg.layers = [False] * 10 + [True] + [False] * 9

    @staticmethod
    def _findNode(tree, type):
        for node in tree.nodes:
            if node.type == type:
                return node
        return None

    @staticmethod
    def _findOrCreateNode(tree, type):
        node = CLIP_OT_setup_tracking_scene._findNode(tree, type)
        if not node:
            node = tree.nodes.new(type=type)
        return node

    @staticmethod
    def _needSetupNodes(context):
        scene = context.scene
        tree = scene.node_tree
        if not tree:
            # No compositor node tree found, time to create it!
            return True

        for node in tree.nodes:
            if node.type in {'MOVIECLIP', 'MOVIEDISTORTION'}:
                return False
        return True

    @staticmethod
    def _offsetNodes(tree):
        for a in tree.nodes:
            for b in tree.nodes:
                if a != b and a.location == b.location:
                    b.location += Vector((40.0, 20.0))

    def _setupNodes(self, context):
        if not self._needSetupNodes(context):
            # Compositor nodes were already set up or changed manually,
            # do nothing to avoid damaging them.
            return

        # Enable backdrop for all compositor spaces
        def setup_space(space):
            space.show_backdrop = True

        CLIP_spaces_walk(context, True, 'NODE_EDITOR', 'NODE_EDITOR',
                         setup_space)

        sc = context.space_data
        scene = context.scene
        scene.use_nodes = True
        tree = scene.node_tree

        clip = sc.clip
        need_stabilization = False

        rlayer_fg = self._findOrCreateNode(tree, 'R_LAYERS')
        rlayer_bg = tree.nodes.new(type='R_LAYERS')
        composite = self._findOrCreateNode(tree, 'COMPOSITE')

        movieclip = tree.nodes.new(type='MOVIECLIP')
        distortion = tree.nodes.new(type='MOVIEDISTORTION')

        if need_stabilization:
            stabilize = tree.nodes.new(type='STABILIZE2D')

        scale = tree.nodes.new(type='SCALE')
        invert = tree.nodes.new(type='INVERT')
        add_ao = tree.nodes.new(type='MIX_RGB')
        add_shadow = tree.nodes.new(type='MIX_RGB')
        mul_shadow = tree.nodes.new(type='MIX_RGB')
        mul_image = tree.nodes.new(type='MIX_RGB')
        vector_blur = tree.nodes.new(type='VECBLUR')
        alphaover = tree.nodes.new(type='ALPHAOVER')
        viewer = tree.nodes.new(type='VIEWER')

        movieclip.clip = clip

        distortion.clip = clip
        distortion.distortion_type = 'UNDISTORT'

        if need_stabilization:
            stabilize.clip = clip

        scale.space = 'RENDER_SIZE'

        rlayer_bg.scene = scene
        rlayer_bg.layer = "Background"

        rlayer_fg.scene = scene
        rlayer_fg.layer = "Foreground"

        add_ao.blend_type = 'ADD'
        add_shadow.blend_type = 'ADD'

        mul_shadow.blend_type = 'MULTIPLY'
        mul_shadow.inputs["Fac"].default_value = 0.8

        mul_image.blend_type = 'MULTIPLY'
        mul_image.inputs["Fac"].default_value = 0.8

        vector_blur.factor = 0.75

        tree.links.new(movieclip.outputs["Image"], distortion.inputs["Image"])

        if need_stabilization:
            tree.links.new(distortion.outputs["Image"],
                           stabilize.inputs["Image"])
            tree.links.new(stabilize.outputs["Image"], scale.inputs["Image"])
        else:
            tree.links.new(distortion.outputs["Image"], scale.inputs["Image"])

        tree.links.new(rlayer_bg.outputs["Alpha"], invert.inputs["Color"])

        tree.links.new(invert.outputs["Color"], add_shadow.inputs[1])
        tree.links.new(rlayer_bg.outputs["Shadow"], add_shadow.inputs[2])

        tree.links.new(invert.outputs["Color"], add_ao.inputs[1])
        tree.links.new(rlayer_bg.outputs["AO"], add_ao.inputs[2])

        tree.links.new(add_ao.outputs["Image"], mul_shadow.inputs[1])
        tree.links.new(add_shadow.outputs["Image"], mul_shadow.inputs[2])

        tree.links.new(scale.outputs["Image"], mul_image.inputs[1])
        tree.links.new(mul_shadow.outputs["Image"], mul_image.inputs[2])

        tree.links.new(rlayer_fg.outputs["Image"], vector_blur.inputs["Image"])
        tree.links.new(rlayer_fg.outputs["Z"], vector_blur.inputs["Z"])
        tree.links.new(rlayer_fg.outputs["Speed"], vector_blur.inputs["Speed"])

        tree.links.new(mul_image.outputs["Image"], alphaover.inputs[1])
        tree.links.new(vector_blur.outputs["Image"], alphaover.inputs[2])

        tree.links.new(alphaover.outputs["Image"], composite.inputs["Image"])
        tree.links.new(alphaover.outputs["Image"], viewer.inputs["Image"])

        movieclip.location = Vector((-300.0, 350.0))

        distortion.location = movieclip.location
        distortion.location += Vector((200.0, 0.0))

        if need_stabilization:
            stabilize.location = distortion.location
            stabilize.location += Vector((200.0, 0.0))

            scale.location = stabilize.location
            scale.location += Vector((200.0, 0.0))
        else:
            scale.location = distortion.location
            scale.location += Vector((200.0, 0.0))

        rlayer_bg.location = movieclip.location
        rlayer_bg.location -= Vector((0.0, 350.0))

        invert.location = rlayer_bg.location
        invert.location += Vector((250.0, 50.0))

        add_ao.location = invert.location
        add_ao.location[0] += 200
        add_ao.location[1] = rlayer_bg.location[1]

        add_shadow.location = add_ao.location
        add_shadow.location -= Vector((0.0, 250.0))

        mul_shadow.location = add_ao.location
        mul_shadow.location += Vector((200.0, -50.0))

        mul_image.location = mul_shadow.location
        mul_image.location += Vector((300.0, 200.0))

        rlayer_fg.location = rlayer_bg.location
        rlayer_fg.location -= Vector((0.0, 500.0))

        vector_blur.location[0] = mul_image.location[0]
        vector_blur.location[1] = rlayer_fg.location[1]

        alphaover.location[0] = vector_blur.location[0] + 350
        alphaover.location[1] = \
            (vector_blur.location[1] + mul_image.location[1]) / 2

        composite.location = alphaover.location
        composite.location += Vector((200.0, -100.0))

        viewer.location = composite.location
        composite.location += Vector((0.0, 200.0))

        # ensure no nodes were created on the position of an existing node
        self._offsetNodes(tree)

    @staticmethod
    def _createMesh(scene, name, vertices, faces):
        from bpy_extras.io_utils import unpack_list

        mesh = bpy.data.meshes.new(name=name)

        mesh.vertices.add(len(vertices))
        mesh.vertices.foreach_set("co", unpack_list(vertices))

        nbr_loops = len(faces)
        nbr_polys = nbr_loops // 4
        mesh.loops.add(nbr_loops)
        mesh.polygons.add(nbr_polys)

        mesh.polygons.foreach_set("loop_start", range(0, nbr_loops, 4))
        mesh.polygons.foreach_set("loop_total", (4,) * nbr_polys)
        mesh.loops.foreach_set("vertex_index", faces)

        mesh.update()

        ob = bpy.data.objects.new(name=name, object_data=mesh)
        scene.objects.link(ob)

        return ob

    @staticmethod
    def _getPlaneVertices(half_size, z):
        return [(-half_size, -half_size, z),
                (-half_size, half_size, z),
                (half_size, half_size, z),
                (half_size, -half_size, z)]

    def _createGround(self, scene):
        vertices = self._getPlaneVertices(4.0, 0.0)
        faces = [0, 1, 2, 3]

        ob = self._createMesh(scene, "Ground", vertices, faces)
        ob["is_ground"] = True

        return ob

    @staticmethod
    def _findGround(context):
        scene = context.scene
        for ob in scene.objects:
            if ob.type == 'MESH' and "is_ground" in ob:
                return ob
        return None

    @staticmethod
    def _mergeLayers(layers_a, layers_b):
        return [(layers_a[i] | layers_b[i]) for i in range(len(layers_a))]

    @staticmethod
    def _createLamp(scene):
        lamp = bpy.data.lamps.new(name="Lamp", type='POINT')
        lampob = bpy.data.objects.new(name="Lamp", object_data=lamp)
        scene.objects.link(lampob)

        lampob.matrix_local = Matrix.Translation((4.076, 1.005, 5.904))

        lamp.shadow_method = 'RAY_SHADOW'

        return lampob

    def _createSampleObject(self, scene):
        vertices = self._getPlaneVertices(1.0, -1.0) + \
            self._getPlaneVertices(1.0, 1.0)
        # Six quads: bottom, top and the four sides of the cube.
        faces = (0, 1, 2, 3,  4, 7, 6, 5,  0, 4, 5, 1,
                 1, 5, 6, 2,  2, 6, 7, 3,  3, 7, 4, 0)

        return self._createMesh(scene, "Cube", vertices, faces)

    def _setupObjects(self, context):
        scene = context.scene

        fg = scene.render.layers.get("Foreground")
        bg = scene.render.layers.get("Background")

        all_layers = self._mergeLayers(fg.layers, bg.layers)

        # ensure all lamps are active on foreground and background
        has_lamp = False
        has_mesh = False
        for ob in scene.objects:
            if ob.type == 'LAMP':
                ob.layers = all_layers
                has_lamp = True
            elif ob.type == 'MESH' and "is_ground" not in ob:
                has_mesh = True

        # create sample lamp if there's no lamps in the scene
        if not has_lamp:
            lamp = self._createLamp(scene)
            lamp.layers = all_layers

        # create sample object if there's no meshes in the scene
        if not has_mesh:
            ob = self._createSampleObject(scene)
            ob.layers = fg.layers

        # create ground object if needed
        ground = self._findGround(context)
        if not ground:
            ground = self._createGround(scene)
            ground.layers = bg.layers
        else:
            # make sure ground is available on Background layer
            ground.layers = self._mergeLayers(ground.layers, bg.layers)

        # layers with background and foreground should be rendered
        scene.layers = self._mergeLayers(scene.layers, all_layers)

    def execute(self, context):
        self._setupScene(context)
        self._setupWorld(context)
        self._setupCamera(context)
        self._setupViewport(context)
        self._setupRenderLayers(context)
        self._setupNodes(context)
        self._setupObjects(context)

        return {'FINISHED'}


class CLIP_OT_track_settings_as_default(Operator):
    """Copy tracking settings from active track to default settings"""

    bl_idname = "clip.track_settings_as_default"
    bl_label = "Track Settings As Default"
    bl_options = {'UNDO', 'REGISTER'}

    @classmethod
    def poll(cls, context):
        sc = context.space_data

        if sc.type != 'CLIP_EDITOR':
            return False

        clip = sc.clip

        return clip and clip.tracking.tracks.active

    def execute(self, context):
        sc = context.space_data
        clip = sc.clip

        track = clip.tracking.tracks.active
        framenr = context.scene.frame_current - clip.frame_start + 1

        CLIP_default_settings_from_track(clip, track, framenr)

        return {'FINISHED'}
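

# Registration note (illustrative addition, not part of the original module):
# inside Blender these operator classes are picked up and registered by the
# bl_operators startup package, so the module itself ships no register()
# function.  A standalone script or add-on containing the same classes would
# typically register them roughly as sketched below.

classes = (
    CLIP_OT_track_to_empty,
    CLIP_OT_bundles_to_mesh,
    CLIP_OT_delete_proxy,
    CLIP_OT_set_viewport_background,
    CLIP_OT_constraint_to_fcurve,
    CLIP_OT_setup_tracking_scene,
    CLIP_OT_track_settings_as_default,
)


def register():
    # Register every operator with Blender's RNA system.
    for cls in classes:
        bpy.utils.register_class(cls)


def unregister():
    # Unregister in reverse order to mirror register().
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)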