def test_add_clip(self):
    """ Test the Clip.save method by adding multiple clips """
    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Count the clips currently stored in the project
    clip_count = len(Clip.filter())

    # Build a libopenshot clip from a bundled image and parse its JSON
    source_clip = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    source_data = json.loads(source_clip.Json())

    # Persist the clip into the project data
    new_query = Clip()
    new_query.data = source_data
    new_query.save()
    self.assertTrue(new_query)
    self.assertEqual(len(Clip.filter()), clip_count + 1)

    # Saving again must update in place, not insert a duplicate
    new_query.save()
    self.assertEqual(len(Clip.filter()), clip_count + 1)
def test_filter_clip(self):
    """ Test the Clip.filter method """
    # A known id must match at least one clip
    matches = Clip.filter(id=self.clip_ids[0])
    self.assertTrue(matches)

    # An unknown id must match nothing
    matches = Clip.filter(id="invalidID")
    self.assertEqual(len(matches), 0)
def test_filter_clip(self):
    """ Test the Clip.filter method """
    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Filtering by a known id must return results
    found = Clip.filter(id=TestQueryClass.clip_ids[0])
    self.assertTrue(found)

    # Filtering by a bogus id must return an empty result set
    found = Clip.filter(id="invalidID")
    self.assertEqual(len(found), 0)
def actionRemoveEffect_trigger(self, event):
    """Remove every currently-selected effect from whichever clip holds it."""
    log.info('actionRemoveEffect_trigger')

    # Loop through selected effects
    for effect_id in self.selected_effects:
        log.info("effect id: %s" % effect_id)

        # Search every clip's effect list for this effect id
        for c in Clip.filter():
            match = False
            log.info("c.data[effects]: %s" % c.data["effects"])
            for candidate in c.data["effects"]:
                if candidate["id"] == effect_id:
                    match = candidate
                    break

            if match:
                # Remove the matched effect from the clip data and persist
                c.data["effects"].remove(match)
                c.save()

        # Clear selected effects
        self.removeSelection(effect_id, "effect")
def CutsToClips(cuts):
    """Convert a list of cut dictionaries into openshot.Clip objects.

    Each cut references a project clip by id and carries absolute
    "start"/"end" times; the resulting clips are laid out back-to-back
    starting at position 0.

    Returns a tuple (clips, video_length) where clips is a list of
    openshot.Clip objects and video_length is the summed "video_length"
    of the processed cuts.
    """
    clips = []
    position = 0.0
    video_length = 0

    for cut in cuts:
        # Look up the project clip this cut refers to; skip cuts whose
        # clip no longer exists (previously raised IndexError)
        matches = Clip.filter(id=cut["clip"])
        if not matches:
            log.error('No clip found with id: %s' % cut["clip"])
            continue
        clip_data = matches[0].data

        path = clip_data["reader"]["path"]
        # Cut times are absolute; convert to times relative to the clip
        offset = float(clip_data["position"])
        start = float(cut["start"]) - offset
        end = float(cut["end"]) - offset

        try:
            clip = openshot.Clip(path)
            clip.Start(start)
            clip.End(end)
            clip.Position(position)
            clips.append(clip)
        except Exception:
            # Keep the best-effort contract: return what was built so far
            log.error('Failed to load media file into preview player: %s' % path)
            return clips, video_length

        # Advance the preview timeline by this cut's duration.
        # NOTE: the previous code also subtracted `offset` here, which made
        # successive clip positions drift and overlap.
        position += (end - start)
        video_length += cut["video_length"]

    return clips, video_length
def test_add_clip(self):
    """ Add a clip via Clip.save and verify the project clip count. """
    # Record how many clips exist before the insert
    before = len(Clip.filter())

    # Build a clip from a bundled image and parse its JSON
    img_clip = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    img_data = json.loads(img_clip.Json())

    # Insert into project data
    query_clip = Clip()
    query_clip.data = img_data
    query_clip.save()
    self.assertTrue(query_clip)
    self.assertEqual(len(Clip.filter()), before + 1)

    # A second save must update the record, not add another clip
    query_clip.save()
    self.assertEqual(len(Clip.filter()), before + 1)
def test_intersect(self):
    """ Test special filter argument 'intersect' """
    # Pick a known transition and derive a probe time inside its span
    trans = Transition.get(id=self.transition_ids[0])
    self.assertTrue(trans)
    pos = trans.data.get("position", -1.0)
    duration = trans.data.get("duration", -1.0)
    self.assertTrue(pos >= 0.0)
    self.assertTrue(duration >= 0.0)
    time = pos + (duration / 2)

    def get_times(item):
        # (start, end) of an item on the timeline
        start = item.data.get("position", -1.0)
        return (start, start + item.data.get("duration", -1.0))

    # Partition transitions into intersecting / non-intersecting sets
    t_hits = Transition.filter(intersect=time)
    t_hit_ids = [t.id for t in t_hits]
    t_misses = [x for x in Transition.filter() if x.id not in t_hit_ids]

    # Partition clips the same way
    c_hits = Clip.filter(intersect=time)
    c_hit_ids = [c.id for c in c_hits]
    c_misses = [x for x in Clip.filter() if x.id not in c_hit_ids]

    # Every intersecting item must actually span the probe time
    for item in t_hits + c_hits:
        start, end = get_times(item)
        self.assertTrue(start <= time)
        self.assertTrue(time <= end)

    # Every non-intersecting item must lie entirely on one side of it
    for item in t_misses + c_misses:
        start, end = get_times(item)
        if start < time:
            self.assertTrue(end <= time)
        if end > time:
            self.assertTrue(start >= time)
def actionRemoveClip_trigger(self, event):
    """Delete every currently-selected clip from the project."""
    log.info('actionRemoveClip_trigger')

    # Loop through selected clips
    for clip_id in self.selected_clips:
        for matched in Clip.filter(id=clip_id):
            # Drop the clip from the selection first, then delete it
            self.removeSelection(clip_id, "clip")
            matched.delete()
def actionRemove_from_Project_trigger(self, event):
    """Remove selected files from the project, along with clips using them."""
    log.info("actionRemove_from_Project_trigger")

    for file_id in self.selected_files:
        # Delete the file record (if it still exists)
        f = File.get(id=file_id)
        if f:
            f.delete()

        # Delete any clips that reference this file
        for c in Clip.filter(file_id=file_id):
            c.delete()

    # Reset the file selection
    self.selected_files = []
def test_update_clip(self):
    """ Test the Clip.save method """
    target_id = self.clip_ids[0]
    clip = Clip.get(id=target_id)
    self.assertTrue(clip)

    # Modify a couple of fields and persist
    clip.data["layer"] = 2
    clip.data["title"] = "My Title"
    clip.save()

    # Re-fetch and verify the changes stuck
    clip = Clip.get(id=target_id)
    self.assertEqual(clip.data["layer"], 2)
    self.assertEqual(clip.data["title"], "My Title")

    # Exactly one clip should now live on layer 2
    matches = Clip.filter(layer=2)
    self.assertEqual(len(matches), 1)
def export_xml():
    """Export final cut pro XML file.

    Prompts the user for an output path, fills in a project-level XML
    template, then appends one video + one audio track node per project
    track and one clipitem node per clip (video and/or audio), and writes
    the document to disk.
    """
    app = get_app()
    _ = app._tr  # translation helper

    # Get FPS info
    fps_num = get_app().project.get("fps").get("num", 24)
    fps_den = get_app().project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Ticks (final cut pro value)
    ticks = 254016000000

    # Get path (default next to the project file, or in HOME for unsaved projects)
    recommended_path = get_app().project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.xml" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".xml")
    file_path = QFileDialog.getSaveFileName(app.window, _("Export XML..."), recommended_path,
                                            _("Final Cut Pro (*.xml)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .xml if needed
    if not file_path.endswith(".xml"):
        file_path = "%s.xml" % file_path

    # Get filename with no path
    file_name = os.path.basename(file_path)

    # Determine max frame (based on clips)
    duration = 0.0
    for clip in Clip.filter():
        clip_last_frame = clip.data.get("position") + (clip.data.get("end") - clip.data.get("start"))
        if clip_last_frame > duration:
            # Set max length of timeline
            duration = clip_last_frame

    # XML template path
    xmldoc = minidom.parse(
        os.path.join(info.RESOURCES_PATH, 'export-project-template.xml'))

    # Set Project Details (name, uuid, duration, dimensions, sample rate)
    xmldoc.getElementsByTagName("name")[0].childNodes[0].nodeValue = file_name
    xmldoc.getElementsByTagName("uuid")[0].childNodes[0].nodeValue = str(
        uuid1())
    xmldoc.getElementsByTagName(
        "duration")[0].childNodes[0].nodeValue = duration
    xmldoc.getElementsByTagName(
        "width")[0].childNodes[0].nodeValue = app.project.get("width")
    xmldoc.getElementsByTagName(
        "height")[0].childNodes[0].nodeValue = app.project.get("height")
    xmldoc.getElementsByTagName("samplerate")[0].childNodes[
        0].nodeValue = app.project.get("sample_rate")
    xmldoc.getElementsByTagName("sequence")[0].setAttribute(
        "id", app.project.get("id"))
    for childNode in xmldoc.getElementsByTagName("timebase"):
        childNode.childNodes[0].nodeValue = fps_float

    # Get parent audio node
    parentAudioNode = xmldoc.getElementsByTagName("audio")[0]

    # Loop through tracks (lowest track number first)
    all_tracks = get_app().project.get("layers")
    track_count = 1
    for track in sorted(all_tracks, key=itemgetter('number')):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error and fail silently, and continue
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track details; tracks with no clips are skipped entirely
        track_locked = track.get("lock", False)
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Create video track node
        trackTemplateDoc = minidom.parse(
            os.path.join(info.RESOURCES_PATH, 'export-track-video-template.xml'))
        videoTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        xmldoc.getElementsByTagName("video")[0].appendChild(videoTrackNode)

        # Create audio track nodes (1 for each channel)
        trackTemplateDoc = minidom.parse(
            os.path.join(info.RESOURCES_PATH, 'export-track-audio-template.xml'))
        audioTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        parentAudioNode.appendChild(audioTrackNode)
        audioTrackNode.getElementsByTagName(
            "outputchannelindex")[0].childNodes[0].nodeValue = track_count

        # Is Track Locked?
        if track_locked:
            videoTrackNode.getElementsByTagName(
                "locked")[0].childNodes[0].nodeValue = "TRUE"
            audioTrackNode.getElementsByTagName(
                "locked")[0].childNodes[0].nodeValue = "TRUE"

        # Loop through clips on this track
        for clip in clips_on_track:
            # Create VIDEO clip node (only when the clip's reader has video)
            clipNode = None
            if clip.data.get("reader", {}).get("has_video"):
                clipTemplateDoc = minidom.parse(
                    os.path.join(info.RESOURCES_PATH, 'export-clip-video-template.xml'))
                clipNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                videoTrackNode.appendChild(clipNode)

                # Update clip properties (times converted to frames via fps_float)
                clipNode.setAttribute('id', clip.data.get('id'))
                clipNode.getElementsByTagName("file")[0].setAttribute(
                    'id', clip.data.get('file_id'))
                clipNode.getElementsByTagName(
                    "name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName(
                    "name")[1].childNodes[0].nodeValue = clip.data.get('title')
                # NOTE(review): pathurl is populated with the clip *title*,
                # not the media file path — confirm this is intended.
                clipNode.getElementsByTagName("pathurl")[0].childNodes[
                    0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("in")[0].childNodes[
                    0].nodeValue = clip.data.get('start') * fps_float
                clipNode.getElementsByTagName("out")[0].childNodes[
                    0].nodeValue = clip.data.get('end') * fps_float
                clipNode.getElementsByTagName("start")[0].childNodes[
                    0].nodeValue = clip.data.get('position') * fps_float
                clipNode.getElementsByTagName("end")[0].childNodes[
                    0].nodeValue = (clip.data.get('position') + (clip.data.get('end') -
                                                                 clip.data.get('start'))) * fps_float
                clipNode.getElementsByTagName("duration")[0].childNodes[
                    0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipNode.getElementsByTagName("pproTicksIn")[0].childNodes[
                    0].nodeValue = (clip.data.get('start') * fps_float) * ticks
                clipNode.getElementsByTagName("pproTicksOut")[0].childNodes[
                    0].nodeValue = (clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Opacity", clipNode, clip.data.get('alpha', {}).get('Points', []), 100.0)

            # Create AUDIO clip nodes
            if clip.data.get("reader", {}).get("has_audio"):
                clipTemplateDoc = minidom.parse(
                    os.path.join(info.RESOURCES_PATH, 'export-clip-audio-template.xml'))
                clipAudioNode = clipTemplateDoc.getElementsByTagName(
                    'clipitem')[0]
                audioTrackNode.appendChild(clipAudioNode)

                # Update audio characteristics
                if clipNode:
                    # NOTE(review): "samplerate" is assigned the reader's
                    # *channels* and "channelcount" the reader's *sample_rate*
                    # — these look swapped; confirm against the FCP template.
                    clipNode.getElementsByTagName("samplerate")[0].childNodes[
                        0].nodeValue = clip.data.get("reader", {}).get("channels")
                    clipNode.getElementsByTagName("channelcount")[
                        0].childNodes[0].nodeValue = clip.data.get(
                        "reader", {}).get("sample_rate")
                    clipAudioNode.getElementsByTagName(
                        "file")[0].childNodes.clear()
                else:
                    # Audio-only clip: fill in name/pathurl on the audio node
                    clipAudioNode.getElementsByTagName("name")[1].childNodes[
                        0].nodeValue = clip.data.get('title')
                    clipAudioNode.getElementsByTagName("pathurl")[
                        0].childNodes[0].nodeValue = clip.data.get('title')

                # Update audio clip properties
                clipAudioNode.setAttribute('id', "%s-audio" % clip.data.get('id'))
                clipAudioNode.getElementsByTagName("file")[0].setAttribute(
                    'id', clip.data.get('file_id'))
                clipAudioNode.getElementsByTagName(
                    "trackindex")[0].childNodes[0].nodeValue = track_count
                clipAudioNode.getElementsByTagName(
                    "name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipAudioNode.getElementsByTagName("in")[0].childNodes[
                    0].nodeValue = clip.data.get('start') * fps_float
                clipAudioNode.getElementsByTagName("out")[0].childNodes[
                    0].nodeValue = clip.data.get('end') * fps_float
                clipAudioNode.getElementsByTagName("start")[0].childNodes[
                    0].nodeValue = clip.data.get('position') * fps_float
                clipAudioNode.getElementsByTagName("end")[0].childNodes[
                    0].nodeValue = (clip.data.get('position') + (clip.data.get('end') -
                                                                 clip.data.get('start'))) * fps_float
                clipAudioNode.getElementsByTagName("duration")[0].childNodes[
                    0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipAudioNode.getElementsByTagName(
                    "pproTicksIn")[0].childNodes[0].nodeValue = (
                    clip.data.get('start') * fps_float) * ticks
                clipAudioNode.getElementsByTagName(
                    "pproTicksOut")[0].childNodes[0].nodeValue = (
                    clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Audio Levels", clipAudioNode,
                             clip.data.get('volume', {}).get('Points', []), 1.0)
            else:
                # No audio, remove audio characteristics
                if clipNode:
                    # NOTE(review): getElementsByTagName returns a Python list
                    # in minidom; pop() removes from that list, not the DOM —
                    # this looks like a no-op; confirm intent.
                    clipNode.getElementsByTagName("audio").pop()

        # Update counter
        track_count += 1

    try:
        file = open(os.fsencode(file_path), "wb")  # wb needed for windows support
        file.write(bytes(xmldoc.toxml(), 'UTF-8'))
        file.close()
    except IOError as inst:
        log.error("Error writing XML export: {}".format(str(inst)))
def export_edl():
    """Export EDL File.

    Prompts the user for an output path, then writes one CMX-style EDL
    file per non-empty track (a limitation of the EDL format), including
    blank filler events, FROM CLIP NAME comments, and opacity/audio-level
    keyframe comments.
    """
    app = get_app()
    _ = app._tr  # translation helper

    # EDL Export format: index, reel, channel, transition, then 4 timecodes
    edl_string = "%03d %-9s%-6s%-9s%11s %11s %11s %11s\n"

    # Get FPS info
    fps_num = get_app().project.get("fps").get("num", 24)
    fps_den = get_app().project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get EDL path (default next to project file, or HOME for unsaved projects)
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.edl" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".edl")
    file_path = QFileDialog.getSaveFileName(
        app.window, _("Export EDL..."), recommended_path,
        _("Edit Decision Lists (*.edl)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .edl if needed
    if not file_path.endswith(".edl"):
        file_path = "%s.edl" % file_path

    # Get filename with no extension
    file_name_with_ext = os.path.basename(file_path)
    file_name = os.path.splitext(file_name_with_ext)[0]

    # Iterate tracks highest number first; track_count doubles as a
    # fallback track label suffix
    all_tracks = get_app().project.get("layers")
    track_count = len(all_tracks)
    for track in reversed(sorted(all_tracks, key=itemgetter('number'))):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error and fail silently, and continue
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track name
        track_name = track.get("label") or "TRACK %s" % track_count
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Generate EDL File (1 per track - limitation of EDL format)
        # TODO: Improve and move this into its own class
        with open("%s-%s.edl" % (file_path.replace(".edl", ""), track_name),
                  'w', encoding="utf8") as f:
            # Add Header
            f.write("TITLE: %s - %s\n" % (file_name, track_name))
            f.write("FCM: NON-DROP FRAME\n\n")

            # Loop through each track
            edit_index = 1
            export_position = 0.0

            # Loop through clips on this track
            for clip in clips_on_track:
                # Do we need a blank clip? (gap between previous clip and this one)
                if clip.data.get('position', 0.0) > export_position:
                    # Blank clip (i.e. 00:00:00:00)
                    clip_start_time = secondsToTimecode(0.0, fps_num, fps_den)
                    clip_end_time = secondsToTimecode(
                        clip.data.get('position') - export_position, fps_num, fps_den)
                    timeline_start_time = secondsToTimecode(
                        export_position, fps_num, fps_den)
                    timeline_end_time = secondsToTimecode(
                        clip.data.get('position'), fps_num, fps_den)

                    # Write blank clip
                    f.write(edl_string % (edit_index, "BL"[:9], "V"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))

                # Format clip start/end and timeline start/end values (i.e. 00:00:00:00)
                clip_start_time = secondsToTimecode(clip.data.get('start'), fps_num, fps_den)
                clip_end_time = secondsToTimecode(clip.data.get('end'), fps_num, fps_den)
                timeline_start_time = secondsToTimecode(
                    clip.data.get('position'), fps_num, fps_den)
                timeline_end_time = secondsToTimecode(
                    clip.data.get('position') + (clip.data.get('end') - clip.data.get('start')),
                    fps_num, fps_den)

                has_video = clip.data.get("reader", {}).get("has_video", False)
                has_audio = clip.data.get("reader", {}).get("has_audio", False)
                if has_video:
                    # Video Track
                    f.write(edl_string % (edit_index, "AX"[:9], "V"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))
                if has_audio:
                    # Audio Track
                    f.write(edl_string % (edit_index, "AX"[:9], "A"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))
                f.write("* FROM CLIP NAME: %s\n" % clip.data.get('title'))

                # Add opacity data (if any)
                alpha_points = clip.data.get('alpha', {}).get('Points', [])
                if len(alpha_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in alpha_points:
                        # keyframe X is 1-based frame number; convert to seconds
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = point.get('co', {}).get('Y', 0.0) * 100.0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for opacity_time in sorted(keyframes.keys()):
                        opacity_value = keyframes.get(opacity_time)
                        f.write(
                            "* OPACITY LEVEL AT %s IS %0.2f%% (REEL AX)\n" %
                            (secondsToTimecode(opacity_time, fps_num, fps_den), opacity_value))

                # Add volume data (if any)
                volume_points = clip.data.get('volume', {}).get('Points', [])
                if len(volume_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in volume_points:
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = (point.get('co', {}).get('Y', 0.0) * 99.0) - 99  # Scaling 0-1 to -99-0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for volume_time in sorted(keyframes.keys()):
                        volume_value = keyframes.get(volume_time)
                        f.write(
                            "* AUDIO LEVEL AT %s IS %0.2f DB (REEL AX A1)\n" %
                            (secondsToTimecode(volume_time, fps_num, fps_den), volume_value))

                # Update export position to the end of this clip
                export_position = clip.data.get('position') + (
                    clip.data.get('end') - clip.data.get('start'))
                f.write("\n")
                edit_index += 1

        # Update counters
        track_count -= 1
def changed(self, action):
    """Project data changed: rebuild the cached clip/transition/marker rects."""
    # Drop all cached geometry
    self.clip_rects.clear()
    self.clip_rects_selected.clear()
    self.marker_rects.clear()

    # Map each track number to a row index (top-most track = row 0)
    layers = {}
    for row, track in enumerate(reversed(sorted(Track.filter()))):
        layers[track.data.get('number')] = row

    # Wait for timeline object and valid scrollbar positions
    if hasattr(get_app().window, "timeline") and self.scrollbar_position[2] != 0.0:
        # Horizontal scale: widget pixels per second of project time
        project_duration = get_app().project.get("duration")
        pixels_per_second = self.width() / project_duration
        # Vertical scale: widget pixels per track row
        vertical_factor = self.height() / len(layers.keys())

        # Cache a rect for every clip, then every transition (same geometry rules)
        for query_class, selected_ids in (
                (Clip, get_app().window.selected_clips),
                (Transition, get_app().window.selected_transitions)):
            for item in query_class.filter():
                item_x = item.data.get('position', 0.0) * pixels_per_second
                item_y = layers.get(item.data.get('layer', 0), 0) * vertical_factor
                item_width = (
                    (item.data.get('end', 0.0) - item.data.get('start', 0.0))
                    * pixels_per_second)
                rect = QRectF(item_x, item_y, item_width, 1.0 * vertical_factor)
                if item.id in selected_ids:
                    # selected item
                    self.clip_rects_selected.append(rect)
                else:
                    # un-selected item
                    self.clip_rects.append(rect)

        # Cache a thin full-height rect for every marker
        for marker in Marker.filter():
            marker_x = marker.data.get('position', 0.0) * pixels_per_second
            self.marker_rects.append(
                QRectF(marker_x, 0, 0.5, len(layers) * vertical_factor))

    # Force re-paint
    self.update()