def actionRemoveTrack_trigger(self, event):
    log.info('actionRemoveTrack_trigger')

    for track_id in self.selected_tracks:
        tracks = Track.filter(id=track_id)
        for t in tracks:
            # Remove track
            t.delete()
def addTrack(self, name):
    log.info("add track %s", name)

    # Get # of tracks
    all_tracks = get_app().project.get(["layers"])
    track_number = 1000000
    if all_tracks:
        highest_track = max(all_tracks, key=itemgetter('number'))
        track_number = highest_track.get("number") + 1000000

    # Create new track above existing layer(s)
    track = Track()
    track.data = {
        "number": track_number,
        "id": str(len(all_tracks)),
        "y": 0,
        "label": "",
        "lock": False,
        "name": name
    }
    track.save()
    return track
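# Illustrative sketch (not part of the original module): track "number"
# values are spaced 1,000,000 apart, presumably so layers can later be
# inserted between existing ones without renumbering. A minimal,
# hypothetical helper mirroring the arithmetic in addTrack() above:
def _next_track_number(layers, spacing=1000000):
    """Hypothetical helper: compute the next track number above all layers."""
    if not layers:
        return spacing
    return max(layer.get("number", 0) for layer in layers) + spacing

# e.g. _next_track_number([{"number": 1000000}, {"number": 2000000}]) -> 3000000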
def import_xml():
    """Import final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(
        app.window, _("Import XML..."), recommended_path,
        _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0]
    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)

    # Get audio tracks
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = max(all_tracks, key=itemgetter('number')).get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName("locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {
                "number": track_number,
                "y": 0,
                "label": "XML Import %s" % track_index,
                "lock": is_locked
            }
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName("file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no pathurl node is found. This usually
                    # happens for linked audio clips, which OpenShot combines
                    # with the video clip and can therefore ignore.
                    continue

                clip_path, is_modified, is_skipped = find_missing_file(clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()
                    except Exception:
                        # Ignore errors for now
                        pass

                if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                # Create Clip object
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName("name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(clip_element.getElementsByTagName("start")[0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(clip_element.getElementsByTagName("in")[0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(clip_element.getElementsByTagName("out")[0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName("effect"):
                    effectid = effect_element.getElementsByTagName("effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })
                    elif effectid == "audiolevels":
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })

                # Save clip
                clip.save()

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position())
def __init__(self, files=None, position=0.0):
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from Designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Get settings
    self.settings = settings.get_settings()

    # Get translation object
    self.app = get_app()
    _ = self.app._tr

    # Track metrics
    track_metric_screen("add-to-timeline-screen")

    # Add custom treeview to window
    self.treeFiles = TimelineTreeView(self)
    self.vboxTreeParent.insertWidget(0, self.treeFiles)

    # Update data in model
    self.treeFiles.timeline_model.update_model(files)

    # Refresh view
    self.treeFiles.refresh_view()

    # Init start position
    self.txtStartTime.setValue(position)

    # Init default image length
    self.txtImageLength.setValue(self.settings.get("default-image-length"))
    self.txtImageLength.valueChanged.connect(self.updateTotal)
    self.cmbTransition.currentIndexChanged.connect(self.updateTotal)
    self.cmbFade.currentIndexChanged.connect(self.updateTotal)
    self.txtFadeLength.valueChanged.connect(self.updateTotal)
    self.txtTransitionLength.valueChanged.connect(self.updateTotal)

    # Add all tracks to dropdown
    tracks = Track.filter()
    for track in reversed(tracks):
        # Add to dropdown
        self.cmbTrack.addItem(_('Track %s' % track.data['number']), track.data['number'])

    # Add all fade options
    self.cmbFade.addItem(_('None'), None)
    self.cmbFade.addItem(_('Fade In'), 'Fade In')
    self.cmbFade.addItem(_('Fade Out'), 'Fade Out')
    self.cmbFade.addItem(_('Fade In & Out'), 'Fade In & Out')

    # Add all zoom options
    self.cmbZoom.addItem(_('None'), None)
    self.cmbZoom.addItem(_('Random'), 'Random')
    self.cmbZoom.addItem(_('Zoom In'), 'Zoom In')
    self.cmbZoom.addItem(_('Zoom Out'), 'Zoom Out')

    # Add all transitions
    transitions_dir = os.path.join(info.PATH, "transitions")
    common_dir = os.path.join(transitions_dir, "common")
    extra_dir = os.path.join(transitions_dir, "extra")
    transition_groups = [
        {"type": "common", "dir": common_dir, "files": os.listdir(common_dir)},
        {"type": "extra", "dir": extra_dir, "files": os.listdir(extra_dir)}
    ]

    self.cmbTransition.addItem(_('None'), None)
    self.cmbTransition.addItem(_('Random'), 'random')
    self.transitions = []
    for group in transition_groups:
        type = group["type"]
        dir = group["dir"]
        files = group["files"]

        for filename in sorted(files):
            path = os.path.join(dir, filename)
            (fileBaseName, fileExtension) = os.path.splitext(filename)

            # Skip hidden files (such as .DS_Store, etc...)
            if filename[0] == "." or "thumbs.db" in filename.lower():
                continue

            # Split the name into parts (looking for a number)
            suffix_number = None
            name_parts = fileBaseName.split("_")
            if name_parts[-1].isdigit():
                suffix_number = name_parts[-1]

            # Get name of transition
            trans_name = fileBaseName.replace("_", " ").capitalize()

            # Replace suffix number with placeholder (if any)
            if suffix_number:
                trans_name = trans_name.replace(suffix_number, "%s")
                trans_name = _(trans_name) % suffix_number
            else:
                trans_name = _(trans_name)

            # Check for thumbnail path (in built-in cache)
            thumb_path = os.path.join(info.IMAGES_PATH, "cache", "{}.png".format(fileBaseName))

            # If not found there, check the user folder cache
            if not os.path.exists(thumb_path):
                thumb_path = os.path.join(info.CACHE_PATH, "{}.png".format(fileBaseName))

            # Add item
            self.transitions.append(path)
            self.cmbTransition.addItem(QIcon(thumb_path), _(trans_name), path)

    # Connections
    self.btnMoveUp.clicked.connect(self.btnMoveUpClicked)
    self.btnMoveDown.clicked.connect(self.btnMoveDownClicked)
    self.btnShuffle.clicked.connect(self.btnShuffleClicked)
    self.btnRemove.clicked.connect(self.btnRemoveClicked)
    self.btnBox.accepted.connect(self.accept)
    self.btnBox.rejected.connect(self.reject)

    # Update total
    self.updateTotal()
def export_xml():
    """Export final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Ticks (final cut pro value)
    ticks = 254016000000

    # Get path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.xml" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".xml")
    file_path = QFileDialog.getSaveFileName(app.window, _("Export XML..."), recommended_path, _("Final Cut Pro (*.xml)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .xml if needed
    if not file_path.endswith(".xml"):
        file_path = "%s.xml" % file_path

    # Get filename with no path
    file_name = os.path.basename(file_path)

    # Determine max frame (based on clips)
    duration = 0.0
    for clip in Clip.filter():
        clip_last_frame = clip.data.get("position") + (clip.data.get("end") - clip.data.get("start"))
        if clip_last_frame > duration:
            # Set max length of timeline
            duration = clip_last_frame

    # Parse XML template
    xmldoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-project-template.xml'))

    # Set Project Details
    xmldoc.getElementsByTagName("name")[0].childNodes[0].nodeValue = file_name
    xmldoc.getElementsByTagName("uuid")[0].childNodes[0].nodeValue = str(uuid1())
    xmldoc.getElementsByTagName("duration")[0].childNodes[0].nodeValue = duration
    xmldoc.getElementsByTagName("width")[0].childNodes[0].nodeValue = app.project.get("width")
    xmldoc.getElementsByTagName("height")[0].childNodes[0].nodeValue = app.project.get("height")
    xmldoc.getElementsByTagName("samplerate")[0].childNodes[0].nodeValue = app.project.get("sample_rate")
    xmldoc.getElementsByTagName("sequence")[0].setAttribute("id", app.project.get("id"))
    for childNode in xmldoc.getElementsByTagName("timebase"):
        childNode.childNodes[0].nodeValue = fps_float

    # Get parent audio node
    parentAudioNode = xmldoc.getElementsByTagName("audio")[0]

    # Loop through tracks
    all_tracks = app.project.get("layers")
    track_count = 1
    for track in sorted(all_tracks, key=itemgetter('number')):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log the error and skip this track
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track details
        track_locked = track.get("lock", False)
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Create video track node
        trackTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-track-video-template.xml'))
        videoTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        xmldoc.getElementsByTagName("video")[0].appendChild(videoTrackNode)

        # Create audio track nodes (1 for each channel)
        trackTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-track-audio-template.xml'))
        audioTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        parentAudioNode.appendChild(audioTrackNode)
        audioTrackNode.getElementsByTagName("outputchannelindex")[0].childNodes[0].nodeValue = track_count

        # Is Track Locked?
        if track_locked:
            videoTrackNode.getElementsByTagName("locked")[0].childNodes[0].nodeValue = "TRUE"
            audioTrackNode.getElementsByTagName("locked")[0].childNodes[0].nodeValue = "TRUE"

        # Loop through clips on this track
        for clip in clips_on_track:
            # Create VIDEO clip node
            clipNode = None
            if clip.data.get("reader", {}).get("has_video"):
                clipTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-clip-video-template.xml'))
                clipNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                videoTrackNode.appendChild(clipNode)

                # Update clip properties
                clipNode.setAttribute('id', clip.data.get('id'))
                clipNode.getElementsByTagName("file")[0].setAttribute('id', clip.data.get('file_id'))
                clipNode.getElementsByTagName("name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("name")[1].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("in")[0].childNodes[0].nodeValue = clip.data.get('start') * fps_float
                clipNode.getElementsByTagName("out")[0].childNodes[0].nodeValue = clip.data.get('end') * fps_float
                clipNode.getElementsByTagName("start")[0].childNodes[0].nodeValue = clip.data.get('position') * fps_float
                clipNode.getElementsByTagName("end")[0].childNodes[0].nodeValue = (clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))) * fps_float
                clipNode.getElementsByTagName("duration")[0].childNodes[0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipNode.getElementsByTagName("pproTicksIn")[0].childNodes[0].nodeValue = (clip.data.get('start') * fps_float) * ticks
                clipNode.getElementsByTagName("pproTicksOut")[0].childNodes[0].nodeValue = (clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Opacity", clipNode, clip.data.get('alpha', {}).get('Points', []), 100.0)

            # Create AUDIO clip nodes
            if clip.data.get("reader", {}).get("has_audio"):
                clipTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-clip-audio-template.xml'))
                clipAudioNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                audioTrackNode.appendChild(clipAudioNode)

                # Update audio characteristics
                if clipNode:
                    clipNode.getElementsByTagName("samplerate")[0].childNodes[0].nodeValue = clip.data.get("reader", {}).get("sample_rate")
                    clipNode.getElementsByTagName("channelcount")[0].childNodes[0].nodeValue = clip.data.get("reader", {}).get("channels")
                    clipAudioNode.getElementsByTagName("file")[0].childNodes.clear()
                else:
                    clipAudioNode.getElementsByTagName("name")[1].childNodes[0].nodeValue = clip.data.get('title')
                    clipAudioNode.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue = clip.data.get('title')

                # Update audio clip properties
                clipAudioNode.setAttribute('id', "%s-audio" % clip.data.get('id'))
                clipAudioNode.getElementsByTagName("file")[0].setAttribute('id', clip.data.get('file_id'))
                clipAudioNode.getElementsByTagName("trackindex")[0].childNodes[0].nodeValue = track_count
                clipAudioNode.getElementsByTagName("name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipAudioNode.getElementsByTagName("in")[0].childNodes[0].nodeValue = clip.data.get('start') * fps_float
                clipAudioNode.getElementsByTagName("out")[0].childNodes[0].nodeValue = clip.data.get('end') * fps_float
                clipAudioNode.getElementsByTagName("start")[0].childNodes[0].nodeValue = clip.data.get('position') * fps_float
                clipAudioNode.getElementsByTagName("end")[0].childNodes[0].nodeValue = (clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))) * fps_float
                clipAudioNode.getElementsByTagName("duration")[0].childNodes[0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipAudioNode.getElementsByTagName("pproTicksIn")[0].childNodes[0].nodeValue = (clip.data.get('start') * fps_float) * ticks
                clipAudioNode.getElementsByTagName("pproTicksOut")[0].childNodes[0].nodeValue = (clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Audio Levels", clipAudioNode, clip.data.get('volume', {}).get('Points', []), 1.0)

            else:
                # No audio; remove audio characteristics from the video clip node
                if clipNode:
                    audio_nodes = clipNode.getElementsByTagName("audio")
                    if audio_nodes:
                        audio_nodes[0].parentNode.removeChild(audio_nodes[0])

        # Update counter
        track_count += 1

    try:
        # "wb" mode needed for Windows support
        file = open(os.fsencode(file_path), "wb")
        file.write(bytes(xmldoc.toxml(), 'UTF-8'))
        file.close()
    except IOError as inst:
        log.error("Error writing XML export: {}".format(str(inst)))
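# Worked example (illustrative only, not part of the module) of the
# frame/tick arithmetic used by export_xml() above: clip times are stored in
# seconds, converted to frames via fps_float, and the pproTicks* fields scale
# that frame count by the fixed constant 254016000000.
#
#   fps_float = 30000 / 1001                   # ~29.97 fps (NTSC)
#   start_seconds = 2.0                        # clip "start" in project data
#   in_frames = start_seconds * fps_float      # ~59.94, written to <in>
#   ppro_ticks_in = in_frames * 254016000000   # written to <pproTicksIn>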
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys
    import pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {
        "openshot-qt": info.VERSION,
        "libopenshot": v.ToString()
    }

    # Get FPS from project
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json(), strict=False)

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except Exception:
                        # Handle exception quietly
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Determine thumb path
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json(), strict=False)
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed)
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json(), strict=False)
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        if clip.audio_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json(), strict=False),
                            "contrast": json.loads(contrast.Json(), strict=False),
                            "reader": json.loads(transition_reader.Json(), strict=False),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
def import_edl():
    """Import EDL File"""
    app = get_app()
    _ = app._tr

    # Get EDL path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(
        app.window, _("Import EDL..."), recommended_path,
        _("Edit Decision Lists (*.edl)"), _("Edit Decision Lists (*.edl)"))[0]
    if not os.path.exists(file_path):
        # User canceled dialog
        return

    context = {}
    current_clip_index = ""

    # Get # of tracks
    all_tracks = app.project.get("layers")
    track_number = max(all_tracks, key=itemgetter('number')).get("number") + 1000000

    # Create new track above existing layer(s)
    track = Track()
    track.data = {"number": track_number, "y": 0, "label": "EDL Import", "lock": False}
    track.save()

    # Open EDL file
    with open(file_path, "r") as f:
        # Loop through each line, and compare against regex expressions
        for line in f:
            # Detect title
            for r in title_regex.findall(line):
                context["title"] = r  # Project title

            # Detect clips
            for r in clips_regex.findall(line):
                if len(r) == 8:
                    edit_index = r[0]  # 001
                    tape = r[1]        # BL, AX
                    clip_type = r[2]   # V, A
                    if tape == "BL":
                        # Ignore blank clips
                        continue

                    if current_clip_index == "":
                        # First clip, ignore for now
                        current_clip_index = edit_index
                    if current_clip_index != edit_index:
                        # Clip changed; commit the previous context
                        create_clip(context, track)

                        # Reset context
                        current_clip_index = edit_index
                        context = {"title": context.get("title"), "fcm": context.get("fcm")}

                    if tape not in context:
                        context[tape] = {}
                    if clip_type not in context[tape]:
                        context[tape][clip_type] = {}

                    # New clip detected
                    context["edit_index"] = edit_index                        # 001
                    context[tape][clip_type]["edit_type"] = r[3]              # C
                    context[tape][clip_type]["clip_start_time"] = r[4]        # 00:00:00:01
                    context[tape][clip_type]["clip_end_time"] = r[5]          # 00:00:03:01
                    context[tape][clip_type]["timeline_position"] = r[6]      # 00:00:30:01
                    context[tape][clip_type]["timeline_position_end"] = r[7]  # 00:00:33:01

            # Detect clip name
            for r in clip_name_regex.findall(line):
                context["clip_path"] = r  # FileName.mp4

            # Detect opacity
            for r in opacity_regex.findall(line):
                if len(r) == 2:
                    if "opacity" not in context:
                        context["opacity"] = []
                    keyframe_time = r[0]                  # 00:00:00:01
                    keyframe_value = float(r[1]) / 100.0  # 100.00 (scale 0 to 1)
                    context["opacity"].append({"time": keyframe_time, "value": keyframe_value})

            # Detect audio levels
            for r in audio_level_regex.findall(line):
                if len(r) == 2:
                    if "volume" not in context:
                        context["volume"] = []
                    keyframe_time = r[0]                          # 00:00:00:01
                    keyframe_value = (float(r[1]) + 99.0) / 99.0  # -99.00 dB (scale 0 to 1)
                    context["volume"].append({"time": keyframe_time, "value": keyframe_value})

            # Detect FCM attribute
            for r in fcm_regex.findall(line):
                context["fcm"] = r  # NON-DROP FRAME

        # Final edit needs committing
        create_clip(context, track)

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position())
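# Illustrative sketch (assumptions, not the project's actual definitions):
# import_edl() above references module-level regular expressions defined
# elsewhere. Patterns roughly like the following would match the
# CMX 3600-style lines the parser expects; names are suffixed "_sketch" to
# mark them as hypothetical stand-ins.
import re

title_regex_sketch = re.compile(r"TITLE:[ ]+(.*)")
fcm_regex_sketch = re.compile(r"FCM:[ ]+(.*)")
clip_name_regex_sketch = re.compile(r"\*[ ]+FROM CLIP NAME:[ ]+(.*)")
# 8 capture groups: edit #, tape, type, edit type, and 4 timecodes
clips_regex_sketch = re.compile(
    r"(\d{3})\s+(\S+)\s+(\S+)\s+(\S+)\s+"
    r"(\d{2}:\d{2}:\d{2}:\d{2})\s+(\d{2}:\d{2}:\d{2}:\d{2})\s+"
    r"(\d{2}:\d{2}:\d{2}:\d{2})\s+(\d{2}:\d{2}:\d{2}:\d{2})")
opacity_regex_sketch = re.compile(r"\*[ ]+OPACITY LEVEL AT (\S+) IS ([\d.+-]+)%")
audio_level_regex_sketch = re.compile(r"\*[ ]+AUDIO LEVEL AT (\S+) IS ([\d.+-]+) DB")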
def export_edl():
    """Export EDL File"""
    app = get_app()
    _ = app._tr

    # EDL Export format
    edl_string = "%03d %-9s%-6s%-9s%11s %11s %11s %11s\n"

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get EDL path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.edl" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".edl")
    file_path = QFileDialog.getSaveFileName(
        app.window, _("Export EDL..."), recommended_path,
        _("Edit Decision Lists (*.edl)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .edl if needed
    if not file_path.endswith(".edl"):
        file_path = "%s.edl" % file_path

    # Get filename with no extension
    file_name_with_ext = os.path.basename(file_path)
    file_name = os.path.splitext(file_name_with_ext)[0]

    all_tracks = app.project.get("layers")
    track_count = len(all_tracks)
    for track in reversed(sorted(all_tracks, key=itemgetter('number'))):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log the error and skip this track
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track name
        track_name = track.get("label") or "TRACK %s" % track_count
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Generate EDL File (1 per track - limitation of EDL format)
        # TODO: Improve and move this into its own class
        with open("%s-%s.edl" % (file_path.replace(".edl", ""), track_name), 'w', encoding="utf8") as f:
            # Add Header
            f.write("TITLE: %s - %s\n" % (file_name, track_name))
            f.write("FCM: NON-DROP FRAME\n\n")

            # Loop through clips on this track
            edit_index = 1
            export_position = 0.0
            for clip in clips_on_track:
                # Do we need a blank clip?
                if clip.data.get('position', 0.0) > export_position:
                    # Blank clip (i.e. 00:00:00:00)
                    clip_start_time = secondsToTimecode(0.0, fps_num, fps_den)
                    clip_end_time = secondsToTimecode(clip.data.get('position') - export_position, fps_num, fps_den)
                    timeline_start_time = secondsToTimecode(export_position, fps_num, fps_den)
                    timeline_end_time = secondsToTimecode(clip.data.get('position'), fps_num, fps_den)

                    # Write blank clip
                    f.write(edl_string % (edit_index, "BL"[:9], "V"[:6], "C", clip_start_time, clip_end_time, timeline_start_time, timeline_end_time))

                # Format clip start/end and timeline start/end values (i.e. 00:00:00:00)
                clip_start_time = secondsToTimecode(clip.data.get('start'), fps_num, fps_den)
                clip_end_time = secondsToTimecode(clip.data.get('end'), fps_num, fps_den)
                timeline_start_time = secondsToTimecode(clip.data.get('position'), fps_num, fps_den)
                timeline_end_time = secondsToTimecode(clip.data.get('position') + (clip.data.get('end') - clip.data.get('start')), fps_num, fps_den)

                has_video = clip.data.get("reader", {}).get("has_video", False)
                has_audio = clip.data.get("reader", {}).get("has_audio", False)
                if has_video:
                    # Video Track
                    f.write(edl_string % (edit_index, "AX"[:9], "V"[:6], "C", clip_start_time, clip_end_time, timeline_start_time, timeline_end_time))
                if has_audio:
                    # Audio Track
                    f.write(edl_string % (edit_index, "AX"[:9], "A"[:6], "C", clip_start_time, clip_end_time, timeline_start_time, timeline_end_time))
                f.write("* FROM CLIP NAME: %s\n" % clip.data.get('title'))

                # Add opacity data (if any)
                alpha_points = clip.data.get('alpha', {}).get('Points', [])
                if len(alpha_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in alpha_points:
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = point.get('co', {}).get('Y', 0.0) * 100.0
                        keyframes[keyframeTime] = keyframeValue

                    # Write keyframe values to EDL
                    for opacity_time in sorted(keyframes.keys()):
                        opacity_value = keyframes.get(opacity_time)
                        f.write("* OPACITY LEVEL AT %s IS %0.2f%% (REEL AX)\n" % (secondsToTimecode(opacity_time, fps_num, fps_den), opacity_value))

                # Add volume data (if any)
                volume_points = clip.data.get('volume', {}).get('Points', [])
                if len(volume_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in volume_points:
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = (point.get('co', {}).get('Y', 0.0) * 99.0) - 99  # Scale 0-1 to -99-0 dB
                        keyframes[keyframeTime] = keyframeValue

                    # Write keyframe values to EDL
                    for volume_time in sorted(keyframes.keys()):
                        volume_value = keyframes.get(volume_time)
                        f.write("* AUDIO LEVEL AT %s IS %0.2f DB (REEL AX A1)\n" % (secondsToTimecode(volume_time, fps_num, fps_den), volume_value))

                # Update export position
                export_position = clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))
                f.write("\n")
                edit_index += 1

        # Update counters
        track_count -= 1
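# Illustrative sketch (an assumption, not the project's actual helper):
# export_edl() and import_edl() rely on secondsToTimecode() to format
# seconds as an HH:MM:SS:FF timecode string at the project frame rate.
# A minimal version of that conversion might look like:
def _seconds_to_timecode_sketch(seconds, fps_num, fps_den):
    """Hypothetical stand-in: convert seconds to 'HH:MM:SS:FF'."""
    fps = float(fps_num) / float(fps_den)
    total_frames = int(round(seconds * fps))
    frames_per_hour = int(round(fps * 3600))
    frames_per_minute = int(round(fps * 60))
    frames_per_second = int(round(fps))
    hours = total_frames // frames_per_hour
    minutes = (total_frames % frames_per_hour) // frames_per_minute
    secs = (total_frames % frames_per_minute) // frames_per_second
    frames = total_frames % frames_per_second
    return "%02d:%02d:%02d:%02d" % (hours, minutes, secs, frames)

# e.g. _seconds_to_timecode_sketch(2.0, 30, 1) -> "00:00:02:00"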
def changed(self, action):
    # Clear previous rects
    self.clip_rects.clear()
    self.clip_rects_selected.clear()
    self.marker_rects.clear()

    # Get layer lookup (sort tracks by their layer number; Track query
    # objects are not directly orderable)
    layers = {}
    sorted_tracks = sorted(Track.filter(), key=lambda t: t.data.get('number', 0))
    for count, layer in enumerate(reversed(sorted_tracks)):
        layers[layer.data.get('number')] = count

    # Wait for timeline object and valid scrollbar positions
    if hasattr(get_app().window, "timeline") and self.scrollbar_position[2] != 0.0:
        # Get max width of timeline
        project_duration = get_app().project.get("duration")
        pixels_per_second = self.width() / project_duration

        # Determine scale factor
        vertical_factor = self.height() / len(layers.keys())

        for clip in Clip.filter():
            # Calculate clip geometry (and cache it)
            clip_x = (clip.data.get('position', 0.0) * pixels_per_second)
            clip_y = layers.get(clip.data.get('layer', 0), 0) * vertical_factor
            clip_width = ((clip.data.get('end', 0.0) - clip.data.get('start', 0.0)) * pixels_per_second)
            clip_rect = QRectF(clip_x, clip_y, clip_width, 1.0 * vertical_factor)
            if clip.id in get_app().window.selected_clips:
                # Selected clip
                self.clip_rects_selected.append(clip_rect)
            else:
                # Un-selected clip
                self.clip_rects.append(clip_rect)

        for tran in Transition.filter():
            # Calculate transition geometry (and cache it)
            tran_x = (tran.data.get('position', 0.0) * pixels_per_second)
            tran_y = layers.get(tran.data.get('layer', 0), 0) * vertical_factor
            tran_width = ((tran.data.get('end', 0.0) - tran.data.get('start', 0.0)) * pixels_per_second)
            tran_rect = QRectF(tran_x, tran_y, tran_width, 1.0 * vertical_factor)
            if tran.id in get_app().window.selected_transitions:
                # Selected transition
                self.clip_rects_selected.append(tran_rect)
            else:
                # Un-selected transition
                self.clip_rects.append(tran_rect)

        for marker in Marker.filter():
            # Calculate marker geometry (and cache it)
            marker_x = (marker.data.get('position', 0.0) * pixels_per_second)
            marker_rect = QRectF(marker_x, 0, 0.5, len(layers) * vertical_factor)
            self.marker_rects.append(marker_rect)

    # Force re-paint
    self.update()
def paintEvent(self, event, *args):
    """Custom paint event"""
    event.accept()

    # Paint timeline preview on QWidget
    painter = QPainter(self)
    painter.setRenderHints(
        QPainter.Antialiasing
        | QPainter.SmoothPixmapTransform
        | QPainter.TextAntialiasing, True)

    # Fill the whole widget with the background solid color
    painter.fillRect(event.rect(), QColor("#191919"))

    # Create pens / colors
    clip_pen = QPen(QBrush(QColor("#53a0ed")), 1.5)
    clip_pen.setCosmetic(True)
    painter.setPen(clip_pen)

    selected_clip_pen = QPen(QBrush(QColor("Red")), 1.5)
    selected_clip_pen.setCosmetic(True)

    scroll_color = QColor("#4053a0ed")
    scroll_pen = QPen(QBrush(scroll_color), 2.0)
    scroll_pen.setCosmetic(True)

    marker_color = QColor("#4053a0ed")
    marker_pen = QPen(QBrush(marker_color), 1.0)
    marker_pen.setCosmetic(True)

    playhead_color = QColor(Qt.red)
    playhead_color.setAlphaF(0.5)
    playhead_pen = QPen(QBrush(playhead_color), 1.0)
    playhead_pen.setCosmetic(True)

    handle_color = QColor("#a653a0ed")
    handle_pen = QPen(QBrush(handle_color), 1.5)
    handle_pen.setCosmetic(True)

    # Get layer lookup
    layers = Track.filter()

    # Wait for timeline object and valid scrollbar positions
    if get_app().window.timeline and self.scrollbar_position[2] != 0.0:
        # Get max width of timeline
        project_duration = get_app().project.get("duration")
        pixels_per_second = event.rect().width() / project_duration
        project_pixel_width = max(0, project_duration * pixels_per_second)
        scroll_width = (self.scrollbar_position[1] - self.scrollbar_position[0]) * event.rect().width()

        # Get FPS info
        fps_num = get_app().project.get("fps").get("num", 24)
        fps_den = get_app().project.get("fps").get("den", 1)
        fps_float = float(fps_num / fps_den)

        # Determine scale factor
        vertical_factor = event.rect().height() / len(layers)

        # Loop through each clip
        painter.setPen(clip_pen)
        for clip_rect in self.clip_rects:
            painter.drawRect(clip_rect)
        painter.setPen(selected_clip_pen)
        for clip_rect in self.clip_rects_selected:
            painter.drawRect(clip_rect)

        # Draw markers
        painter.setPen(marker_pen)
        for marker_rect in self.marker_rects:
            painter.drawRect(marker_rect)

        # Draw playhead
        painter.setPen(playhead_pen)
        playhead_x = ((self.current_frame / fps_float) * pixels_per_second)
        playhead_rect = QRectF(playhead_x, 0, 0.5, len(layers) * vertical_factor)
        painter.drawRect(playhead_rect)

        # Draw scroll bars (if available)
        if self.scrollbar_position:
            painter.setPen(scroll_pen)

            # Scroll bar path
            scroll_x = self.scrollbar_position[0] * event.rect().width()
            self.scroll_bar_rect = QRectF(scroll_x, 0.0, scroll_width, event.rect().height())
            scroll_path = QPainterPath()
            scroll_path.addRoundedRect(self.scroll_bar_rect, 6, 6)

            # Draw scroll bar rect
            painter.fillPath(scroll_path, scroll_color)
            painter.drawPath(scroll_path)

            # Draw handles
            painter.setPen(handle_pen)
            handle_width = 12.0

            # Left handle
            left_handle_x = (self.scrollbar_position[0] * event.rect().width()) - (handle_width / 2.0)
            self.left_handle_rect = QRectF(left_handle_x, event.rect().height() / 4.0, handle_width, event.rect().height() / 2.0)
            left_handle_path = QPainterPath()
            left_handle_path.addRoundedRect(self.left_handle_rect, handle_width, handle_width)
            painter.fillPath(left_handle_path, handle_color)

            # Right handle
            right_handle_x = (self.scrollbar_position[1] * event.rect().width()) - (handle_width / 2.0)
            self.right_handle_rect = QRectF(right_handle_x, event.rect().height() / 4.0, handle_width, event.rect().height() / 2.0)
            right_handle_path = QPainterPath()
            right_handle_path.addRoundedRect(self.right_handle_rect, handle_width, handle_width)
            painter.fillPath(right_handle_path, handle_color)

        # Determine if play-head is inside scroll area
        if get_app().window.preview_thread.player.Mode() == openshot.PLAYBACK_PLAY and self.is_auto_center:
            if not self.scroll_bar_rect.contains(playhead_rect):
                get_app().window.TimelineCenter.emit()

    # End painter
    painter.end()
def actionAddTrackBelow_trigger(self, event):
    log.info("actionAddTrackBelow_trigger")

    # Get # of tracks
    track_number = len(get_app().project.get(["layers"]))

    # Create new track
    track = Track()
    track.data = {"number": track_number, "y": 0}
    track.save()