def add_file(self, filepath):
    """Add a media file to the project; return the new (or existing) File."""
    app = get_app()
    _ = app._tr

    # Re-use any File record that already points at this path, e.g.
    # ["1F595-1F3FE", "openshot-qt-git/src/emojis/color/svg/1F595-1F3FE.svg"]
    existing = File.get(path=filepath)
    if existing:
        return existing

    # Let libopenshot choose a suitable reader for this path
    source_clip = openshot.Clip(filepath)
    try:
        # Serialize the reader's properties and tag the record as an image
        properties = json.loads(source_clip.Reader().Json())
        properties["media_type"] = "image"

        new_file = File()
        new_file.data = properties
        new_file.save()
        return new_file
    except Exception as ex:
        # Import failed; log and fall through (implicitly returns None)
        log.warning("Failed to import file: {}".format(str(ex)))
def test_add_file(self):
    """Verify File.save inserts exactly one new file, and re-saving updates in place."""
    # Import additional classes that need the app defined first
    from classes.query import File

    baseline = len(File.filter())

    # Build a dummy reader and use its serialized JSON as the file's data
    dummy = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
    new_file = File()
    new_file.data = json.loads(dummy.Json())
    new_file.data["path"] = os.path.join(PATH, "images", "openshot.png")
    new_file.data["media_type"] = "image"
    new_file.save()

    self.assertTrue(new_file)
    self.assertEqual(len(File.filter()), baseline + 1)

    # A second save must not insert a duplicate record
    new_file.save()
    self.assertEqual(len(File.filter()), baseline + 1)
def setUpClass(cls):
    """ Init unit test data """
    # Create Qt application
    cls.app = OpenShotApp(sys.argv, mode="unittest")
    cls.clip_ids = []
    cls.file_ids = []
    cls.transition_ids = []

    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition

    # Seed the project with five clips
    for _num in range(5):
        source = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        query_clip = Clip()
        query_clip.data = json.loads(source.Json())
        query_clip.save()
        cls.clip_ids.append(query_clip.id)

    # Seed the project with five files (all pointing at the same image)
    for _num in range(5):
        dummy = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
        query_file = File()
        query_file.data = json.loads(dummy.Json())
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()
        cls.file_ids.append(query_file.id)

    # Seed the project with five mask transitions
    for _num in range(5):
        mask = openshot.Mask()
        query_transition = Transition()
        query_transition.data = json.loads(mask.Json())
        query_transition.save()
        cls.transition_ids.append(query_transition.id)
def add_file(self, filepath):
    """Import a media file into the project.

    Returns True on success, False when the file cannot be read, and
    None when the path is already in the project.
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (still images also report has_video=True)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def add_file(self, filepath):
    """Import a media file into the project.

    Returns True on success, False when the file cannot be read, and
    None when the path is already in the project.
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (still images also report has_video=True)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def add_file(self, filepath):
    """ Add an animation to the project file tree """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Get the JSON for the clip's internal reader
    try:
        # Open image sequence in FFmpegReader
        reader = openshot.FFmpegReader(filepath)
        reader.Open()

        # Serialize JSON for the reader
        file_data = json.loads(reader.Json())

        # Set media type (rendered animations are treated as video)
        file_data["media_type"] = "video"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def add_file(self, filepath):
    """Import a media file and drop it onto the timeline.

    Returns True on success, None when the path is already in the project.
    Any reader error propagates to the caller (no try/except here).
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    # (removed leftover debug print of file_data)
    reader = clip.Reader()
    file_data = json.loads(reader.Json())

    # Determine media type
    # NOTE(review): still images also report has_video=True, so they are
    # classified as "video" here — confirm whether an is_image() check is
    # needed, as in the other importers.
    if file_data["has_video"]:
        file_data["media_type"] = "video"
    elif file_data["has_audio"] and not file_data["has_video"]:
        file_data["media_type"] = "audio"

    # Save new file to the project data
    file = File()
    file.data = file_data

    # Save file
    file.save()

    # open in timeline added by yanght======
    self.timeline.addNewClip(file)
    return True
def add_file(self, filepath):
    """ Add an animation to the project file tree """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Get the JSON for the clip's internal reader
    try:
        # Open image sequence in FFmpegReader
        reader = openshot.FFmpegReader(filepath)
        reader.Open()

        # Serialize JSON for the reader
        file_data = json.loads(reader.Json())

        # Set media type (rendered animations are treated as video)
        file_data["media_type"] = "video"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def add_file(self, filepath):
    """Import *filepath* as an image File.

    Returns True on success, False when the file cannot be read, and
    None when the path is already in the project.
    """
    filename = os.path.basename(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Set media type
        file_data["media_type"] = "image"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception as ex:
        # Handle exception
        log.error('Could not import {}: {}'.format(filename, str(ex)))
        # Translate the template *before* formatting, so the gettext
        # catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def test_add_file(self):
    """Verify File.save inserts exactly one new file, and re-saving updates in place."""
    baseline = len(File.filter())

    # Build a dummy reader and use its serialized JSON as the file's data
    dummy = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
    new_file = File()
    new_file.data = json.loads(dummy.Json())
    new_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
    new_file.data["media_type"] = "image"
    new_file.save()

    self.assertTrue(new_file)
    self.assertEqual(len(File.filter()), baseline + 1)

    # A second save must not insert a duplicate record
    new_file.save()
    self.assertEqual(len(File.filter()), baseline + 1)
def add_file(self, filepath):
    """Import a media file (or image sequence) into the project.

    Returns True on success, False when the file cannot be read, and
    None when the path is already in the project.
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (still images also report has_video=True)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data

        # Is this file an image sequence / animation?
        image_seq_details = self.get_image_sequence_details(filepath)
        if image_seq_details:
            # Update file with correct path
            folder_path = image_seq_details["folder_path"]
            base_name = image_seq_details["base_name"]
            fixlen = image_seq_details["fixlen"]
            digits = image_seq_details["digits"]
            extension = image_seq_details["extension"]

            # Build the printf-style frame-number placeholder
            if not fixlen:
                zero_pattern = "%d"
            else:
                zero_pattern = "%%0%sd" % digits

            # Generate the pattern for this image sequence
            pattern = "%s%s.%s" % (base_name, zero_pattern, extension)

            # Folder name doubles as a display name when base_name is empty
            folderName = os.path.basename(folder_path)
            if not base_name:
                # Give alternate name
                file.data["name"] = "%s (%s)" % (folderName, pattern)

            # Load image sequence (to determine duration and video_length)
            image_seq = openshot.Clip(os.path.join(folder_path, pattern))

            # Update file details
            file.data["path"] = os.path.join(folder_path, pattern)
            file.data["media_type"] = "video"
            file.data["duration"] = image_seq.Reader().info.duration
            file.data["video_length"] = image_seq.Reader().info.video_length

        # Save file
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def add_files(self, files, image_seq_details=None, quiet=False):
    """Import one or more media paths into the project.

    :param files: a single path or a list/tuple of paths
    :param image_seq_details: optional pre-computed image-sequence info dict;
        when None, each path is probed with get_image_sequence_details()
    :param quiet: when True, suppress the invalid-file message box on failure
    """
    # Access translations
    app = get_app()
    _ = app._tr

    # Make sure we're working with a list of files
    if not isinstance(files, (list, tuple)):
        files = [files]
    start_count = len(files)

    for count, filepath in enumerate(files):
        (dir_path, filename) = os.path.split(filepath)

        # Check for this path in our existing project data
        new_file = File.get(path=filepath)

        # If this file is already found, skip it
        if new_file:
            del new_file
            continue

        try:
            # Load filepath in libopenshot clip object (which will try multiple readers to open it)
            clip = openshot.Clip(filepath)

            # Get the JSON for the clip's internal reader
            reader = clip.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type (still images also report has_video=True)
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"
            else:
                # If none set, just assume video
                file_data["media_type"] = "video"

            # Save new file to the project data
            new_file = File()
            new_file.data = file_data

            # Is this an image sequence / animation?
            seq_info = image_seq_details or self.get_image_sequence_details(filepath)
            if seq_info:
                # Update file with correct path
                folder_path = seq_info["folder_path"]
                base_name = seq_info["base_name"]
                fixlen = seq_info["fixlen"]
                digits = seq_info["digits"]
                extension = seq_info["extension"]

                # printf-style frame-number placeholder
                if not fixlen:
                    zero_pattern = "%d"
                else:
                    zero_pattern = "%%0%sd" % digits

                # Generate the regex pattern for this image sequence
                pattern = "%s%s.%s" % (base_name, zero_pattern, extension)

                # Split folder name
                folderName = os.path.basename(folder_path)
                if not base_name:
                    # Give alternate name
                    new_file.data["name"] = "%s (%s)" % (folderName, pattern)

                # Load image sequence (to determine duration and video_length)
                image_seq = openshot.Clip(os.path.join(folder_path, pattern))

                # Update file details
                new_file.data["path"] = os.path.join(folder_path, pattern)
                new_file.data["media_type"] = "video"
                new_file.data["duration"] = image_seq.Reader().info.duration
                new_file.data["video_length"] = image_seq.Reader().info.video_length

                log.info('Imported {} as image sequence {}'.format(
                    filepath, pattern))

                # Remove any other image sequence files from the list we're processing
                # NOTE(review): this mutates `files` while enumerate() iterates it;
                # it only removes entries other than the current one, but verify
                # no sibling frame can be skipped by the shrinking list.
                match_glob = "{}{}.{}".format(base_name, '[0-9]*', extension)
                log.debug("Removing files from import list with glob: {}".format(match_glob))
                for seq_file in glob.iglob(os.path.join(folder_path, match_glob)):
                    # Don't remove the current file, or we mess up the for loop
                    if seq_file in files and seq_file != filepath:
                        files.remove(seq_file)

            if not seq_info:
                # Log our not-an-image-sequence import
                log.info("Imported media file {}".format(filepath))

            # Save file
            new_file.save()

            # Show progress in the status bar only for large batches
            if start_count > 15:
                message = _("Importing %(count)d / %(total)d") % {
                    "count": count,
                    "total": len(files) - 1
                }
                app.window.statusBar.showMessage(message, 15000)
                # Let the event loop run to update the status bar
                get_app().processEvents()

            # Remember the last import folder (untracked project setting)
            prev_path = app.project.get("import_path")
            if dir_path != prev_path:
                app.updates.update_untracked(["import_path"], dir_path)
        except Exception as ex:
            # Log exception
            log.warning("Failed to import {}: {}".format(filepath, ex))
            if not quiet:
                # Show message box to user
                app.window.invalidImage(filename)

    # Reset list of ignored paths
    self.ignore_image_sequence_paths = []

    # Final summary in the status bar
    message = _("Imported %(count)d files") % {"count": len(files) - 1}
    app.window.statusBar.showMessage(message, 3000)
def import_xml():
    """Import final cut pro XML file.

    Prompts the user for an XML path, creates a new Track per imported
    FCP track, and converts each clipitem (plus its opacity/audiolevels
    keyframes) into project Clip records.
    """
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path (default to the project folder, or HOME for unsaved projects)
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."), recommended_path,
                                            _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0]
    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks (audio first, then video)
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks; new layer number goes above the current top layer
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = list(
                reversed(sorted(
                    all_tracks,
                    key=itemgetter('number'))))[0].get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName(
                    "locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {
                "number": track_number,
                "y": 0,
                "label": "XML Import %s" % track_index,
                "lock": is_locked
            }
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName(
                    "file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName(
                        "pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no clippath node found
                    # This usually happens for linked audio clips (which OpenShot combines audio and thus ignores this)
                    continue

                # Let the user locate or skip missing media
                clip_path, is_modified, is_skipped = find_missing_file(
                    clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data[
                                "has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data

                        # Save file
                        file.save()
                    except Exception:
                        # Ignore errors for now
                        # NOTE(review): if this fires, `file` stays None and the
                        # attribute accesses below will raise — confirm intended.
                        pass

                if (file.data["media_type"] == "video"
                        or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                              "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images",
                                              "AudioThumbnail.png")

                # Create Clip object; FCP times are in frames, so divide by FPS
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName(
                    "name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(
                    clip_element.getElementsByTagName("start")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(
                    clip_element.getElementsByTagName("in")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(
                    clip_element.getElementsByTagName("out")
                    [0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName(
                        "effect"):
                    effectid = effect_element.getElementsByTagName(
                        "effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        # FCP opacity (0-100) -> alpha keyframes (0.0-1.0)
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })
                    elif effectid == "audiolevels":
                        # FCP audio levels (0-100) -> volume keyframes (0.0-1.0)
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })

                # Save clip
                clip.save()

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(
        app.window.preview_thread.player.Position())
def add_file(self, filepath):
    """Import a media file (or image sequence) into the project.

    Returns True on success, False when the file cannot be read, and
    None when the path is already in the project.
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)
    if file:
        # This file is already in the project
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)
    try:
        # Get the JSON for the clip's internal reader
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (still images also report has_video=True)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data

        # Is this file an image sequence / animation?
        image_seq_details = self.get_image_sequence_details(filepath)
        if image_seq_details:
            # Update file with correct path
            folder_path = image_seq_details["folder_path"]
            base_name = image_seq_details["base_name"]
            fixlen = image_seq_details["fixlen"]
            digits = image_seq_details["digits"]
            extension = image_seq_details["extension"]

            # Build the printf-style frame-number placeholder
            if not fixlen:
                zero_pattern = "%d"
            else:
                zero_pattern = "%%0%sd" % digits

            # Generate the pattern for this image sequence
            pattern = "%s%s.%s" % (base_name, zero_pattern, extension)

            # Folder name doubles as a display name when base_name is empty
            folderName = os.path.basename(folder_path)
            if not base_name:
                # Give alternate name
                file.data["name"] = "%s (%s)" % (folderName, pattern)

            # Load image sequence (to determine duration and video_length)
            image_seq = openshot.Clip(os.path.join(folder_path, pattern))

            # Update file details
            file.data["path"] = os.path.join(folder_path, pattern)
            file.data["media_type"] = "video"
            file.data["duration"] = image_seq.Reader().info.duration
            file.data["video_length"] = image_seq.Reader().info.video_length

        # Save file
        file.save()
        return True
    except Exception:
        # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
        # still propagate. Translate the template *before* formatting, so
        # the gettext catalog lookup can match the untranslated string.
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.").format(filename))
        msg.exec_()
        return False
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file.

    Unpickles the v1 project, imports its files/tracks/clips/transitions
    into the current project data, and returns a minimal project_data
    dict containing only version info. Raises Exception when parsing
    fails or when any referenced media file could not be loaded.
    """
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {
        "openshot-qt": info.VERSION,
        "libopenshot": v.ToString()
    }

    # Get FPS from project
    from classes.app import get_app
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker

    # Alias the legacy modules under the names pickle expects, so that
    # unpickling the v1 project resolves its classes correctly.
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(
                                file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(
                                file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data[
                                "has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly
                        # NOTE(review): bare 'except:' — also catches
                        # SystemExit/KeyboardInterrupt; consider narrowing.
                        msg = (
                            "%s is not a valid video, audio, or image file."
                            % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {
                    "number": track_counter,
                    "y": 0,
                    "label": legacy_t.name
                }
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys(
                        ):
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info(
                                "Skipping importing missing file: %s" %
                                clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video"
                                or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(
                                info.THUMBNAIL_PATH,
                                "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(
                                info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json())
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes (frame numbers are 1-based, hence +1)
                            start = openshot.Point(
                                round(clip.start_time * fps_float) + 1, 0.0,
                                openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(
                                round((clip.start_time +
                                       clip.video_fade_in_amount) *
                                      fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(
                                start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(
                                round((clip.end_time -
                                       clip.video_fade_out_amount) *
                                      fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(
                                round(clip.end_time * fps_float) + 1, 0.0,
                                openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(
                                start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed)
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0,
                                               openshot.BEZIER)
                            p_object = json.loads(p.Json())
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        if clip.audio_fade_in:
                            # Add keyframes
                            # NOTE(review): uses video_fade_in_amount for the
                            # audio fade duration — confirm this matches the
                            # legacy format's intent.
                            start = openshot.Point(
                                round(clip.start_time * fps_float) + 1, 0.0,
                                openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(
                                round((clip.start_time +
                                       clip.video_fade_in_amount) *
                                      fps_float) + 1, clip.volume / 100.0,
                                openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(
                                start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(
                                round((clip.end_time -
                                       clip.video_fade_out_amount) *
                                      fps_float) + 1, clip.volume / 100.0,
                                openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(
                                round(clip.end_time * fps_float) + 1, 0.0,
                                openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(
                                start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition
                        if not trans.resource or not os.path.exists(
                                trans.resource):
                            trans.resource = os.path.join(
                                info.PATH, "transitions", "common",
                                "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(
                            trans.resource)

                        # Brightness ramps 1.0 -> -1.0 (or reversed)
                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value,
                                            openshot.BEZIER)
                        brightness.AddPoint(
                            round(trans.length * fps_float) + 1,
                            trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json()),
                            "contrast": json.loads(contrast.Json()),
                            "reader": json.loads(transition_reader.Json()),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            # NOTE(review): _() is applied to the already-formatted string,
            # so the gettext catalog lookup cannot match the template.
            msg = _("Failed to load project file %(path)s: %(error)s" % {
                "path": file_path,
                "error": ex
            })
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(
            _("Failed to load the following files:\n%s" %
              ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
def setUpClass(cls):
    """ Init unit test data """
    # Reuse the running Qt application
    cls.app = QGuiApplication.instance()
    cls.clip_ids = []
    cls.file_ids = []
    cls.transition_ids = []

    created_clips = []

    # Five clips spaced 10 seconds apart, each 5 seconds long
    for idx in range(5):
        src = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        src.Position(idx * 10.0)
        src.End(5.0)

        clip_record = Clip()
        clip_record.data = json.loads(src.Json())
        clip_record.save()

        cls.clip_ids.append(clip_record.id)
        created_clips.append(clip_record)

    # Five files, all pointing at the same image
    for _idx in range(5):
        reader = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

        file_record = File()
        file_record.data = json.loads(reader.Json())
        file_record.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        file_record.data["media_type"] = "image"
        file_record.save()

        cls.file_ids.append(file_record.id)

    # One mask transition placed over the final second of each clip
    for clip_record in created_clips:
        mask = openshot.Mask()
        pos = clip_record.data.get("position", 0.0)
        start = clip_record.data.get("start", 0.0)
        end = clip_record.data.get("end", 0.0)
        mask.Position((pos - start + end) - 1.0)
        mask.End(1.0)

        trans_record = Transition()
        trans_record.data = json.loads(mask.Json())
        trans_record.save()

        cls.transition_ids.append(trans_record.id)

    # Don't keep the full query objects around
    del created_clips
def _disabled_keyframe():
    """Return a fresh constant-0.0 keyframe dict (mutes audio / hides video).

    A new dict is built on every call so saved clips never share mutable
    keyframe state.
    """
    return {
        "Points": [{
            "co": {
                "X": 1.0,
                "Y": 0.0  # 0.0 disables the stream for the whole clip
            },
            "interpolation": 2  # constant interpolation
        }]
    }


def _apply_timing(clip, ctx, fps_num, fps_den):
    """Copy position/start/end timecodes from an EDL context dict onto a clip."""
    clip.data["position"] = timecodeToSeconds(
        ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
    clip.data["start"] = timecodeToSeconds(
        ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
    clip.data["end"] = timecodeToSeconds(
        ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)


def _keyframe_points(keyframes, fps_num, fps_den, fps_float):
    """Convert a list of {time, value} keyframes into an openshot Points dict."""
    return {
        "Points": [{
            "co": {
                # Frame number = seconds * fps (rounded to nearest frame)
                "X": round(
                    timecodeToSeconds(keyframe.get("time", 0.0),
                                      fps_num, fps_den) * fps_float),
                "Y": keyframe.get("value", 0.0)
            },
            "interpolation": 1  # linear
        } for keyframe in keyframes]
    }


def create_clip(context, track):
    """Create a new clip based on this context dict.

    Args:
        context: dict parsed from an EDL entry; expects "clip_path", an "AX"
            dict holding optional "V" (video) and "A" (audio) timecode
            contexts, and optional "volume" / "opacity" keyframe lists.
        track: Track query object; its data["number"] becomes the clip layer.

    Returns None. Silently returns early if the user skipped a missing file
    or the media file could not be imported.
    """
    app = get_app()

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get clip path (and prompt user if path not found)
    clip_path, is_modified, is_skipped = find_missing_file(
        context.get("clip_path", ""))
    if is_skipped:
        # User chose to skip this missing file
        return

    # Get video and audio contexts (either may be empty)
    video_ctx = context.get("AX", {}).get("V", {})
    audio_ctx = context.get("AX", {}).get("A", {})

    # Check for this path in our existing project data
    file = File.get(path=clip_path)

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip_obj = openshot.Clip(clip_path)

    if not file:
        # Get the JSON for the clip's internal reader
        try:
            reader = clip_obj.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"

            # Save new file to the project data
            file = File()
            file.data = file_data
            file.save()
        except Exception:
            # FIX: this was a bare "except:" that logged and then fell
            # through, crashing below on file.data while file was still None.
            # Abort importing this clip instead.
            log.warning('Error building File object for %s' % clip_path,
                        exc_info=1)
            return

    # Create Clip object
    clip = Clip()
    clip.data = json.loads(clip_obj.Json())
    clip.data["file_id"] = file.id
    clip.data["title"] = context.get("clip_path", "")
    clip.data["layer"] = track.data.get("number", 1000000)

    if video_ctx and not audio_ctx:
        # Only video: take timing from the video context and disable audio
        _apply_timing(clip, video_ctx, fps_num, fps_den)
        clip.data["has_audio"] = _disabled_keyframe()
    elif audio_ctx and not video_ctx:
        # Only audio: take timing from the audio context and disable video
        _apply_timing(clip, audio_ctx, fps_num, fps_den)
        clip.data["has_video"] = _disabled_keyframe()
    else:
        # Both video and audio: timing comes from the video context
        _apply_timing(clip, video_ctx, fps_num, fps_den)

    # Add volume keyframes
    if context.get("volume"):
        clip.data["volume"] = _keyframe_points(
            context.get("volume", []), fps_num, fps_den, fps_float)

    # Add alpha keyframes
    if context.get("opacity"):
        clip.data["alpha"] = _keyframe_points(
            context.get("opacity", []), fps_num, fps_den, fps_float)

    # Save clip
    clip.save()
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file.

    Unpickles the old project, imports its media files, rebuilds tracks,
    clips (with fade keyframes), and transitions into the current project
    data, and returns a dict containing only the current version info.

    Args:
        file_path: path to the legacy .osp project file.

    Returns:
        dict with a "version" key (openshot-qt / libopenshot versions).

    Raises:
        Exception: if the legacy file cannot be parsed, or if any media
            files referenced by it failed to import.
    """
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    # Fall back to simplejson on interpreters without the stdlib json module
    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt" : info.VERSION, "libopenshot" : v.ToString()}

    # Get FPS from project
    # NOTE(review): redundant re-import; get_app is already imported above
    from classes.app import get_app
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X) and alias them into
    # sys.modules under their old module paths, so pickle can resolve the
    # classes referenced inside the legacy project file
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            # SECURITY NOTE(review): pickle.load executes arbitrary code from
            # the file being opened; only use on trusted project files
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            # Maps legacy file unique_id -> newly created File object
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json(), strict=False)

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly: a single unreadable media
                        # file is recorded and reported at the end, rather
                        # than aborting the whole import
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks (deep copy so deletion doesn't mutate the
            # list being iterated)
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks (reversed, since legacy track order is
            # top-to-bottom while the new project numbers from the bottom)
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        # NOTE(review): this reassigns the file_path
                        # parameter; the log/error messages after the loop
                        # will show the last clip's media path instead of
                        # the project path -- confirm intent
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json(), strict=False)
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN: ramp alpha 0.0 -> 1.0 over the fade-in window
                        if clip.video_fade_in:
                            # Add keyframes (X is a 1-based frame number)
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT: ramp alpha 1.0 -> 0.0 over the fade-out window
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed); otherwise use the clip's
                        # static volume percentage as a single keyframe
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json(), strict=False)
                            new_clip["volume"] = { "Points" : [p_object]}

                        # Audio Fade IN: ramp volume 0.0 -> clip volume
                        # NOTE(review): uses video_fade_in_amount for the
                        # audio fade-in duration -- confirm this is intended
                        if clip.audio_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT: ramp volume clip volume -> 0.0
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition: substitute the bundled fade
                        # when the legacy resource is missing
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        # Brightness sweeps 1.0 -> -1.0 (or the reverse) to
                        # animate the mask over the transition length
                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        # Contrast is constant; legacy softness (0..1) scaled by 10
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json(), strict=False),
                            "contrast": json.loads(contrast.Json(), strict=False),
                            "reader": json.loads(transition_reader.Json(), strict=False),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data