def test_get_File(self):
    """ Test the File.get method """
    file = File.get(id=self.file_ids[1])
    self.assertTrue(file)

    # Do not find a File
    file = File.get(id="invalidID")
    self.assertEqual(file, None)
def test_filter_File(self):
    """ Test the File.filter method """
    files = File.filter(id=self.file_ids[0])
    self.assertTrue(files)

    # Do not find a File
    files = File.filter(id="invalidID")
    self.assertEqual(len(files), 0)
def test_get_File(self):
    """ Test the File.get method """
    # Import additional classes that need the app defined first
    from classes.query import File

    # Find a File named file1
    file = File.get(id=TestQueryClass.file_ids[1])
    self.assertTrue(file)

    # Do not find a File
    file = File.get(id="invalidID")
    self.assertEqual(file, None)
def test_filter_File(self):
    """ Test the File.filter method """
    # Import additional classes that need the app defined first
    from classes.query import File

    # Find all Files named file1
    files = File.filter(id=TestQueryClass.file_ids[0])
    self.assertTrue(files)

    # Do not find a File
    files = File.filter(id="invalidID")
    self.assertEqual(len(files), 0)
def value_updated(self, item):
    """ Name or tags updated """
    # Get translation method
    _ = get_app()._tr

    # Determine what was changed
    file_id = self.files_model.model.item(item.row(), 5).text()
    name = self.files_model.model.item(item.row(), 1).text()
    tags = self.files_model.model.item(item.row(), 2).text()

    # Get file object and update friendly name and tags attribute
    f = File.get(id=file_id)
    if name != f.data["path"]:
        f.data["name"] = name
    else:
        f.data["name"] = ""

    if "tags" in f.data.keys():
        if tags != f.data["tags"]:
            f.data["tags"] = tags
    elif tags:
        f.data["tags"] = tags

    # Tell file model to ignore updates (since this treeview will already be updated)
    self.files_model.ignore_update_signal = True

    # Save File
    f.save()

    # Re-enable updates
    self.files_model.ignore_update_signal = False
def test_update_File(self):
    """ Test the File.save method """
    update_id = self.file_ids[0]
    file = File.get(id=update_id)
    self.assertTrue(file)

    # Update File
    file.data["height"] = 1080
    file.data["width"] = 1920
    file.save()

    # Verify updated data
    file = File.get(id=update_id)
    self.assertEqual(file.data["height"], 1080)
    self.assertEqual(file.data["width"], 1920)
def update_file_thumbnail(self, file_id):
    """Update/re-generate the thumbnail of a specific file"""
    file = File.get(id=file_id)
    path, filename = os.path.split(file.data["path"])
    name = file.data.get("name", filename)

    # Refresh thumbnail for updated file
    self.ignore_updates = True
    m = self.model
    if file_id in self.model_ids:
        # Look up stored index to ID column
        id_index = self.model_ids[file_id]
        if not id_index.isValid():
            return

        # Update thumb for file
        thumb_path = self.get_thumb_path(file_id, 1, clear_cache=True)
        thumb_index = id_index.sibling(id_index.row(), 0)
        item = m.itemFromIndex(thumb_index)
        item.setIcon(QIcon(thumb_path))
        item.setText(name)

        # Emit signal when model is updated
        self.ModelRefreshed.emit()

    self.ignore_updates = False
def contextMenuEvent(self, event):
    # Update selection
    self.updateSelection()

    # Set context menu mode
    app = get_app()
    app.context_menu_object = "files"

    menu = QMenu(self)
    menu.addAction(self.win.actionImportFiles)
    menu.addAction(self.win.actionThumbnailView)

    if self.selected:
        # If file selected, show file related options
        menu.addSeparator()

        # Add edit title option (if svg file)
        selected_file_id = self.win.selected_files[0]
        file = File.get(id=selected_file_id)
        if file and file.data.get("path").endswith(".svg"):
            menu.addAction(self.win.actionEditTitle)
            menu.addAction(self.win.actionDuplicateTitle)
            menu.addSeparator()

        menu.addAction(self.win.actionPreview_File)
        menu.addAction(self.win.actionSplitClip)
        menu.addAction(self.win.actionAdd_to_Timeline)
        menu.addAction(self.win.actionFile_Properties)
        menu.addSeparator()
        menu.addAction(self.win.actionRemove_from_Project)
        menu.addSeparator()

    # Show menu
    menu.exec_(QCursor.pos())
def contextMenuEvent(self, event):
    # Update selection
    self.updateSelection()

    # Set context menu mode
    app = get_app()
    app.context_menu_object = "files"

    menu = QMenu(self)
    menu.addAction(self.win.actionImportFiles)
    menu.addAction(self.win.actionDetailsView)

    if self.selected:
        # If file selected, show file related options
        menu.addSeparator()

        # Add edit title option (if svg file)
        selected_file_id = self.win.selected_files[0]
        file = File.get(id=selected_file_id)
        if file and file.data.get("path").endswith(".svg"):
            menu.addAction(self.win.actionEditTitle)
            menu.addAction(self.win.actionDuplicateTitle)
            menu.addSeparator()

        menu.addAction(self.win.actionPreview_File)
        menu.addAction(self.win.actionSplitClip)
        menu.addAction(self.win.actionAdd_to_Timeline)
        menu.addAction(self.win.actionFile_Properties)
        menu.addSeparator()
        menu.addAction(self.win.actionRemove_from_Project)
        menu.addSeparator()

    # Show menu
    menu.exec_(QCursor.pos())
def test_delete_File(self):
    """ Test the File.delete method """
    delete_id = self.file_ids[4]
    file = File.get(id=delete_id)
    self.assertTrue(file)
    file.delete()

    # Verify deleted data
    deleted_file = File.get(id=delete_id)
    self.assertFalse(deleted_file)

    # Delete File again (should do nothing)
    file.delete()
    deleted_file = File.get(id=delete_id)
    self.assertFalse(deleted_file)
def rect_select_clicked(self, widget, param):
    """Rect select button clicked"""
    self.context[param["setting"]].update({"button-clicked": True})

    # show dialog
    from windows.region import SelectRegion
    from classes.query import File, Clip

    c = Clip.get(id=self.clip_id)
    reader_path = c.data.get('reader', {}).get('path', '')
    f = File.get(path=reader_path)
    if f:
        win = SelectRegion(f, self.clip_instance)
        # Run the dialog event loop - blocking interaction on this window during that time
        result = win.exec_()
        if result == QDialog.Accepted:
            # self.first_frame = win.current_frame

            # Region selected (get coordinates if any)
            topLeft = win.videoPreview.regionTopLeftHandle
            bottomRight = win.videoPreview.regionBottomRightHandle
            viewPortSize = win.viewport_rect
            curr_frame_size = win.videoPreview.curr_frame_size

            x1 = topLeft.x() / curr_frame_size.width()
            y1 = topLeft.y() / curr_frame_size.height()
            x2 = bottomRight.x() / curr_frame_size.width()
            y2 = bottomRight.y() / curr_frame_size.height()

            # Get QImage of region
            if win.videoPreview.region_qimage:
                region_qimage = win.videoPreview.region_qimage

                # Resize QImage to match button size
                resized_qimage = region_qimage.scaled(widget.size(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)

                # Draw Qimage onto QPushButton (to display region selection to user)
                palette = widget.palette()
                palette.setBrush(widget.backgroundRole(), QBrush(resized_qimage))
                widget.setFlat(True)
                widget.setAutoFillBackground(True)
                widget.setPalette(palette)

                # Remove button text (so region QImage is more visible)
                widget.setText("")

            # If data found, add to context
            if topLeft and bottomRight:
                self.context[param["setting"]].update({
                    "normalized_x": x1,
                    "normalized_y": y1,
                    "normalized_width": x2 - x1,
                    "normalized_height": y2 - y1,
                    "first-frame": win.current_frame,
                })
                log.info(self.context)
    else:
        log.error('No file found with path: %s' % reader_path)
def setUpClass(TestQueryClass):
    """ Init unit test data """
    # Create Qt application
    TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
    TestQueryClass.clip_ids = []
    TestQueryClass.file_ids = []
    TestQueryClass.transition_ids = []

    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition

    # Insert some clips into the project data
    for num in range(5):
        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

        # Parse JSON
        clip_data = json.loads(c.Json())

        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()

        # Keep track of the ids
        TestQueryClass.clip_ids.append(query_clip.id)

    # Insert some files into the project data
    for num in range(5):
        # Create file
        r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

        # Parse JSON
        file_data = json.loads(r.Json())

        # Insert into project data
        query_file = File()
        query_file.data = file_data
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()

        # Keep track of the ids
        TestQueryClass.file_ids.append(query_file.id)

    # Insert some transitions into the project data
    for num in range(5):
        # Create mask object
        transition_object = openshot.Mask()
        transitions_data = json.loads(transition_object.Json())

        # Insert into project data
        query_transition = Transition()
        query_transition.data = transitions_data
        query_transition.save()

        # Keep track of the ids
        TestQueryClass.transition_ids.append(query_transition.id)
def add_file(self, filepath):
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True

    except:
        # Handle exception
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def test_update_File(self):
    """ Test the File.save method """
    # Import additional classes that need the app defined first
    from classes.query import File

    # Find a File named file1
    update_id = TestQueryClass.file_ids[0]
    file = File.get(id=update_id)
    self.assertTrue(file)

    # Update File
    file.data["height"] = 1080
    file.data["width"] = 1920
    file.save()

    # Verify updated data
    # Get File again
    file = File.get(id=update_id)
    self.assertEqual(file.data["height"], 1080)
    self.assertEqual(file.data["width"], 1920)
def add_file(self, filepath):
    # Add file into project
    app = get_app()
    _ = app._tr

    # Check for this path in our existing project data
    # ["1F595-1F3FE", "openshot-qt-git/src/emojis/color/svg/1F595-1F3FE.svg"]
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return file

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type
        file_data["media_type"] = "image"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return file

    except Exception as ex:
        # Log exception
        log.warning("Failed to import file: {}".format(str(ex)))
def add_file(self, filepath):
    """ Add an animation to the project file tree """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Get the JSON for the clip's internal reader
    try:
        # Open image sequence in FFmpegReader
        reader = openshot.FFmpegReader(filepath)
        reader.Open()

        # Serialize JSON for the reader
        file_data = json.loads(reader.Json())

        # Set media type
        file_data["media_type"] = "video"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True

    except:
        # Handle exception
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def test_delete_File(self):
    """ Test the File.delete method """
    # Import additional classes that need the app defined first
    from classes.query import File

    # Find a File named file1
    delete_id = TestQueryClass.file_ids[4]
    file = File.get(id=delete_id)
    self.assertTrue(file)

    # Delete File
    file.delete()

    # Verify deleted data
    deleted_file = File.get(id=delete_id)
    self.assertFalse(deleted_file)

    # Delete File again (should do nothing)
    file.delete()

    # Verify deleted data
    deleted_file = File.get(id=delete_id)
    self.assertFalse(deleted_file)
def actionPreview_File_trigger(self, event):
    """ Preview the selected media file """
    log.info('actionPreview_File_trigger')

    if self.selected_files:
        # Find matching file
        f = File.get(id=self.selected_files[0])
        if f:
            # Get file path
            previewPath = f.data["path"]

            # Load file into player
            self.preview_thread.LoadFile(previewPath)

            # Trigger play button
            self.actionPlay.setChecked(False)
            self.actionPlay.trigger()
def contextMenuEvent(self, event):
    # Set context menu mode
    app = get_app()
    app.context_menu_object = "files"

    index = self.indexAt(event.pos())

    # Build menu
    menu = QMenu(self)
    menu.addAction(self.win.actionImportFiles)
    menu.addAction(self.win.actionThumbnailView)

    if index.isValid():
        # Look up the model item and our unique ID
        item = self.files_model.model.itemFromIndex(index)
        file_id = self.files_model.model.item(item.row(), 5).text()

        try:
            # Check whether we know the item is selected
            i = self.win.selected_files.index(file_id)
        except ValueError:
            # Add to our list, if it's not already there
            self.win.selected_files.append(file_id)

        # If a valid file is selected, show file related options
        menu.addSeparator()

        # Add edit title option (if svg file)
        file = File.get(id=file_id)
        if file and file.data.get("path").endswith(".svg"):
            menu.addAction(self.win.actionEditTitle)
            menu.addAction(self.win.actionDuplicateTitle)
            menu.addSeparator()

        menu.addAction(self.win.actionPreview_File)
        menu.addAction(self.win.actionSplitClip)
        menu.addAction(self.win.actionAdd_to_Timeline)
        menu.addAction(self.win.actionFile_Properties)
        menu.addSeparator()
        menu.addAction(self.win.actionRemove_from_Project)
        menu.addSeparator()

    # Show menu
    menu.exec_(event.globalPos())
def actionRemove_from_Project_trigger(self, event):
    log.info("actionRemove_from_Project_trigger")

    # Loop through selected files
    for file_id in self.selected_files:
        # Find matching file
        f = File.get(id=file_id)
        if f:
            # Remove file
            f.delete()

            # Find matching clips (if any)
            clips = Clip.filter(file_id=file_id)
            for c in clips:
                # Remove clip
                c.delete()

    # Clear selected files
    self.selected_files = []
def contextMenuEvent(self, event):
    event.accept()

    # Set context menu mode
    app = get_app()
    app.context_menu_object = "files"

    index = self.indexAt(event.pos())

    # Build menu
    menu = QMenu(self)
    menu.addAction(self.win.actionImportFiles)
    menu.addAction(self.win.actionDetailsView)

    if index.isValid():
        # Look up the model item and our unique ID
        model = self.model()

        # Look up file_id from 5th column of row
        id_index = index.sibling(index.row(), 5)
        file_id = model.data(id_index, Qt.DisplayRole)

        # If a valid file selected, show file related options
        menu.addSeparator()

        # Add edit title option (if svg file)
        file = File.get(id=file_id)
        if file and file.data.get("path").endswith(".svg"):
            menu.addAction(self.win.actionEditTitle)
            menu.addAction(self.win.actionDuplicateTitle)
            menu.addSeparator()

        menu.addAction(self.win.actionPreview_File)
        menu.addAction(self.win.actionSplitClip)
        menu.addAction(self.win.actionAdd_to_Timeline)
        menu.addAction(self.win.actionFile_Properties)
        menu.addSeparator()
        menu.addAction(self.win.actionRemove_from_Project)
        menu.addSeparator()

    # Show menu
    menu.popup(event.globalPos())
def add_file(self, filepath):
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True

    except:
        # Handle exception
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def value_updated(self, item):
    """ Name or tags updated """
    if self.files_model.ignore_updates:
        return

    # Get translation method
    _ = get_app()._tr

    # Determine what was changed
    file_id = self.files_model.model.item(item.row(), 5).text()
    name = self.files_model.model.item(item.row(), 1).text()
    tags = self.files_model.model.item(item.row(), 2).text()

    # Get file object and update friendly name and tags attribute
    f = File.get(id=file_id)
    f.data.update({"name": name or os.path.basename(f.data.get("path"))})
    if "tags" in f.data or tags:
        f.data.update({"tags": tags})

    # Save File
    f.save()

    # Update file thumbnail
    self.win.FileUpdated.emit(file_id)
def add_file(self, filepath):
    """ Add an animation to the project file tree """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Get the JSON for the clip's internal reader
    try:
        # Open image sequence in FFmpegReader
        reader = openshot.FFmpegReader(filepath)
        reader.Open()

        # Serialize JSON for the reader
        file_data = json.loads(reader.Json())

        # Set media type
        file_data["media_type"] = "video"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True

    except:
        # Handle exception
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def add_file(self, filepath):
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    reader = clip.Reader()
    file_data = json.loads(reader.Json())
    print("file_data:", file_data)

    # Determine media type
    if file_data["has_video"]:
        file_data["media_type"] = "video"
    elif file_data["has_audio"] and not file_data["has_video"]:
        file_data["media_type"] = "audio"

    # Save new file to the project data
    file = File()
    file.data = file_data

    # Save file
    file.save()

    # open in timeline added by yanght======
    self.timeline.addNewClip(file)

    return True
def add_file(self, filepath):
    filename = os.path.basename(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Set media type
        file_data["media_type"] = "image"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True

    except Exception as ex:
        # Handle exception
        log.error('Could not import {}: {}'.format(filename, str(ex)))
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def test_add_file(self):
    """ Test the File.save method by adding multiple files """
    # Import additional classes that need the app defined first
    from classes.query import File

    # Find number of files in project
    num_files = len(File.filter())

    # Create file
    r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

    # Parse JSON
    file_data = json.loads(r.Json())

    # Insert into project data
    query_file = File()
    query_file.data = file_data
    query_file.data["path"] = os.path.join(PATH, "images", "openshot.png")
    query_file.data["media_type"] = "image"
    query_file.save()
    self.assertTrue(query_file)
    self.assertEqual(len(File.filter()), num_files + 1)

    # Save the file again (which should not change the total # of files)
    query_file.save()
    self.assertEqual(len(File.filter()), num_files + 1)
def import_xml():
    """Import final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(
        app.window, _("Import XML..."), recommended_path,
        _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0]
    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = list(reversed(sorted(all_tracks, key=itemgetter('number'))))[0].get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName("locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {
                "number": track_number,
                "y": 0,
                "label": "XML Import %s" % track_index,
                "lock": is_locked
            }
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName("file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no clippath node found
                    # This usually happens for linked audio clips (which OpenShot combines audio and thus ignores this)
                    continue

                clip_path, is_modified, is_skipped = find_missing_file(clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data

                        # Save file
                        file.save()
                    except Exception:
                        # Ignore errors for now
                        pass

                if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                # Create Clip object
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName("name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(clip_element.getElementsByTagName("start")[0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(clip_element.getElementsByTagName("in")[0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(clip_element.getElementsByTagName("out")[0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName("effect"):
                    effectid = effect_element.getElementsByTagName("effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })
                    elif effectid == "audiolevels":
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })

                # Save clip
                clip.save()

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position())
def currentChanged(self, selected, deselected):
    # Get selected item
    self.selected = selected
    self.deselected = deselected

    # Get translation object
    _ = self.app._tr

    # Clear existing settings
    self.win.clear_effect_controls()

    # Get animation details
    animation = self.get_animation_details()
    self.selected_template = animation.get("service")

    # In newer versions of Qt, setting the model invokes the currentChanged signal,
    # but the selection is -1. So, just do nothing here.
    if not self.selected_template:
        return

    # Assign a new unique id for each template selected
    self.generateUniqueFolder()

    # Loop through params
    for param in animation.get("params", []):
        log.info(param["title"])

        # Is Hidden Param?
        if param["name"] == "start_frame" or param["name"] == "end_frame":
            # add value to dictionary
            self.params[param["name"]] = int(param["default"])

            # skip to next param without rendering the controls
            continue

        # Create Label
        widget = None
        label = QLabel()
        label.setText(_(param["title"]))
        label.setToolTip(_(param["title"]))

        if param["type"] == "spinner":
            # add value to dictionary
            self.params[param["name"]] = float(param["default"])

            # create spinner
            widget = QDoubleSpinBox()
            widget.setMinimum(float(param["min"]))
            widget.setMaximum(float(param["max"]))
            widget.setValue(float(param["default"]))
            widget.setSingleStep(0.01)
            widget.setToolTip(param["title"])
            widget.valueChanged.connect(functools.partial(self.spinner_value_changed, param))

        elif param["type"] == "text":
            # add value to dictionary
            self.params[param["name"]] = _(param["default"])

            # create text box
            widget = QLineEdit()
            widget.setText(_(param["default"]))
            widget.textChanged.connect(functools.partial(self.text_value_changed, widget, param))

        elif param["type"] == "multiline":
            # add value to dictionary
            self.params[param["name"]] = _(param["default"])

            # create multiline text box
            widget = QTextEdit()
            widget.setText(_(param["default"]).replace("\\n", "\n"))
            widget.textChanged.connect(functools.partial(self.text_value_changed, widget, param))

        elif param["type"] == "dropdown":
            # add value to dictionary
            self.params[param["name"]] = param["default"]

            # create dropdown
            widget = QComboBox()
            widget.currentIndexChanged.connect(functools.partial(self.dropdown_index_changed, widget, param))

            # Add values to dropdown
            if "project_files" in param["name"]:
                # override files dropdown
                param["values"] = {}
                for file in File.filter():
                    if file.data["media_type"] in ("image", "video"):
                        (dirName, fileName) = os.path.split(file.data["path"])
                        (fileBaseName, fileExtension) = os.path.splitext(fileName)

                        if fileExtension.lower() not in (".svg",):
                            param["values"][fileName] = "|".join((
                                file.data["path"],
                                str(file.data["height"]),
                                str(file.data["width"]),
                                file.data["media_type"],
                                str(file.data["fps"]["num"] / file.data["fps"]["den"])))

            # Add normal values
            box_index = 0
            for k, v in sorted(param["values"].items()):
                # add dropdown item
                widget.addItem(_(k), v)

                # select dropdown (if default)
                if v == param["default"]:
                    widget.setCurrentIndex(box_index)
                box_index = box_index + 1

            if not param["values"]:
                widget.addItem(_("No Files Found"), "")
                widget.setEnabled(False)

        elif param["type"] == "color":
            # add value to dictionary
            color = QColor(param["default"])
            self.params[param["name"]] = [color.redF(), color.greenF(), color.blueF()]
            widget = QPushButton()
            widget.setText("")
            widget.setStyleSheet("background-color: {}".format(param["default"]))
            widget.clicked.connect(functools.partial(self.color_button_clicked, widget, param))

        # Add Label and Widget to the form
        if (widget and label):
            self.win.settingsContainer.layout().addRow(label, widget)
        elif (label):
            self.win.settingsContainer.layout().addRow(label)

    # Enable interface
    self.enable_interface()

    # Init slider values
    self.init_slider_values()
def update_model(self, clear=True):
    log.info("updating files model.")
    app = get_app()

    # Get window to check filters
    win = app.window
    _ = app._tr

    # Skip updates (if needed)
    if self.ignore_update_signal:
        return

    # Clear all items
    if clear:
        self.model_ids = {}
        self.model.clear()

    # Add Headers
    self.model.setHorizontalHeaderLabels(["", _("Name"), _("Tags"), "", "", ""])

    # Get list of files in project
    files = File.filter()  # get all files

    # add item for each file
    for file in files:
        path, filename = os.path.split(file.data["path"])
        tags = ""
        if "tags" in file.data.keys():
            tags = file.data["tags"]
        name = filename
        if "name" in file.data.keys():
            name = file.data["name"]

        if not win.actionFilesShowAll.isChecked():
            if win.actionFilesShowVideo.isChecked():
                if not file.data["media_type"] == "video":
                    continue  # to next file, didn't match filter
            elif win.actionFilesShowAudio.isChecked():
                if not file.data["media_type"] == "audio":
                    continue  # to next file, didn't match filter
            elif win.actionFilesShowImage.isChecked():
                if not file.data["media_type"] == "image":
                    continue  # to next file, didn't match filter

        if win.filesFilter.text() != "":
            if not win.filesFilter.text().lower() in filename.lower() \
                    and not win.filesFilter.text().lower() in tags.lower() \
                    and not win.filesFilter.text().lower() in name.lower():
                continue

        # Generate thumbnail for file (if needed)
        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
            # Check for start and end attributes (optional)
            thumbnail_frame = 1
            if 'start' in file.data.keys():
                fps = file.data["fps"]
                fps_float = float(fps["num"]) / float(fps["den"])
                thumbnail_frame = round(float(file.data['start']) * fps_float) + 1

            # Determine thumb path (default value... a guess)
            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s-%s.png" % (file.id, thumbnail_frame))

            # Connect to thumbnail server and get image
            thumb_server_details = get_app().window.http_server_thread.server_address
            thumb_address = "http://%s:%s/thumbnails/%s/%s/path/" % (
                thumb_server_details[0], thumb_server_details[1], file.id, thumbnail_frame)
            r = get(thumb_address)
            if r.ok:
                # Update thumbnail path to real one
                thumb_path = r.text
        else:
            # Audio file
            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

        row = []

        # Append thumbnail
        col = QStandardItem()
        col.setIcon(QIcon(thumb_path))
        col.setText(name)
        col.setToolTip(filename)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append Filename
        col = QStandardItem("Name")
        col.setData(filename, Qt.DisplayRole)
        col.setText(name)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Tags
        col = QStandardItem("Tags")
        col.setData(tags, Qt.DisplayRole)
        col.setText(tags)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Media Type
        col = QStandardItem("Type")
        col.setData(file.data["media_type"], Qt.DisplayRole)
        col.setText(file.data["media_type"])
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Path
        col = QStandardItem("Path")
        col.setData(path, Qt.DisplayRole)
        col.setText(path)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append ID
        col = QStandardItem("ID")
        col.setData(file.data["id"], Qt.DisplayRole)
        col.setText(file.data["id"])
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append ROW to MODEL (if does not already exist in model)
        if not file.data["id"] in self.model_ids:
            self.model.appendRow(row)
            self.model_ids[file.data["id"]] = file.data["id"]

        # Process events in QT (to keep the interface responsive)
        app.processEvents()

    # Refresh view and filters (to hide or show this new item)
    get_app().window.resize_contents()

    # Emit signal
    self.model.ModelRefreshed.emit()
def update_model(self, clear=True):
    log.info("updating files model.")
    app = get_app()

    # Get window to check filters
    win = app.window
    _ = app._tr

    # Skip updates (if needed)
    if self.ignore_update_signal:
        return

    # Clear all items
    if clear:
        self.model_ids = {}
        self.model.clear()

    # Add Headers
    self.model.setHorizontalHeaderLabels([_("Thumb"), _("Name"), _("Tags"), "", "", ""])

    # Get list of files in project
    files = File.filter()  # get all files

    # add item for each file
    for file in files:
        path, filename = os.path.split(file.data["path"])
        tags = ""
        if "tags" in file.data.keys():
            tags = file.data["tags"]
        name = filename
        if "name" in file.data.keys():
            name = file.data["name"]

        if not win.actionFilesShowAll.isChecked():
            if win.actionFilesShowVideo.isChecked():
                if not file.data["media_type"] == "video":
                    continue  # to next file, didn't match filter
            elif win.actionFilesShowAudio.isChecked():
                if not file.data["media_type"] == "audio":
                    continue  # to next file, didn't match filter
            elif win.actionFilesShowImage.isChecked():
                if not file.data["media_type"] == "image":
                    continue  # to next file, didn't match filter

        if win.filesFilter.text() != "":
            if not win.filesFilter.text().lower() in filename.lower() \
                    and not win.filesFilter.text().lower() in tags.lower() \
                    and not win.filesFilter.text().lower() in name.lower():
                continue

        # Generate thumbnail for file (if needed)
        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
            # Determine thumb path
            thumb_path = os.path.join(info.THUMBNAIL_PATH, "{}.png".format(file.id))

            # Check if thumb exists
            if not os.path.exists(thumb_path):
                try:
                    # Convert path to the correct relative path (based on this folder)
                    file_path = file.absolute_path()

                    # Reload this reader
                    clip = openshot.Clip(file_path)
                    reader = clip.Reader()

                    # Open reader
                    reader.Open()

                    # Determine if video overlay should be applied to thumbnail
                    overlay_path = ""
                    if file.data["media_type"] == "video":
                        overlay_path = os.path.join(info.IMAGES_PATH, "overlay.png")

                    # Check for start and end attributes (optional)
                    thumbnail_frame = 1
                    if 'start' in file.data.keys():
                        fps = file.data["fps"]
                        fps_float = float(fps["num"]) / float(fps["den"])
                        thumbnail_frame = round(float(file.data['start']) * fps_float) + 1

                    # Save thumbnail
                    reader.GetFrame(thumbnail_frame).Thumbnail(
                        thumb_path, 98, 64,
                        os.path.join(info.IMAGES_PATH, "mask.png"),
                        overlay_path, "#000", False)
                    reader.Close()
                    clip.Close()

                except:
                    # Handle exception
                    msg = QMessageBox()
                    msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
                    msg.exec_()
                    continue
        else:
            # Audio file
            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

        row = []

        # Append thumbnail
        col = QStandardItem()
        col.setIcon(QIcon(thumb_path))
        col.setText((name[:9] + '...') if len(name) > 10 else name)
        col.setToolTip(filename)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append Filename
        col = QStandardItem("Name")
        col.setData(filename, Qt.DisplayRole)
        col.setText((name[:20] + '...') if len(name) > 15 else name)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Tags
        col = QStandardItem("Tags")
        col.setData(tags, Qt.DisplayRole)
        col.setText(tags)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Media Type
        col = QStandardItem("Type")
        col.setData(file.data["media_type"], Qt.DisplayRole)
        col.setText(file.data["media_type"])
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable)
        row.append(col)

        # Append Path
        col = QStandardItem("Path")
        col.setData(path, Qt.DisplayRole)
        col.setText(path)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append ID
        col = QStandardItem("ID")
        col.setData(file.data["id"], Qt.DisplayRole)
        col.setText(file.data["id"])
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append ROW to MODEL (if does not already exist in model)
        if not file.data["id"] in self.model_ids:
            self.model.appendRow(row)
            self.model_ids[file.data["id"]] = file.data["id"]

        # Process events in QT (to keep the interface responsive)
        app.processEvents()

    # Refresh view and filters (to hide or show this new item)
    get_app().window.resize_contents()
def create_clip(context, track):
    """Create a new clip based on this context dict"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get clip path (and prompt user if path not found)
    clip_path, is_modified, is_skipped = find_missing_file(context.get("clip_path", ""))
    if is_skipped:
        return

    # Get video context
    video_ctx = context.get("AX", {}).get("V", {})
    audio_ctx = context.get("AX", {}).get("A", {})

    # Check for this path in our existing project data
    file = File.get(path=clip_path)

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip_obj = openshot.Clip(clip_path)

    if not file:
        # Get the JSON for the clip's internal reader
        try:
            reader = clip_obj.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"

            # Save new file to the project data
            file = File()
            file.data = file_data

            # Save file
            file.save()
        except:
            log.warning('Error building File object for %s' % clip_path, exc_info=1)

    if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
        # Determine thumb path
        thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
    else:
        # Audio file
        thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

    # Create Clip object
    clip = Clip()
    clip.data = json.loads(clip_obj.Json())
    clip.data["file_id"] = file.id
    clip.data["title"] = context.get("clip_path", "")
    clip.data["layer"] = track.data.get("number", 1000000)

    if video_ctx and not audio_ctx:
        # Only video
        clip.data["position"] = timecodeToSeconds(video_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_audio"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable audio
                },
                "interpolation": 2
            }]
        }
    elif audio_ctx and not video_ctx:
        # Only audio
        clip.data["position"] = timecodeToSeconds(audio_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(audio_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(audio_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_video"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable video
                },
                "interpolation": 2
            }]
        }
    else:
        # Both video and audio
        clip.data["position"] = timecodeToSeconds(video_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)

    # Add volume keyframes
    if context.get("volume"):
        clip.data["volume"] = {"Points": []}
        for keyframe in context.get("volume", []):
            clip.data["volume"]["Points"].append({
                "co": {
                    "X": round(timecodeToSeconds(keyframe.get("time", 0.0), fps_num, fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Add alpha keyframes
    if context.get("opacity"):
        clip.data["alpha"] = {"Points": []}
        for keyframe in context.get("opacity", []):
            clip.data["alpha"]["Points"].append({
                "co": {
                    "X": round(timecodeToSeconds(keyframe.get("time", 0.0), fps_num, fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Save clip
    clip.save()
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {
        "openshot-qt": info.VERSION,
        "libopenshot": v.ToString()
    }

    # Get FPS from project
    from classes.app import get_app
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json())
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed)
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json())
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        if clip.audio_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json()),
                            "contrast": json.loads(contrast.Json()),
                            "reader": json.loads(transition_reader.Json()),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
def currentChanged(self, selected, deselected):
    # Get selected item
    self.selected = selected
    self.deselected = deselected

    # Get translation object
    _ = self.app._tr

    # Clear existing settings
    self.win.clear_effect_controls()

    # Get animation details
    animation = self.get_animation_details()
    self.selected_template = animation["service"]

    # Assign a new unique id for each template selected
    self.generateUniqueFolder()

    # Loop through params
    for param in animation["params"]:
        log.info(param["title"])

        # Is Hidden Param?
        if param["name"] == "start_frame" or param["name"] == "end_frame":
            # add value to dictionary
            self.params[param["name"]] = int(param["default"])

            # skip to next param without rendering the controls
            continue

        # Create Label
        widget = None
        label = QLabel()
        label.setText(_(param["title"]))
        label.setToolTip(_(param["title"]))

        if param["type"] == "spinner":
            # add value to dictionary
            self.params[param["name"]] = float(param["default"])

            # create spinner
            widget = QDoubleSpinBox()
            widget.setMinimum(float(param["min"]))
            widget.setMaximum(float(param["max"]))
            widget.setValue(float(param["default"]))
            widget.setSingleStep(0.01)
            widget.setToolTip(param["title"])
            widget.valueChanged.connect(functools.partial(self.spinner_value_changed, param))

        elif param["type"] == "text":
            # add value to dictionary
            self.params[param["name"]] = _(param["default"])

            # create text box
            widget = QLineEdit()
            widget.setText(_(param["default"]))
            widget.textChanged.connect(functools.partial(self.text_value_changed, widget, param))

        elif param["type"] == "multiline":
            # add value to dictionary
            self.params[param["name"]] = _(param["default"])

            # create multiline text box
            widget = QTextEdit()
            widget.setText(_(param["default"]))
            widget.textChanged.connect(functools.partial(self.text_value_changed, widget, param))

        elif param["type"] == "dropdown":
            # add value to dictionary
            self.params[param["name"]] = param["default"]

            # create dropdown
            widget = QComboBox()
            widget.currentIndexChanged.connect(functools.partial(self.dropdown_index_changed, widget, param))

            # Add values to dropdown
            if "project_files" in param["name"]:
                # override files dropdown
                param["values"] = {}
                for file in File.filter():
                    if file.data["media_type"] in ("image", "video"):
                        (dirName, fileName) = os.path.split(file.data["path"])
                        (fileBaseName, fileExtension) = os.path.splitext(fileName)

                        if fileExtension.lower() not in (".svg",):
                            param["values"][fileName] = "|".join((
                                file.data["path"],
                                str(file.data["height"]),
                                str(file.data["width"]),
                                file.data["media_type"],
                                str(file.data["fps"]["num"] / file.data["fps"]["den"])))

            # Add normal values
            box_index = 0
            for k, v in sorted(param["values"].items()):
                # add dropdown item
                widget.addItem(_(k), v)

                # select dropdown (if default)
                if v == param["default"]:
                    widget.setCurrentIndex(box_index)
                box_index = box_index + 1

            if not param["values"]:
                widget.addItem(_("No Files Found"), "")
                widget.setEnabled(False)

        elif param["type"] == "color":
            # add value to dictionary
            color = QColor(param["default"])
            self.params[param["name"]] = [color.redF(), color.greenF(), color.blueF()]
            widget = QPushButton()
            widget.setText("")
            widget.setStyleSheet("background-color: {}".format(param["default"]))
            widget.clicked.connect(functools.partial(self.color_button_clicked, widget, param))

        # Add Label and Widget to the form
        if (widget and label):
            self.win.settingsContainer.layout().addRow(label, widget)
        elif (label):
            self.win.settingsContainer.layout().addRow(label)

    # Enable interface
    self.enable_interface()

    # Init slider values
    self.init_slider_values()
def accept(self): """ Start exporting video """ # get translations app = get_app() _ = app._tr # Disable controls self.txtFileName.setEnabled(False) self.txtExportFolder.setEnabled(False) self.tabWidget.setEnabled(False) self.export_button.setEnabled(False) self.exporting = True # Determine type of export (video+audio, video, audio, image sequences) # _("Video & Audio"), _("Video Only"), _("Audio Only"), _("Image Sequence") export_type = self.cboExportTo.currentText() # Determine final exported file path if export_type != _("Image Sequence"): file_name_with_ext = "%s.%s" % (self.txtFileName.text().strip(), self.txtVideoFormat.text().strip()) else: file_name_with_ext = "%s%s" % (self.txtFileName.text().strip(), self.txtImageFormat.text().strip()) export_file_path = os.path.join(self.txtExportFolder.text().strip(), file_name_with_ext) log.info(export_file_path) # Translate object _ = get_app()._tr file = File.get(path=export_file_path) if file: ret = QMessageBox.question( self, _("Export Video"), _("%s is an input file.\nPlease choose a different name.") % file_name_with_ext, QMessageBox.Ok) self.txtFileName.setEnabled(True) self.txtExportFolder.setEnabled(True) self.tabWidget.setEnabled(True) self.export_button.setEnabled(True) self.exporting = False return # Handle exception if os.path.exists(export_file_path) and export_type in [ _("Video & Audio"), _("Video Only"), _("Audio Only") ]: # File already exists! Prompt user ret = QMessageBox.question( self, _("Export Video"), _("%s already exists.\nDo you want to replace it?") % file_name_with_ext, QMessageBox.No | QMessageBox.Yes) if ret == QMessageBox.No: # Stop and don't do anything # Re-enable controls self.txtFileName.setEnabled(True) self.txtExportFolder.setEnabled(True) self.tabWidget.setEnabled(True) self.export_button.setEnabled(True) self.exporting = False return # Init export settings video_settings = { "vformat": self.txtVideoFormat.text(), "vcodec": self.txtVideoCodec.text(), "fps": { "num": self.txtFrameRateNum.value(), "den": self.txtFrameRateDen.value() }, "width": self.txtWidth.value(), "height": self.txtHeight.value(), "pixel_ratio": { "num": self.txtPixelRatioNum.value(), "den": self.txtPixelRatioDen.value() }, "video_bitrate": int(self.convert_to_bytes(self.txtVideoBitRate.text())), "start_frame": self.txtStartFrame.value(), "end_frame": self.txtEndFrame.value() + 1 } audio_settings = { "acodec": self.txtAudioCodec.text(), "sample_rate": self.txtSampleRate.value(), "channels": self.txtChannels.value(), "channel_layout": self.cboChannelLayout.currentData(), "audio_bitrate": int(self.convert_to_bytes(self.txtAudioBitrate.text())) } # Override vcodec and format for Image Sequences if export_type == _("Image Sequence"): image_ext = os.path.splitext( self.txtImageFormat.text().strip())[1].replace(".", "") video_settings["vformat"] = image_ext if image_ext in ["jpg", "jpeg"]: video_settings["vcodec"] = "mjpeg" else: video_settings["vcodec"] = image_ext # Set MaxSize (so we don't have any downsampling) self.timeline.SetMaxSize(video_settings.get("width"), video_settings.get("height")) # Set lossless cache settings (temporarily) export_cache_object = openshot.CacheMemory(250) self.timeline.SetCache(export_cache_object) # Create FFmpegWriter try: w = openshot.FFmpegWriter(export_file_path) # Set video options if export_type in [ _("Video & Audio"), _("Video Only"), _("Image Sequence") ]: w.SetVideoOptions( True, video_settings.get("vcodec"), openshot.Fraction( video_settings.get("fps").get("num"), 
video_settings.get("fps").get("den")), video_settings.get("width"), video_settings.get("height"), openshot.Fraction( video_settings.get("pixel_ratio").get("num"), video_settings.get("pixel_ratio").get("den")), False, False, video_settings.get("video_bitrate")) # Set audio options if export_type in [_("Video & Audio"), _("Audio Only")]: w.SetAudioOptions(True, audio_settings.get("acodec"), audio_settings.get("sample_rate"), audio_settings.get("channels"), audio_settings.get("channel_layout"), audio_settings.get("audio_bitrate")) # Open the writer w.Open() # Notify window of export started get_app().window.ExportStarted.emit( export_file_path, video_settings.get("start_frame"), video_settings.get("end_frame")) progressstep = max( 1, round((video_settings.get("end_frame") - video_settings.get("start_frame")) / 100)) # Write each frame in the selected range for frame in range(video_settings.get("start_frame"), video_settings.get("end_frame")): # Update progress bar (emit signal to main window) if (frame % progressstep) == 0: get_app().window.ExportFrame.emit( export_file_path, video_settings.get("start_frame"), video_settings.get("end_frame"), frame) # Process events (to show the progress bar moving) QCoreApplication.processEvents() # Write the frame object to the video w.WriteFrame(self.timeline.GetFrame(frame)) # Check if we need to bail out if not self.exporting: break # Close writer w.Close() except Exception as e: # TODO: Find a better way to catch the error. This is the only way I have found that # does not throw an error error_type_str = str(e) log.info("Error type string: %s" % error_type_str) if "InvalidChannels" in error_type_str: log.info("Error setting invalid # of channels (%s)" % (audio_settings.get("channels"))) track_metric_error("invalid-channels-%s-%s-%s-%s" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"), audio_settings.get("channels"))) elif "InvalidSampleRate" in error_type_str: log.info("Error setting invalid sample rate (%s)" % (audio_settings.get("sample_rate"))) track_metric_error("invalid-sample-rate-%s-%s-%s-%s" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"), audio_settings.get("sample_rate"))) elif "InvalidFormat" in error_type_str: log.info("Error setting invalid format (%s)" % (video_settings.get("vformat"))) track_metric_error("invalid-format-%s" % (video_settings.get("vformat"))) elif "InvalidCodec" in error_type_str: log.info("Error setting invalid codec (%s/%s/%s)" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"))) track_metric_error("invalid-codec-%s-%s-%s" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"))) elif "ErrorEncodingVideo" in error_type_str: log.info("Error encoding video frame (%s/%s/%s)" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"))) track_metric_error("video-encode-%s-%s-%s" % (video_settings.get("vformat"), video_settings.get("vcodec"), audio_settings.get("acodec"))) # Show friendly error friendly_error = error_type_str.split("> ")[0].replace("<", "") # Prompt error message msg = QMessageBox() _ = get_app()._tr msg.setWindowTitle(_("Export Error")) msg.setText( _("Sorry, there was an error exporting your video: \n%s") % friendly_error) msg.exec_() # Notify window of export started get_app().window.ExportEnded.emit(export_file_path) # Close timeline object self.timeline.Close() # Clear all cache 
self.timeline.ClearAllCache() # Accept dialog super(Export, self).accept()
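# The export loop above emits ExportFrame roughly once per percent of progress
# (every progressstep frames). A standalone sketch of that cadence using the same
# formula; the generator name and the example frame range are assumptions for
# illustration only.
def progress_update_frames(start_frame, end_frame):
    """Yield the frame numbers at which a progress signal would be emitted."""
    progressstep = max(1, round((end_frame - start_frame) / 100))
    for frame in range(start_frame, end_frame):
        if (frame % progressstep) == 0:
            yield frame

# Example: with start_frame=1 and end_frame=301, progressstep is 3, so updates
# fire at frames 3, 6, ..., 300 (about 100 updates over the whole export).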
def update_model(self, clear=True): log.info("updating files model.") app = get_app() # Get window to check filters win = app.window _ = app._tr # Skip updates (if needed) if self.ignore_update_signal: return # Clear all items if clear: self.model_ids = {} self.model.clear() # Add Headers self.model.setHorizontalHeaderLabels([_("Thumb"), _("Name"), _("Tags"), "", "", ""]) # Get list of files in project files = File.filter() # get all files # add item for each file for file in files: path, filename = os.path.split(file.data["path"]) tags = "" if "tags" in file.data.keys(): tags = file.data["tags"] name = filename if "name" in file.data.keys(): name = file.data["name"] if not win.actionFilesShowAll.isChecked(): if win.actionFilesShowVideo.isChecked(): if not file.data["media_type"] == "video": continue # to next file, didn't match filter elif win.actionFilesShowAudio.isChecked(): if not file.data["media_type"] == "audio": continue # to next file, didn't match filter elif win.actionFilesShowImage.isChecked(): if not file.data["media_type"] == "image": continue # to next file, didn't match filter if win.filesFilter.text() != "": if not win.filesFilter.text().lower() in filename.lower() \ and not win.filesFilter.text().lower() in tags.lower() \ and not win.filesFilter.text().lower() in name.lower(): continue # Generate thumbnail for file (if needed) if (file.data["media_type"] == "video" or file.data["media_type"] == "image"): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, "{}.png".format(file.id)) # Check if thumb exists if not os.path.exists(thumb_path): try: # Convert path to the correct relative path (based on this folder) file_path = file.absolute_path() # Reload this reader clip = openshot.Clip(file_path) reader = clip.Reader() # Open reader reader.Open() # Determine if video overlay should be applied to thumbnail overlay_path = "" if file.data["media_type"] == "video": overlay_path = os.path.join(info.IMAGES_PATH, "overlay.png") # Check for start and end attributes (optional) thumbnail_frame = 1 if 'start' in file.data.keys(): fps = file.data["fps"] fps_float = float(fps["num"]) / float(fps["den"]) thumbnail_frame = round(float(file.data['start']) * fps_float) + 1 # Save thumbnail reader.GetFrame(thumbnail_frame).Thumbnail(thumb_path, 98, 64, os.path.join(info.IMAGES_PATH, "mask.png"), overlay_path, "#000", False) reader.Close() clip.Close() except: # Handle exception msg = QMessageBox() msg.setText(_("{} is not a valid video, audio, or image file.".format(filename))) msg.exec_() continue else: # Audio file thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png") row = [] # Append thumbnail col = QStandardItem() col.setIcon(QIcon(thumb_path)) col.setText((name[:9] + '...') if len(name) > 10 else name) col.setToolTip(filename) col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled) row.append(col) # Append Filename col = QStandardItem("Name") col.setData(filename, Qt.DisplayRole) col.setText((name[:20] + '...') if len(name) > 15 else name) col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable) row.append(col) # Append Tags col = QStandardItem("Tags") col.setData(tags, Qt.DisplayRole) col.setText(tags) col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable) row.append(col) # Append Media Type col = QStandardItem("Type") col.setData(file.data["media_type"], Qt.DisplayRole) col.setText(file.data["media_type"]) col.setFlags(Qt.ItemIsSelectable | 
Qt.ItemIsEnabled | Qt.ItemIsDragEnabled | Qt.ItemIsEditable) row.append(col) # Append Path col = QStandardItem("Path") col.setData(path, Qt.DisplayRole) col.setText(path) col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled) row.append(col) # Append ID col = QStandardItem("ID") col.setData(file.data["id"], Qt.DisplayRole) col.setText(file.data["id"]) col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled) row.append(col) # Append ROW to MODEL (if does not already exist in model) if not file.data["id"] in self.model_ids: self.model.appendRow(row) self.model_ids[file.data["id"]] = file.data["id"] # Process events in QT (to keep the interface responsive) app.processEvents() # Refresh view and filters (to hide or show this new item) get_app().window.resize_contents()
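# The media-type and text filters applied in update_model above can be read as a
# single predicate. This is a rearrangement of the same checks for illustration
# only; the function name is an assumption, while the window attributes
# (actionFilesShowAll, filesFilter, etc.) are the ones already used above.
def file_matches_filters(win, media_type, filename, name, tags):
    """Return True if a file row should be visible under the current view filters."""
    if not win.actionFilesShowAll.isChecked():
        if win.actionFilesShowVideo.isChecked():
            if media_type != "video":
                return False
        elif win.actionFilesShowAudio.isChecked():
            if media_type != "audio":
                return False
        elif win.actionFilesShowImage.isChecked():
            if media_type != "image":
                return False
    needle = win.filesFilter.text().lower()
    if needle and needle not in filename.lower() \
            and needle not in tags.lower() \
            and needle not in name.lower():
        return False
    return True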
def add_file(self, filepath): path, filename = os.path.split(filepath) # Add file into project app = get_app() _ = get_app()._tr # Check for this path in our existing project data file = File.get(path=filepath) # If this file is already found, exit if file: return # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip = openshot.Clip(filepath) # Get the JSON for the clip's internal reader try: reader = clip.Reader() file_data = json.loads(reader.Json()) # Determine media type if file_data["has_video"] and not self.is_image(file_data): file_data["media_type"] = "video" elif file_data["has_video"] and self.is_image(file_data): file_data["media_type"] = "image" elif file_data["has_audio"] and not file_data["has_video"]: file_data["media_type"] = "audio" # Save new file to the project data file = File() file.data = file_data # Is this file an image sequence / animation? image_seq_details = self.get_image_sequence_details(filepath) if image_seq_details: # Update file with correct path folder_path = image_seq_details["folder_path"] file_name = image_seq_details["file_path"] base_name = image_seq_details["base_name"] fixlen = image_seq_details["fixlen"] digits = image_seq_details["digits"] extension = image_seq_details["extension"] if not fixlen: zero_pattern = "%d" else: zero_pattern = "%%0%sd" % digits # Generate the regex pattern for this image sequence pattern = "%s%s.%s" % (base_name, zero_pattern, extension) # Split folder name (parentPath, folderName) = os.path.split(folder_path) if not base_name: # Give alternate name file.data["name"] = "%s (%s)" % (folderName, pattern) # Load image sequence (to determine duration and video_length) image_seq = openshot.Clip(os.path.join(folder_path, pattern)) # Update file details file.data["path"] = os.path.join(folder_path, pattern) file.data["media_type"] = "video" file.data["duration"] = image_seq.Reader().info.duration file.data["video_length"] = image_seq.Reader().info.video_length # Save file file.save() return True except: # Handle exception msg = QMessageBox() msg.setText(_("{} is not a valid video, audio, or image file.".format(filename))) msg.exec_() return False
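# The image-sequence branch above builds a printf-style filename pattern such as
# "image_%04d.png" from the detected sequence details. A standalone sketch of that
# construction; the helper name and the example values are assumptions for
# illustration only.
def build_sequence_pattern(base_name, fixlen, digits, extension):
    """Reproduce the zero-padded pattern used when importing an image sequence."""
    if not fixlen:
        zero_pattern = "%d"
    else:
        zero_pattern = "%%0%sd" % digits
    return "%s%s.%s" % (base_name, zero_pattern, extension)

# Example: build_sequence_pattern("image_", True, 4, "png") -> "image_%04d.png"
#          build_sequence_pattern("frame", False, 0, "jpg") -> "frame%d.jpg"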
def read_legacy_project_file(self, file_path): """Attempt to read a legacy version 1.x openshot project file""" import sys, pickle from classes.query import File, Track, Clip, Transition from classes.app import get_app import openshot try: import json except ImportError: import simplejson as json # Get translation method _ = get_app()._tr # Append version info v = openshot.GetVersion() project_data = {} project_data["version"] = {"openshot-qt" : info.VERSION, "libopenshot" : v.ToString()} # Get FPS from project from classes.app import get_app fps = get_app().project.get(["fps"]) fps_float = float(fps["num"]) / float(fps["den"]) # Import legacy openshot classes (from version 1.X) from classes.legacy.openshot import classes as legacy_classes from classes.legacy.openshot.classes import project as legacy_project from classes.legacy.openshot.classes import sequences as legacy_sequences from classes.legacy.openshot.classes import track as legacy_track from classes.legacy.openshot.classes import clip as legacy_clip from classes.legacy.openshot.classes import keyframe as legacy_keyframe from classes.legacy.openshot.classes import files as legacy_files from classes.legacy.openshot.classes import transition as legacy_transition from classes.legacy.openshot.classes import effect as legacy_effect from classes.legacy.openshot.classes import marker as legacy_marker sys.modules['openshot.classes'] = legacy_classes sys.modules['classes.project'] = legacy_project sys.modules['classes.sequences'] = legacy_sequences sys.modules['classes.track'] = legacy_track sys.modules['classes.clip'] = legacy_clip sys.modules['classes.keyframe'] = legacy_keyframe sys.modules['classes.files'] = legacy_files sys.modules['classes.transition'] = legacy_transition sys.modules['classes.effect'] = legacy_effect sys.modules['classes.marker'] = legacy_marker # Keep track of files that failed to load failed_files = [] with open(file_path.encode('UTF-8'), 'rb') as f: try: # Unpickle legacy openshot project file v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8") file_lookup = {} # Loop through files for item in v1_data.project_folder.items: # Is this item a File (i.e. ignore folders) if isinstance(item, legacy_files.OpenShotFile): # Create file try: clip = openshot.Clip(item.name) reader = clip.Reader() file_data = json.loads(reader.Json(), strict=False) # Determine media type if file_data["has_video"] and not self.is_image(file_data): file_data["media_type"] = "video" elif file_data["has_video"] and self.is_image(file_data): file_data["media_type"] = "image" elif file_data["has_audio"] and not file_data["has_video"]: file_data["media_type"] = "audio" # Save new file to the project data file = File() file.data = file_data file.save() # Keep track of new ids and old ids file_lookup[item.unique_id] = file except: # Handle exception quietly msg = ("%s is not a valid video, audio, or image file." 
% item.name) log.error(msg) failed_files.append(item.name) # Delete all tracks track_list = copy.deepcopy(Track.filter()) for track in track_list: track.delete() # Create new tracks track_counter = 0 for legacy_t in reversed(v1_data.sequences[0].tracks): t = Track() t.data = {"number": track_counter, "y": 0, "label": legacy_t.name} t.save() track_counter += 1 # Loop through clips track_counter = 0 for sequence in v1_data.sequences: for track in reversed(sequence.tracks): for clip in track.clips: # Get associated file for this clip if clip.file_object.unique_id in file_lookup.keys(): file = file_lookup[clip.file_object.unique_id] else: # Skip missing file log.info("Skipping importing missing file: %s" % clip.file_object.unique_id) continue # Create clip if (file.data["media_type"] == "video" or file.data["media_type"] == "image"): # Determine thumb path thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"]) else: # Audio file thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png") # Get file name path, filename = os.path.split(file.data["path"]) # Convert path to the correct relative path (based on this folder) file_path = file.absolute_path() # Create clip object for this file c = openshot.Clip(file_path) # Append missing attributes to Clip JSON new_clip = json.loads(c.Json(), strict=False) new_clip["file_id"] = file.id new_clip["title"] = filename new_clip["image"] = thumb_path # Check for optional start and end attributes new_clip["start"] = clip.start_time new_clip["end"] = clip.end_time new_clip["position"] = clip.position_on_track new_clip["layer"] = track_counter # Clear alpha (if needed) if clip.video_fade_in or clip.video_fade_out: new_clip["alpha"]["Points"] = [] # Video Fade IN if clip.video_fade_in: # Add keyframes start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER) start_object = json.loads(start.Json(), strict=False) end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER) end_object = json.loads(end.Json(), strict=False) new_clip["alpha"]["Points"].append(start_object) new_clip["alpha"]["Points"].append(end_object) # Video Fade OUT if clip.video_fade_out: # Add keyframes start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER) start_object = json.loads(start.Json(), strict=False) end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER) end_object = json.loads(end.Json(), strict=False) new_clip["alpha"]["Points"].append(start_object) new_clip["alpha"]["Points"].append(end_object) # Clear Audio (if needed) if clip.audio_fade_in or clip.audio_fade_out: new_clip["volume"]["Points"] = [] else: p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER) p_object = json.loads(p.Json(), strict=False) new_clip["volume"] = { "Points" : [p_object]} # Audio Fade IN if clip.audio_fade_in: # Add keyframes start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER) start_object = json.loads(start.Json(), strict=False) end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER) end_object = json.loads(end.Json(), strict=False) new_clip["volume"]["Points"].append(start_object) new_clip["volume"]["Points"].append(end_object) # Audio Fade OUT if clip.audio_fade_out: # Add keyframes start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, 
openshot.BEZIER) start_object = json.loads(start.Json(), strict=False) end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER) end_object = json.loads(end.Json(), strict=False) new_clip["volume"]["Points"].append(start_object) new_clip["volume"]["Points"].append(end_object) # Save clip clip_object = Clip() clip_object.data = new_clip clip_object.save() # Loop through transitions for trans in track.transitions: # Fix default transition if not trans.resource or not os.path.exists(trans.resource): trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg") # Open up QtImageReader for transition Image transition_reader = openshot.QtImageReader(trans.resource) trans_begin_value = 1.0 trans_end_value = -1.0 if trans.reverse: trans_begin_value = -1.0 trans_end_value = 1.0 brightness = openshot.Keyframe() brightness.AddPoint(1, trans_begin_value, openshot.BEZIER) brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER) contrast = openshot.Keyframe(trans.softness * 10.0) # Create transition dictionary transitions_data = { "id": get_app().project.generate_id(), "layer": track_counter, "title": "Transition", "type": "Mask", "position": trans.position_on_track, "start": 0, "end": trans.length, "brightness": json.loads(brightness.Json(), strict=False), "contrast": json.loads(contrast.Json(), strict=False), "reader": json.loads(transition_reader.Json(), strict=False), "replace_image": False } # Save transition t = Transition() t.data = transitions_data t.save() # Increment track counter track_counter += 1 except Exception as ex: # Error parsing legacy contents msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex}) log.error(msg) raise Exception(msg) # Show warning if some files failed to load if failed_files: # Throw exception raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files))) # Return mostly empty project_data dict (with just the current version #) log.info("Successfully loaded legacy project file: %s" % file_path) return project_data
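# The legacy fade handling above converts clip times (in seconds) into frame-based
# keyframe positions using the project frame rate. A standalone sketch of the
# fade-in case, built from the same openshot.Point calls used above; the function
# name and the example numbers are assumptions for illustration.
import openshot

def fade_in_alpha_points(start_time, fade_amount, fps_float):
    """Return the two alpha keyframe Points for a video fade-in (0.0 -> 1.0)."""
    start = openshot.Point(round(start_time * fps_float) + 1, 0.0, openshot.BEZIER)
    end = openshot.Point(round((start_time + fade_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
    return start, end

# Example: at 30 fps, a 2-second fade-in on a clip that starts at t=0 spans
# frames 1 through 61 (alpha rises from 0.0 to 1.0).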