def test_add_clip(self):
    """ Test the Clip.save method by adding multiple clips """
    # Deferred import: the query classes require the app to exist first
    from classes.query import Clip

    # Record how many clips the project starts with
    initial_count = len(Clip.filter())

    # Build a libopenshot clip from the bundled logo and serialize it
    source_clip = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    parsed = json.loads(source_clip.Json())

    # Persist the clip into the project data
    new_clip = Clip()
    new_clip.data = parsed
    new_clip.save()

    self.assertTrue(new_clip)
    self.assertEqual(len(Clip.filter()), initial_count + 1)

    # A second save must update in place, not insert a duplicate
    new_clip.save()
    self.assertEqual(len(Clip.filter()), initial_count + 1)
def GenerateThumbnail(file_path, thumb_path, thumbnail_frame, width, height, mask, overlay):
    """Create thumbnail image, and check for rotate metadata (if any).

    Args:
        file_path: media file to read
        thumb_path: destination PNG path for the thumbnail
        thumbnail_frame: frame number to render
        width, height: thumbnail dimensions in pixels
        mask, overlay: image paths passed through to Thumbnail()
    """
    # Create a clip object and get the reader
    clip = openshot.Clip(file_path)
    reader = clip.Reader()

    # Open reader
    reader.Open()

    # Get the 'rotate' metadata (if any)
    rotate = 0.0
    try:
        if reader.info.metadata.count("rotate"):
            rotate = float(reader.info.metadata.find("rotate").value()[1])
    except Exception:
        # Missing or malformed rotation metadata: fall back to no rotation
        pass

    # Create thumbnail folder (if needed). makedirs handles nested paths and
    # avoids the exists()/mkdir() race of the previous implementation.
    os.makedirs(os.path.dirname(thumb_path), exist_ok=True)

    # Save thumbnail image and close readers
    reader.GetFrame(thumbnail_frame).Thumbnail(
        thumb_path, width, height, mask, overlay,
        "#000", False, "png", 85, rotate)
    reader.Close()
    clip.Close()
def __init__(self, file_name, clip_id, is_video=None):
    # Force a numeric locale that libopenshot can parse; without this the
    # JSON round-trip raises a parse error (locale decimal separators)
    locale.setlocale(locale.LC_NUMERIC, 'en_US.utf8')

    self.clip = openshot.Clip(file_name)
    self.clip.Id(clip_id)

    self.is_video = is_video
    if self.is_video is not None:
        # Enable exactly one of the two streams, based on the flag
        video_flag = 1 if self.is_video else 0
        self.clip.has_video = openshot.Keyframe(video_flag)
        self.clip.has_audio = openshot.Keyframe(1 - video_flag)

    self.file_name = file_name
    self.file_type = get_file_type(self.file_name)
    self.timeline_instance = TimelineModel.get_instance()

    # If the timeline has no clips yet, seed timeline data from this clip
    if self.is_first_vid():
        self.set_timeline_data()
def add_file(self, filepath):
    # Add file into project
    app = get_app()
    _ = app._tr

    # Look up this path in the existing project data
    # ["1F595-1F3FE",
    #  "openshot-qt-git/src/emojis/color/svg/1F595-1F3FE.svg"]
    existing = File.get(path=filepath)
    if existing:
        # Already imported - nothing to do
        return existing

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    try:
        # Serialize the clip's internal reader to a dict
        file_data = json.loads(clip.Reader().Json())

        # Everything imported here is treated as an image
        file_data["media_type"] = "image"

        # Persist the new file in the project data
        new_file = File()
        new_file.data = file_data
        new_file.save()
        return new_file
    except Exception as ex:
        # Log exception
        log.warning("Failed to import file: {}".format(str(ex)))
def CutsToClips(cuts):
    """Convert a list of cut dictionaries into libopenshot Clip objects.

    Each cut dict must contain "clip" (a project clip id), "start", "end",
    and "video_length". Returns a (clips, video_length) tuple; on the first
    media-load failure it returns whatever was built so far.
    """
    clips = []
    position = 0
    video_length = 0
    print(cuts)
    for cut in cuts:
        c = Clip.filter(id=cut["clip"])
        path = c[0].data["reader"]["path"]
        # Translate project-relative times into times within the source file
        offset = float(c[0].data["position"])
        start = float(cut["start"]) - offset
        end = float(cut["end"]) - offset
        print("=======================-------start:", start, "end:", end, "position", position, path)
        try:
            clip = openshot.Clip(path)
            clip.Start(start)
            clip.End(end)
            clip.Position(position)
            clips.append(clip)
        except Exception:
            # Narrowed from a bare except. Abort and return the partial
            # result (preserves the original early-return behavior).
            log.error('Failed to load media file into preview player: %s' % path)
            return clips, video_length
        # NOTE(review): the extra "- offset" looks suspicious - (end - start)
        # already cancels the offset, so this shifts every later clip left by
        # the source clip's project position. Confirm against callers before
        # changing.
        position = position + (end - start) - offset
        video_length = video_length + cut["video_length"]
    return clips, video_length
def run(self):
    # $ dpkg -L openshot-qt | grep test
    # /usr/lib/python3/dist-packages/openshot_qt/tests/query_tests.py
    # Create clip
    # cp /usr/lib/python3/dist-packages/openshot_qt/images/AboutLogo.png asset/img/AboutLogo.png
    #clip = openshot.Clip(os.path.join(THE_IMG_DIR_PATH, 'AboutLogo.png'))
    background = '/usr/share/backgrounds/Spices_in_Athens_by_Makis_Chourdakis.jpg'
    clip = openshot.Clip(background)
    clip.Position(0)
    clip.Start(0)
    clip.End(5)

    # Round-trip the clip through JSON and pretty-print it for inspection
    clip_data = json.loads(clip.Json())
    printer = pprint.PrettyPrinter(indent=2)
    printer.pprint(clip_data)

    print()
    print(
        '================================================================================'
    )
    print()

    # Place the clip on the timeline and dump the timeline JSON
    self.timeline.AddClip(clip)
    print(self.timeline.Json())
def display_svg(self):
    """Render a thumbnail of self.filename and display it in the graphics view."""
    # Create a temp file for this thumbnail image. Requesting the .png
    # suffix up front and closing the descriptor fixes two defects in the
    # old mkstemp()+"%s.png" approach: the open fd was leaked, and the
    # suffix-less temp file was left orphaned on disk.
    new_file, tmp_filename = tempfile.mkstemp(suffix=".png")
    os.close(new_file)

    # Create a clip object and get the reader
    clip = openshot.Clip(self.filename)
    reader = clip.Reader()

    # Open reader
    reader.Open()

    # Save thumbnail image and close readers
    reader.GetFrame(1).Thumbnail(
        tmp_filename, self.graphicsView.width(), self.graphicsView.height(),
        "", "", "#000", False, "png", 85, 0.0)
    reader.Close()
    clip.Close()

    # Display temp image
    scene = QGraphicsScene(self)
    view = self.graphicsView
    svg = QtGui.QPixmap(tmp_filename)
    svg_scaled = svg.scaled(self.graphicsView.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
    scene.addPixmap(svg_scaled)
    view.setScene(scene)
    view.show()
def setUpClass(TestQueryClass):
    """ Init unit test data """
    # Create Qt application
    TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
    TestQueryClass.clip_ids = []
    TestQueryClass.file_ids = []
    TestQueryClass.transition_ids = []

    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition

    # Seed the project with five clips built from the bundled logo image
    for _ in range(5):
        source = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        query_clip = Clip()
        query_clip.data = json.loads(source.Json())
        query_clip.save()
        # Keep track of the ids
        TestQueryClass.clip_ids.append(query_clip.id)

    # Seed the project with five files backed by a dummy reader
    for _ in range(5):
        dummy = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
        query_file = File()
        query_file.data = json.loads(dummy.Json())
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()
        # Keep track of the ids
        TestQueryClass.file_ids.append(query_file.id)

    # Seed the project with five transitions (mask objects)
    for _ in range(5):
        mask = openshot.Mask()
        query_transition = Transition()
        query_transition.data = json.loads(mask.Json())
        query_transition.save()
        # Keep track of the ids
        TestQueryClass.transition_ids.append(query_transition.id)
def get_audio_data(clip_id, file_path, channel_filter, volume_keyframe):
    """Get a Clip object form libopenshot, and grab audio data"""
    clip = openshot.Clip(file_path)
    clip.Open()

    # Skip video decoding entirely - only the audio stream is needed
    clip.Reader().info.has_video = False

    log.info("Clip loaded, start thread")

    # Hand waveform extraction off to a background daemon thread
    worker = threading.Thread(
        target=get_waveform_thread,
        args=[clip, clip_id, file_path, channel_filter, volume_keyframe])
    worker.daemon = True
    worker.start()
def testHardwareDecode(self, decoder, decoder_card="0"):
    """Test specific settings for hardware decode, so the UI can remove unsupported options.

    Returns True when the given decoder/card combination successfully
    decodes the bundled sample clip (results are cached per card).
    """
    is_supported = False
    example_media = os.path.join(info.RESOURCES_PATH, "hardware-example.mp4")

    # Persist decoder card results
    if decoder_card not in self.hardware_tests_cards:
        # Init new decoder card list
        self.hardware_tests_cards[decoder_card] = []

    if int(decoder) in self.hardware_tests_cards.get(decoder_card):
        # Test already run and succeeded
        return True

    # Keep track of previous settings
    current_decoder = openshot.Settings.Instance().HARDWARE_DECODER
    current_decoder_card = openshot.Settings.Instance().HW_DE_DEVICE_SET

    try:
        # Temp override hardware settings (to test them)
        openshot.Settings.Instance().HARDWARE_DECODER = int(decoder)
        openshot.Settings.Instance().HW_DE_DEVICE_SET = int(decoder_card)

        # Find reader
        clip = openshot.Clip(example_media)
        reader = clip.Reader()

        # Open reader
        reader.Open()

        # Test decoded pixel values for a valid decode (based on hardware-example.mp4)
        if reader.GetFrame(0).CheckPixel(0, 0, 2, 133, 255, 255, 5):
            is_supported = True
            self.hardware_tests_cards[decoder_card].append(int(decoder))
        else:
            log.warning(
                "CheckPixel failed testing hardware decoding in preferences (i.e. wrong color found): %s-%s"
                % (decoder, decoder_card))

        reader.Close()
        clip.Close()
    except Exception:
        # Narrowed from a bare except; unsupported decoders are expected to
        # raise here, so this stays best-effort
        log.warning(
            "Exception trying to test hardware decoding in preferences (this is expected): %s-%s"
            % (decoder, decoder_card))
    finally:
        # Always resume the user's previous settings, even on failure
        openshot.Settings.Instance().HARDWARE_DECODER = current_decoder
        openshot.Settings.Instance().HW_DE_DEVICE_SET = current_decoder_card

    return is_supported
def create_clip_2(self):
    # $ dpkg -L openshot-qt | grep test
    # /usr/lib/python3/dist-packages/openshot_qt/tests/query_tests.py
    # Build the second test clip from a stock desktop background image,
    # positioned right after the first clip (5s in, 5s long)
    source = '/usr/share/backgrounds/Manhattan_Sunset_by_Giacomo_Ferroni.jpg'
    second = openshot.Clip(source)
    second.Position(5.0)
    second.Start(0)
    second.End(5)
    self.clips[1] = second
    return second
def create_clip_1(self):
    # $ dpkg -L openshot-qt | grep test
    # /usr/lib/python3/dist-packages/openshot_qt/tests/query_tests.py
    # Build the first test clip from a stock desktop background image,
    # placed at the start of the timeline (0s in, 5s long)
    source = '/usr/share/backgrounds/Spices_in_Athens_by_Makis_Chourdakis.jpg'
    first = openshot.Clip(source)
    first.Position(0.0)
    first.Start(0)
    first.End(5)
    self.clips[0] = first
    return first
def main():
    # $ dpkg -L openshot-qt | grep test
    # /usr/lib/python3/dist-packages/openshot_qt/tests/query_tests.py
    # Load a stock desktop background as a clip
    clip = openshot.Clip('/usr/share/backgrounds/Spices_in_Athens_by_Makis_Chourdakis.jpg')

    # Round-trip the clip through JSON and pretty-print the result
    clip_data = json.loads(clip.Json())
    pprint.PrettyPrinter(indent=2).pprint(clip_data)
def add_file(self, filepath):
    """Import a media file into the project.

    Returns True on success, False when the file cannot be read, and None
    when the path is already part of the project.
    """
    filename = os.path.basename(filepath)

    # Translation helper for the error dialog
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (still images report has_video=True, so the
        # is_image() check distinguishes them from real video)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception:
        # Narrowed from a bare except; show a friendly error dialog
        msg = QMessageBox()
        msg.setText(
            _("{} is not a valid video, audio, or image file.".format(
                filename)))
        msg.exec_()
        return False
def test_add_clip(self):
    # Baseline: number of clips already in the project
    before = len(Clip.filter())

    # Build a clip from the bundled logo image and parse its JSON
    logo_clip = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    parsed = json.loads(logo_clip.Json())

    # Insert into project data
    saved = Clip()
    saved.data = parsed
    saved.save()

    self.assertTrue(saved)
    self.assertEqual(len(Clip.filter()), before + 1)

    # Saving again must update in place, not create a second record
    saved.save()
    self.assertEqual(len(Clip.filter()), before + 1)
def get_width_from_file(path):
    """Return the timeline width (in px) a media file should occupy.

    Video and audio files are sized by their duration; still images get a
    fixed one-second width. Unknown types return 0.
    """
    t = get_file_type(path)
    width = 0
    if t == FileType.VIDEO_FILE:
        v = cv2.VideoCapture(path)
        try:
            # Seek to the end of the stream, then read the position in ms
            # to obtain the total duration
            v.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)
            d = v.get(cv2.CAP_PROP_POS_MSEC)
        finally:
            # Release the capture handle (previously leaked)
            v.release()
        width = seconds_to_pos(d / 1000)
    elif t == FileType.AUDIO_FILE:
        c = openshot.Clip(path)
        d = c.Duration()
        width = seconds_to_pos(d)
    elif t == FileType.IMAGE_FILE:
        # Still images: one second worth of pixels
        width = get_px_per_second()
    return width
def add_file(self, filepath): path, filename = os.path.split(filepath) # Add file into project app = get_app() _ = get_app()._tr # Check for this path in our existing project data file = File.get(path=filepath) # If this file is already found, exit if file: return # Load filepath in libopenshot clip object (which will try multiple readers to open it) clip = openshot.Clip(filepath) # Get the JSON for the clip's internal reader reader = clip.Reader() file_data = json.loads(reader.Json()) print("file_data:", file_data) # Determine media type if file_data["has_video"]: file_data["media_type"] = "video" elif file_data["has_audio"] and not file_data["has_video"]: file_data["media_type"] = "audio" # Save new file to the project data file = File() file.data = file_data # Save file file.save() # open in timeline added by yanght====== self.timeline.addNewClip(file) return True
def GenerateThumbnail(file_path, thumb_path, thumbnail_frame, width, height, mask, overlay):
    """Create thumbnail image, and check for rotate metadata (if any).

    Args:
        file_path: media file to read
        thumb_path: destination PNG path for the thumbnail
        thumbnail_frame: frame number to render
        width, height: thumbnail dimensions in pixels
        mask, overlay: image paths passed through to Thumbnail()
    """
    # Create a clip object and get the reader
    clip = openshot.Clip(file_path)
    reader = clip.Reader()

    # Open reader
    reader.Open()

    # Get the 'rotate' metadata (if any)
    rotate = 0.0
    try:
        if reader.info.metadata.count("rotate"):
            rotate = float(reader.info.metadata.find("rotate").value()[1])
    except Exception:
        # Narrowed from a bare except; absent or malformed metadata simply
        # means no rotation is applied
        pass

    # Save thumbnail image and close readers
    reader.GetFrame(thumbnail_frame).Thumbnail(
        thumb_path, width, height, mask, overlay,
        "#000", False, "png", 100, rotate)
    reader.Close()
    clip.Close()
def add_file(self, filepath):
    filename = os.path.basename(filepath)

    # Project app and translation helper
    app = get_app()
    _ = get_app()._tr

    # Skip files that are already part of the project
    file = File.get(path=filepath)
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    try:
        # Serialize the internal reader and tag everything as an image
        file_data = json.loads(clip.Reader().Json())
        file_data["media_type"] = "image"

        # Persist the new file record
        file = File()
        file.data = file_data
        file.save()
        return True
    except Exception as ex:
        # Log, then show a friendly error dialog
        log.error('Could not import {}: {}'.format(filename, str(ex)))
        msg = QMessageBox()
        msg.setText(_("{} is not a valid video, audio, or image file.".format(filename)))
        msg.exec_()
        return False
def display_svg(self):
    # Write the preview thumbnail into a fresh temp .png (fd closed at once)
    fd, tmp_filename = tempfile.mkstemp(suffix=".png")
    os.close(fd)

    # Create a clip object and get the reader
    clip = openshot.Clip(self.filename)
    reader = clip.Reader()
    reader.Open()

    # Overwrite temp file with thumbnail image and close readers
    reader.GetFrame(1).Thumbnail(
        tmp_filename, self.graphicsView.width(), self.graphicsView.height(),
        "", "", "#000", False, "png", 85, 0.0)
    reader.Close()
    clip.Close()

    # Attempt to load saved thumbnail
    svg = QtGui.QPixmap()
    if not svg.load(tmp_filename):
        log.error("Couldn't load title preview from {}".format(tmp_filename))
        return

    # Scale and show the pixmap in the graphics view
    view = self.graphicsView
    scene = QGraphicsScene(self)
    svg_scaled = svg.scaled(self.graphicsView.size(), Qt.KeepAspectRatio, Qt.SmoothTransformation)
    scene.addPixmap(svg_scaled)
    view.setScene(scene)
    view.show()

    # Remove temporary file
    os.unlink(tmp_filename)
def GenerateThumbnail(file_path, thumb_path, thumbnail_frame, width, height, mask, overlay):
    """Create thumbnail image, and check for rotate metadata (if any).

    Args:
        file_path: media file to read
        thumb_path: destination PNG path for the thumbnail
        thumbnail_frame: frame number to render
        width, height: thumbnail dimensions in pixels
        mask, overlay: image paths passed through to Thumbnail()
    """
    # Create a clip object and get the reader
    clip = openshot.Clip(file_path)
    reader = clip.Reader()

    # Open reader
    reader.Open()

    # Get the 'rotate' metadata (if any)
    rotate = 0.0
    rotate_data = None
    try:
        if reader.info.metadata.count("rotate"):
            rotate_data = reader.info.metadata.find("rotate").value()[1]
            rotate = float(rotate_data)
    except ValueError as ex:
        # Metadata present but not a number
        log.warning("Could not parse rotation value {}: {}".format(
            rotate_data, ex))
    except Exception:
        log.warning(
            "Error reading rotation metadata from {}".format(file_path),
            exc_info=1)

    # Create thumbnail folder (if needed). makedirs handles nested paths and
    # avoids the exists()/mkdir() race of the previous implementation.
    os.makedirs(os.path.dirname(thumb_path), exist_ok=True)

    # Save thumbnail image and close readers
    reader.GetFrame(thumbnail_frame).Thumbnail(
        thumb_path, width, height, mask, overlay,
        "#000", False, "png", 85, rotate)
    reader.Close()
    clip.Close()
def __init__(self, cuts_json, clips_json, preview=False):
    """Cutting/preview dialog constructor.

    NOTE(review): this method references a name `file` that is not a
    parameter and is not defined anywhere visible (see the commented-out
    `self.file = file` below) - it will raise NameError as written.
    Presumably a `file` parameter was removed in a partial refactor to
    cuts_json/clips_json; confirm and restore the intended source of the
    file object before relying on this code.
    """
    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # If preview, hide cutting controls
    if preview:
        self.lblInstructions.setVisible(False)
        self.widgetControls.setVisible(False)
        self.setWindowTitle(_("Preview"))

    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None

    # NOTE(review): `project` is assigned but never used below
    project = get_app().project

    # Keep track of file object
    #self.file = file
    # NOTE(review): `file` is undefined here - NameError at runtime
    self.file_path = file.absolute_path()
    self.video_length = int(file.data['video_length'])
    self.fps_num = int(file.data['fps']['num'])
    self.fps_den = int(file.data['fps']['den'])
    self.fps = float(self.fps_num) / float(self.fps_den)
    self.width = int(file.data['width'])
    self.height = int(file.data['height'])
    self.sample_rate = int(file.data['sample_rate'])
    self.channels = int(file.data['channels'])
    self.channel_layout = int(file.data['channel_layout'])

    # Open video file with Reader
    log.info(self.file_path)

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.width, self.height,
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout

    try:
        # Add clip for current preview file
        self.clip = openshot.Clip(self.file_path)

        # Show waveform for audio files
        if not self.clip.Reader().info.has_video and self.clip.Reader(
        ).info.has_audio:
            self.clip.Waveform(True)

        # Set has_audio property
        self.r.info.has_audio = self.clip.Reader().info.has_audio

        if preview:
            # Display frame #'s during preview
            self.clip.display = openshot.FRAME_DISPLAY_CLIP

        self.r.AddClip(self.clip)
    except:
        log.error('Failed to load media file into preview player: %s' %
                  self.file_path)
        return

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred,
                                    QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set max size of video preview (for speed)
    viewport_rect = self.videoPreview.centeredViewport(
        self.videoPreview.width(), self.videoPreview.height())
    self.r.SetMaxSize(viewport_rect.width(), viewport_rect.height())

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    self.sliderVideo.setSingleStep(1)
    # NOTE(review): duplicate setSingleStep(1) call - harmless but redundant
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Determine if a start or end attribute is in this file
    start_frame = 1
    if 'start' in self.file.data.keys():
        start_frame = (float(self.file.data['start']) * self.fps) + 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.btnStart.clicked.connect(self.btnStart_clicked)
    self.btnEnd.clicked.connect(self.btnEnd_clicked)
    self.btnClear.clicked.connect(self.btnClear_clicked)
    self.btnAddClip.clicked.connect(self.btnAddClip_clicked)

    self.initialized = True
def add_file(self, filepath):
    """Import a media file (or image sequence) into the project.

    Returns True on success, False if the file cannot be read, and None
    when the path is already part of the project.
    """
    path, filename = os.path.split(filepath)

    # Add file into project
    app = get_app()
    _ = get_app()._tr

    # Check for this path in our existing project data
    file = File.get(path=filepath)

    # If this file is already found, exit
    if file:
        return

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip = openshot.Clip(filepath)

    # Get the JSON for the clip's internal reader
    try:
        reader = clip.Reader()
        file_data = json.loads(reader.Json())

        # Determine media type (is_image() distinguishes stills, which also
        # report has_video=True)
        if file_data["has_video"] and not self.is_image(file_data):
            file_data["media_type"] = "video"
        elif file_data["has_video"] and self.is_image(file_data):
            file_data["media_type"] = "image"
        elif file_data["has_audio"] and not file_data["has_video"]:
            file_data["media_type"] = "audio"

        # Save new file to the project data
        file = File()
        file.data = file_data

        # Is this file an image sequence / animation?
        image_seq_details = self.get_image_sequence_details(filepath)
        if image_seq_details:
            # Update file with correct path
            folder_path = image_seq_details["folder_path"]
            file_name = image_seq_details["file_path"]
            base_name = image_seq_details["base_name"]
            fixlen = image_seq_details["fixlen"]
            digits = image_seq_details["digits"]
            extension = image_seq_details["extension"]

            # Build a printf-style frame-number pattern (e.g. %04d)
            if not fixlen:
                zero_pattern = "%d"
            else:
                zero_pattern = "%%0%sd" % digits

            # Generate the regex pattern for this image sequence
            pattern = "%s%s.%s" % (base_name, zero_pattern, extension)

            # Split folder name
            (parentPath, folderName) = os.path.split(folder_path)
            if not base_name:
                # Give alternate name
                file.data["name"] = "%s (%s)" % (folderName, pattern)

            # Load image sequence (to determine duration and video_length)
            image_seq = openshot.Clip(os.path.join(folder_path, pattern))

            # Update file details
            file.data["path"] = os.path.join(folder_path, pattern)
            file.data["media_type"] = "video"
            file.data["duration"] = image_seq.Reader().info.duration
            file.data["video_length"] = image_seq.Reader(
            ).info.video_length

        # Save file
        file.save()
        return True
    except:
        # Handle exception
        # NOTE(review): bare except silently maps every failure (including
        # bugs above) to "invalid file" - consider narrowing
        msg = QMessageBox()
        msg.setText(
            _("{} is not a valid video, audio, or image file.".format(
                filename)))
        msg.exec_()
        return False
effects_text[xmldoc.getElementsByTagName("title") [0].childNodes[0].data] = full_file_path effects_text[xmldoc.getElementsByTagName("description") [0].childNodes[0].data] = full_file_path # get params params = xmldoc.getElementsByTagName("param") # Loop through params for param in params: if param.attributes["title"]: effects_text[param.attributes["title"].value] = full_file_path # Append on properties from libopenshot objects = [ openshot.Clip(), openshot.Blur(), openshot.Brightness(), openshot.ChromaKey(), openshot.Deinterlace(), openshot.Mask(), openshot.Negate(), openshot.Saturation() ] # Loop through each libopenshot object for object in objects: props = json.loads(object.PropertiesJSON(1)) # Loop through props for key in props.keys():
def import_xml():
    """Import final cut pro XML file

    Parses a Final Cut Pro XML export, creating a new project track per
    XML track and a project Clip per clipitem, including opacity and
    audio-level keyframes.
    """
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path (default the dialog to the project folder, or HOME)
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."),
                                            recommended_path,
                                            _("Final Cut Pro (*.xml)"),
                                            _("Final Cut Pro (*.xml)"))[0]
    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks (audio first, then video)
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks: new layer number goes 1000000 above the
            # current highest layer
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = list(
                reversed(sorted(
                    all_tracks,
                    key=itemgetter('number'))))[0].get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName(
                    "locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {
                "number": track_number,
                "y": 0,
                "label": "XML Import %s" % track_index,
                "lock": is_locked
            }
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName(
                    "file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName(
                        "pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no clippath node found
                    # This usually happens for linked audio clips (which OpenShot combines audio and thus ignores this)
                    continue

                clip_path, is_modified, is_skipped = find_missing_file(
                    clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data[
                                "has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data

                        # Save file
                        file.save()
                    except Exception:
                        # Ignore errors for now
                        # NOTE(review): if this fires, `file` stays None and
                        # the attribute accesses below raise AttributeError -
                        # confirm whether a `continue` was intended here
                        pass

                if (file.data["media_type"] == "video"
                        or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH,
                                              "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images",
                                              "AudioThumbnail.png")

                # Create Clip object (times converted from frames to seconds)
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName(
                    "name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(
                    clip_element.getElementsByTagName("start")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(
                    clip_element.getElementsByTagName("in")
                    [0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(
                    clip_element.getElementsByTagName("out")
                    [0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName(
                        "effect"):
                    effectid = effect_element.getElementsByTagName(
                        "effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        # FCP opacity (0-100) maps to OpenShot alpha (0.0-1.0)
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })
                    elif effectid == "audiolevels":
                        # FCP audio levels (0-100) map to volume (0.0-1.0)
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(
                                keyframe_element.getElementsByTagName("when")
                                [0].childNodes[0].nodeValue)
                            keyframe_value = float(
                                keyframe_element.getElementsByTagName("value")
                                [0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })

                # Save clip
                clip.save()

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(
        app.window.preview_thread.player.Position())
def update_model(self, clear=True):
    """Rebuild the title-template model from built-in and user title files.

    Args:
        clear: when True, reset the model and path cache before scanning.
    """
    log.info("updating title model.")
    app = get_app()

    # Get window to check filters
    win = app.window
    _ = app._tr

    # Clear all items
    if clear:
        self.model_paths = {}
        self.model.clear()

    # Add Headers
    self.model.setHorizontalHeaderLabels([_("Thumb"), _("Name")])

    # get a list of files in the OpenShot /transitions directory
    titles_dir = os.path.join(info.PATH, "titles")

    # Add build-in templates
    titles_list = []
    for filename in sorted(os.listdir(titles_dir)):
        titles_list.append(os.path.join(titles_dir, filename))

    # Add user-defined titles (if any)
    for file in sorted(os.listdir(info.TITLE_PATH)):
        # pretty up the filename for display purposes
        if fnmatch.fnmatch(file, '*.svg'):
            titles_list.append(os.path.join(info.TITLE_PATH, file))

    for path in sorted(titles_list):
        (parent_path, filename) = os.path.split(path)
        (fileBaseName, fileExtension) = os.path.splitext(filename)

        # Skip hidden files (such as .DS_Store, etc...)
        if filename[0] == "." or "thumbs.db" in filename.lower(
        ) or filename.lower() == "temp.svg":
            continue

        # split the name into parts (looking for a number)
        suffix_number = None
        name_parts = fileBaseName.split("_")
        if name_parts[-1].isdigit():
            suffix_number = name_parts[-1]

        # get name of transition
        title_name = fileBaseName.replace("_", " ").capitalize()

        # replace suffix number with placeholder (if any) so the
        # translated template can be re-numbered
        if suffix_number:
            title_name = title_name.replace(suffix_number, "%s")
            title_name = self.app._tr(title_name) % suffix_number
        else:
            title_name = self.app._tr(title_name)

        # Check for thumbnail path (in build-in cache)
        thumb_path = os.path.join(info.IMAGES_PATH, "cache",
                                  "{}.png".format(fileBaseName))

        # Check built-in cache (if not found)
        if not os.path.exists(thumb_path):
            # Check user folder cache
            thumb_path = os.path.join(info.CACHE_PATH,
                                      "{}.png".format(fileBaseName))

        # Generate thumbnail (if needed)
        if not os.path.exists(thumb_path):
            try:
                # Reload this reader
                clip = openshot.Clip(path)
                reader = clip.Reader()

                # Open reader
                reader.Open()

                # Save thumbnail
                reader.GetFrame(0).Thumbnail(
                    thumb_path, 98, 64,
                    os.path.join(info.IMAGES_PATH, "mask.png"), "", "#000",
                    True)
                reader.Close()
                clip.Close()
            except:
                # Handle exception
                # NOTE(review): bare except - any failure shows the
                # "invalid image" dialog; consider narrowing
                msg = QMessageBox()
                msg.setText(
                    _("{} is not a valid image file.".format(filename)))
                msg.exec_()
                continue

        row = []

        # Append thumbnail
        col = QStandardItem()
        col.setIcon(QIcon(thumb_path))
        col.setText(title_name)
        col.setToolTip(title_name)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled
                     | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append Filename
        col = QStandardItem("Name")
        col.setData(title_name, Qt.DisplayRole)
        col.setText(title_name)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled
                     | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append Path
        col = QStandardItem("Path")
        col.setData(path, Qt.DisplayRole)
        col.setText(path)
        col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled
                     | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
        row.append(col)

        # Append ROW to MODEL (if does not already exist in model)
        if not path in self.model_paths:
            self.model.appendRow(row)
            self.model_paths[path] = path

        # Process events in QT (to keep the interface responsive)
        app.processEvents()
def update_model(self, clear=True):
    """Rebuild the Blender effects model from XML effect definitions.

    Parses each `.xml` file in the OpenShot `blender` directory, extracts its
    metadata (title, icon, service, ...), generates (or reuses cached) icon
    thumbnails, and appends one row per effect: [Thumb, Name, Path, Service].

    :param clear: when True, wipe the existing model rows and path registry
                  before repopulating.
    """
    log.info("updating effects model.")
    app = get_app()

    # Get window to check filters
    win = app.window
    _ = app._tr

    # Clear all items
    if clear:
        self.model_paths = {}
        self.model.clear()

    # Add Headers
    self.model.setHorizontalHeaderLabels([_("Thumb"), _("Name")])

    # Get a list of files in the OpenShot /blender (effects) directory
    effects_dir = os.path.join(info.PATH, "blender")
    icons_dir = os.path.join(effects_dir, "icons")

    for file in os.listdir(effects_dir):
        if os.path.isfile(os.path.join(effects_dir, file)) and ".xml" in file:
            # Split path
            path = os.path.join(effects_dir, file)
            (fileBaseName, fileExtension) = os.path.splitext(path)

            # Load XML effect file
            xmldoc = xml.parse(path)

            # Get all attributes from the effect definition
            title = xmldoc.getElementsByTagName("title")[0].childNodes[0].data
            description = xmldoc.getElementsByTagName("description")[0].childNodes[0].data
            icon_name = xmldoc.getElementsByTagName("icon")[0].childNodes[0].data
            icon_path = os.path.join(icons_dir, icon_name)
            category = xmldoc.getElementsByTagName("category")[0].childNodes[0].data
            service = xmldoc.getElementsByTagName("service")[0].childNodes[0].data

            # Generate thumbnail for file (if needed)
            thumb_path = os.path.join(info.CACHE_PATH, icon_name)

            # Check if thumb exists
            if not os.path.exists(thumb_path):
                try:
                    # Reload this reader
                    clip = openshot.Clip(icon_path)
                    reader = clip.Reader()

                    # Open reader
                    reader.Open()

                    # Determine scale of thumbnail
                    scale = 95.0 / reader.info.width

                    # Save thumbnail
                    reader.GetFrame(0).Save(thumb_path, scale)
                    reader.Close()
                    # Close the clip as well (was missing; the titles and
                    # transitions models both close it — avoids leaking the
                    # underlying reader resources).
                    clip.Close()
                except Exception:
                    # Narrowed from a bare `except:` so SystemExit /
                    # KeyboardInterrupt are not swallowed.
                    msg = QMessageBox()
                    msg.setText(_("{} is not a valid image file.".format(icon_path)))
                    msg.exec_()
                    continue

            row = []

            # Append thumbnail column
            col = QStandardItem()
            col.setIcon(QIcon(thumb_path))
            col.setText(self.app._tr(title))
            col.setToolTip(self.app._tr(title))
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            row.append(col)

            # Append Name column
            col = QStandardItem("Name")
            col.setData(self.app._tr(title), Qt.DisplayRole)
            col.setText(self.app._tr(title))
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            row.append(col)

            # Append Path column
            col = QStandardItem("Path")
            col.setData(path, Qt.DisplayRole)
            col.setText(path)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            row.append(col)

            # Append Service column (Blender service name for this effect)
            col = QStandardItem("Service")
            col.setData(service, Qt.DisplayRole)
            col.setText(service)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable)
            row.append(col)

            # Append ROW to MODEL (if it does not already exist in model)
            if path not in self.model_paths:
                self.model.appendRow(row)
                self.model_paths[path] = path

            # Process events in QT (to keep the interface responsive)
            app.processEvents()
def accept(self):
    """Ok button clicked: add every queued file to the timeline.

    Reads the dialog settings (start position, track, fade, transition,
    zoom, image length), then for each file in the dialog's ordered list
    builds a Clip JSON object, applies optional fade/zoom keyframes and an
    optional transition, and saves the resulting Clip (and Transition)
    project-data objects.
    """
    log.info('accept')

    # Get settings from form
    start_position = self.txtStartTime.value()
    track_num = self.cmbTrack.currentData()
    fade_value = self.cmbFade.currentData()
    fade_length = self.txtFadeLength.value()
    transition_path = self.cmbTransition.currentData()
    transition_length = self.txtTransitionLength.value()
    image_length = self.txtImageLength.value()
    zoom_value = self.cmbZoom.currentData()

    # Init position (where the next clip will be placed on the timeline)
    position = start_position

    random_transition = False
    if transition_path == "random":
        random_transition = True

    # Get frames per second
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Loop through each file (in the current order)
    for file in self.treeFiles.timeline_model.files:
        # Create a clip
        clip = Clip()
        clip.data = {}

        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
            # Determine thumb path
            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
        else:
            # Audio file
            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

        # Get file name
        path, filename = os.path.split(file.data["path"])

        # Convert path to the correct relative path (based on this folder)
        file_path = file.absolute_path()

        # Create clip object for this file
        c = openshot.Clip(file_path)

        # Append missing attributes to Clip JSON
        new_clip = json.loads(c.Json())
        new_clip["position"] = position
        new_clip["layer"] = track_num
        new_clip["file_id"] = file.id
        new_clip["title"] = filename
        new_clip["image"] = thumb_path

        # Skip any clips that are missing a 'reader' attribute
        # TODO: Determine why this even happens, as it shouldn't be possible
        if not new_clip.get("reader"):
            continue  # Skip to next file

        # Overwrite frame rate (in case the user changed it in the File Properties)
        file_properties_fps = float(file.data["fps"]["num"]) / float(file.data["fps"]["den"])
        file_fps = float(new_clip["reader"]["fps"]["num"]) / float(new_clip["reader"]["fps"]["den"])
        fps_diff = file_fps / file_properties_fps
        new_clip["reader"]["fps"]["num"] = file.data["fps"]["num"]
        new_clip["reader"]["fps"]["den"] = file.data["fps"]["den"]

        # Scale duration / length / and end properties
        new_clip["reader"]["duration"] *= fps_diff
        new_clip["end"] *= fps_diff
        new_clip["duration"] *= fps_diff

        # Check for optional start and end attributes
        start_time = 0
        end_time = new_clip["reader"]["duration"]
        if 'start' in file.data.keys():
            start_time = file.data['start']
            new_clip["start"] = start_time
        if 'end' in file.data.keys():
            end_time = file.data['end']
            new_clip["end"] = end_time

        # Adjust clip duration, start, and end
        new_clip["duration"] = new_clip["reader"]["duration"]
        if file.data["media_type"] == "image":
            end_time = image_length
            new_clip["end"] = end_time

        # Adjust Fade of Clips (if no transition is chosen)
        if not transition_path:
            if fade_value is not None:
                # Overlap this clip with the previous one (if any)
                position = max(start_position, new_clip["position"] - fade_length)
                new_clip["position"] = position

            if fade_value == 'Fade In' or fade_value == 'Fade In & Out':
                start = openshot.Point(
                    round(start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                start_object = json.loads(start.Json())
                end = openshot.Point(
                    min(round((start_time + fade_length) * fps_float) + 1,
                        round(end_time * fps_float) + 1),
                    1.0, openshot.BEZIER)
                end_object = json.loads(end.Json())
                new_clip['alpha']["Points"].append(start_object)
                new_clip['alpha']["Points"].append(end_object)

            if fade_value == 'Fade Out' or fade_value == 'Fade In & Out':
                start = openshot.Point(
                    max(round((end_time * fps_float) + 1) - (round(fade_length * fps_float) + 1),
                        round(start_time * fps_float) + 1),
                    1.0, openshot.BEZIER)
                start_object = json.loads(start.Json())
                end = openshot.Point(
                    round(end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                end_object = json.loads(end.Json())
                new_clip['alpha']["Points"].append(start_object)
                new_clip['alpha']["Points"].append(end_object)

        # Adjust zoom amount
        if zoom_value is not None:
            # Location animation
            if zoom_value == "Random":
                animate_start_x = uniform(-0.5, 0.5)
                animate_end_x = uniform(-0.15, 0.15)
                animate_start_y = uniform(-0.5, 0.5)
                animate_end_y = uniform(-0.15, 0.15)

                # Scale animation
                start_scale = uniform(0.5, 1.5)
                end_scale = uniform(0.85, 1.15)
            elif zoom_value == "Zoom In":
                animate_start_x = 0.0
                animate_end_x = 0.0
                animate_start_y = 0.0
                animate_end_y = 0.0

                # Scale animation
                start_scale = 1.0
                end_scale = 1.25
            elif zoom_value == "Zoom Out":
                animate_start_x = 0.0
                animate_end_x = 0.0
                animate_start_y = 0.0
                animate_end_y = 0.0

                # Scale animation
                start_scale = 1.25
                end_scale = 1.0

            # Add scale keyframes
            start = openshot.Point(
                round(start_time * fps_float) + 1, start_scale, openshot.BEZIER)
            start_object = json.loads(start.Json())
            end = openshot.Point(
                round(end_time * fps_float) + 1, end_scale, openshot.BEZIER)
            end_object = json.loads(end.Json())
            new_clip["gravity"] = openshot.GRAVITY_CENTER
            new_clip["scale_x"]["Points"].append(start_object)
            new_clip["scale_x"]["Points"].append(end_object)
            new_clip["scale_y"]["Points"].append(start_object)
            new_clip["scale_y"]["Points"].append(end_object)

            # Add location keyframes
            start_x = openshot.Point(
                round(start_time * fps_float) + 1, animate_start_x, openshot.BEZIER)
            start_x_object = json.loads(start_x.Json())
            end_x = openshot.Point(
                round(end_time * fps_float) + 1, animate_end_x, openshot.BEZIER)
            end_x_object = json.loads(end_x.Json())
            start_y = openshot.Point(
                round(start_time * fps_float) + 1, animate_start_y, openshot.BEZIER)
            start_y_object = json.loads(start_y.Json())
            end_y = openshot.Point(
                round(end_time * fps_float) + 1, animate_end_y, openshot.BEZIER)
            end_y_object = json.loads(end_y.Json())
            new_clip["gravity"] = openshot.GRAVITY_CENTER
            new_clip["location_x"]["Points"].append(start_x_object)
            new_clip["location_x"]["Points"].append(end_x_object)
            new_clip["location_y"]["Points"].append(start_y_object)
            new_clip["location_y"]["Points"].append(end_y_object)

        if transition_path:
            # Add transition for this clip (if any)
            # Open up QtImageReader for transition Image
            if random_transition:
                # BUGFIX: randint() is inclusive at BOTH ends, so the upper
                # bound must be len-1, otherwise this can index one past the
                # end of the list and raise IndexError.
                random_index = randint(0, len(self.transitions) - 1)
                transition_path = self.transitions[random_index]

            # Get reader for transition
            transition_reader = openshot.QtImageReader(transition_path)

            brightness = openshot.Keyframe()
            brightness.AddPoint(1, 1.0, openshot.BEZIER)
            brightness.AddPoint(
                round(min(transition_length, end_time - start_time) * fps_float) + 1,
                -1.0, openshot.BEZIER)
            contrast = openshot.Keyframe(3.0)

            # Create transition dictionary
            transitions_data = {
                "layer": track_num,
                "title": "Transition",
                "type": "Mask",
                "start": 0,
                "end": min(transition_length, end_time - start_time),
                "brightness": json.loads(brightness.Json()),
                "contrast": json.loads(contrast.Json()),
                "reader": json.loads(transition_reader.Json()),
                "replace_image": False
            }

            # Overlap this clip with the previous one (if any)
            position = max(start_position, position - transition_length)
            transitions_data["position"] = position
            new_clip["position"] = position

            # Create transition
            tran = Transition()
            tran.data = transitions_data
            tran.save()

        # Save Clip
        clip.data = new_clip
        clip.save()

        # Increment position by length of clip
        position += (end_time - start_time)

    # Accept dialog
    super(AddToTimeline, self).accept()
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    # NOTE: legacy 1.x projects are Python pickles referencing old module
    # paths; this method aliases those paths in sys.modules BEFORE unpickling,
    # then converts the unpickled objects into current project-data records.
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt": info.VERSION, "libopenshot": v.ToString()}

    # Get FPS from project
    from classes.app import get_app
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    # Alias the legacy module paths so pickle can resolve the 1.x class
    # references stored inside the project file.
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            # Maps legacy file unique_id -> new File record
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except:
                        # Handle exception quietly — the file is recorded as
                        # failed and reported at the end instead of aborting
                        # the whole import.
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks (deep-copied so deletion doesn't mutate the
            # list being iterated)
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks (legacy track order is reversed relative to
            # the current track numbering)
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json())
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed) before adding fade keyframes
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes (frame numbers are 1-based, hence +1)
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed); otherwise keep a single
                        # constant-volume keyframe from the legacy clip volume
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json())
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        if clip.audio_fade_in:
                            # Add keyframes
                            # NOTE(review): this uses video_fade_in_amount for the
                            # AUDIO fade length — possibly intentional legacy
                            # behavior, but worth confirming against 1.x semantics.
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT
                        if clip.audio_fade_out:
                            # Add keyframes (same note as above: uses
                            # video_fade_out_amount for the audio fade length)
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition (fall back to built-in fade
                        # if the legacy resource is missing)
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json()),
                            "contrast": json.loads(contrast.Json()),
                            "reader": json.loads(transition_reader.Json()),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
def update_model(self, clear=True):
    """Rebuild the transitions model from built-in and user transition files.

    Scans the `common` and `extra` transition directories (plus an optional
    user TRANSITIONS_PATH folder), honors the window's show-all / show-common
    / text filters, generates (or reuses cached) thumbnails, and appends one
    row per transition: [Thumb, Name, Type, Path].

    :param clear: when True, wipe the existing model rows and path registry
                  before repopulating.
    """
    log.info("updating transitions model.")
    app = get_app()

    # Get window to check filters
    win = app.window
    _ = app._tr

    # Clear all items
    if clear:
        self.model_paths = {}
        self.model.clear()

    # Add Headers
    self.model.setHorizontalHeaderLabels([_("Thumb"), _("Name")])

    # Get a list of files in the OpenShot /transitions directory
    transitions_dir = os.path.join(info.PATH, "transitions")
    common_dir = os.path.join(transitions_dir, "common")
    extra_dir = os.path.join(transitions_dir, "extra")
    transition_groups = [
        {"type": "common", "dir": common_dir, "files": os.listdir(common_dir)},
        {"type": "extra", "dir": extra_dir, "files": os.listdir(extra_dir)},
    ]

    # Add optional user-defined transitions folder
    if (os.path.exists(info.TRANSITIONS_PATH) and os.listdir(info.TRANSITIONS_PATH)):
        transition_groups.append({
            "type": "user",
            "dir": info.TRANSITIONS_PATH,
            "files": os.listdir(info.TRANSITIONS_PATH),
        })

    for group in transition_groups:
        # Renamed from `type` / `dir` to avoid shadowing the builtins
        group_type = group["type"]
        group_dir = group["dir"]
        group_files = group["files"]

        for filename in sorted(group_files):
            path = os.path.join(group_dir, filename)
            (fileBaseName, fileExtension) = os.path.splitext(filename)

            # Skip hidden files (such as .DS_Store, etc...)
            if filename[0] == "." or "thumbs.db" in filename.lower():
                continue

            # Split the name into parts (looking for a trailing number)
            suffix_number = None
            name_parts = fileBaseName.split("_")
            if name_parts[-1].isdigit():
                suffix_number = name_parts[-1]

            # Get display name of transition
            trans_name = fileBaseName.replace("_", " ").capitalize()

            # Replace suffix number with placeholder so translation matches
            # the base string, then re-apply the number (if any)
            if suffix_number:
                trans_name = trans_name.replace(suffix_number, "%s")
                trans_name = self.app._tr(trans_name) % suffix_number
            else:
                trans_name = self.app._tr(trans_name)

            # Apply the window's category filter (common-only mode)
            if not win.actionTransitionsShowAll.isChecked():
                if win.actionTransitionsShowCommon.isChecked():
                    if not group_type == "common":
                        continue  # to next file, didn't match filter

            # Apply the window's free-text filter
            if win.transitionsFilter.text() != "":
                if not win.transitionsFilter.text().lower() in trans_name.lower():
                    continue

            # Check for thumbnail path (in built-in cache)
            thumb_path = os.path.join(info.IMAGES_PATH, "cache", "{}.png".format(fileBaseName))

            # Fall back to the user folder cache (if not found)
            if not os.path.exists(thumb_path):
                thumb_path = os.path.join(info.CACHE_PATH, "{}.png".format(fileBaseName))

            # Generate thumbnail (if needed)
            if not os.path.exists(thumb_path):
                try:
                    # Reload this reader
                    clip = openshot.Clip(path)
                    reader = clip.Reader()

                    # Open reader
                    reader.Open()

                    # Save thumbnail
                    reader.GetFrame(0).Thumbnail(
                        thumb_path, 98, 64,
                        os.path.join(info.IMAGES_PATH, "mask.png"),
                        "", "#000", True)
                    reader.Close()
                    clip.Close()
                except Exception:
                    # Narrowed from a bare `except:` so SystemExit /
                    # KeyboardInterrupt are not swallowed.
                    msg = QMessageBox()
                    msg.setText(_("{} is not a valid image file.".format(filename)))
                    msg.exec_()
                    continue

            row = []

            # Append thumbnail column
            col = QStandardItem()
            col.setIcon(QIcon(thumb_path))
            col.setText(trans_name)
            col.setToolTip(trans_name)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
            row.append(col)

            # Append Filename column
            col = QStandardItem("Name")
            col.setData(trans_name, Qt.DisplayRole)
            col.setText(trans_name)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
            row.append(col)

            # Append Media Type column (common / extra / user)
            col = QStandardItem("Type")
            col.setData(group_type, Qt.DisplayRole)
            col.setText(group_type)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
            row.append(col)

            # Append Path column
            col = QStandardItem("Path")
            col.setData(path, Qt.DisplayRole)
            col.setText(path)
            col.setFlags(Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable | Qt.ItemIsDragEnabled)
            row.append(col)

            # Append ROW to MODEL (if it does not already exist in model)
            if path not in self.model_paths:
                self.model.appendRow(row)
                self.model_paths[path] = path