def test_add_clip(self):
    """ Test the Clip.save method by adding multiple clips """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find number of clips in project
    num_clips = len(Clip.filter())

    # Create clip
    c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

    # Parse JSON
    clip_data = json.loads(c.Json())

    # Insert into project data
    query_clip = Clip()
    query_clip.data = clip_data
    query_clip.save()

    self.assertTrue(query_clip)
    self.assertEqual(len(Clip.filter()), num_clips + 1)

    # Save the clip again (which should not change the total # of clips)
    query_clip.save()
    self.assertEqual(len(Clip.filter()), num_clips + 1)
def setUpClass(TestQueryClass):
    """ Init unit test data """
    # Create Qt application
    TestQueryClass.app = OpenShotApp(sys.argv, mode="unittest")
    TestQueryClass.clip_ids = []
    TestQueryClass.file_ids = []
    TestQueryClass.transition_ids = []

    # Import additional classes that need the app defined first
    from classes.query import Clip, File, Transition

    # Insert some clips into the project data
    for num in range(5):
        # Create clip
        c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))

        # Parse JSON
        clip_data = json.loads(c.Json())

        # Insert into project data
        query_clip = Clip()
        query_clip.data = clip_data
        query_clip.save()

        # Keep track of the ids
        TestQueryClass.clip_ids.append(query_clip.id)

    # Insert some files into the project data
    for num in range(5):
        # Create file
        r = openshot.DummyReader(openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)

        # Parse JSON
        file_data = json.loads(r.Json())

        # Insert into project data
        query_file = File()
        query_file.data = file_data
        query_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        query_file.data["media_type"] = "image"
        query_file.save()

        # Keep track of the ids
        TestQueryClass.file_ids.append(query_file.id)

    # Insert some transitions into the project data
    for num in range(5):
        # Create mask object
        transition_object = openshot.Mask()
        transitions_data = json.loads(transition_object.Json())

        # Insert into project data
        query_transition = Transition()
        query_transition.data = transitions_data
        query_transition.save()

        # Keep track of the ids
        TestQueryClass.transition_ids.append(query_transition.id)
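# Illustrative sketch (not part of the test suite above): the Query classes
# used in these tests (Clip, File, Transition) all follow the same pattern --
# assign a dict to .data and call .save() to insert or update a row in the
# project data, then read it back with .get()/.filter(). The field names
# below are hypothetical examples, not a required schema.
#
#   from classes.query import Clip
#   clip = Clip()
#   clip.data = {"layer": 1, "position": 0.0, "title": "example"}
#   clip.save()                      # insert; clip.id is populated by save()
#   same = Clip.get(id=clip.id)      # single lookup by attribute
#   several = Clip.filter(layer=1)   # list of all matches
#   same.delete()                    # remove the row from the project data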
def test_get_clip(self):
    """ Test the Clip.get method """
    clip = Clip.get(id=self.clip_ids[1])
    self.assertTrue(clip)

    # Do not find a clip
    clip = Clip.get(id="invalidID")
    self.assertEqual(clip, None)
def test_filter_clip(self):
    """ Test the Clip.filter method """
    clips = Clip.filter(id=self.clip_ids[0])
    self.assertTrue(clips)

    # Do not find a clip
    clips = Clip.filter(id="invalidID")
    self.assertEqual(len(clips), 0)
def getMenu(self):
    # Build menu for selection button
    menu = QMenu(self)

    # Get translation object
    _ = get_app()._tr

    # Look up item for more info
    if self.item_type == "clip":
        self.item_name = Clip.get(id=self.item_id).title()
    elif self.item_type == "transition":
        self.item_name = Transition.get(id=self.item_id).title()
    elif self.item_type == "effect":
        self.item_name = Effect.get(id=self.item_id).title()

    # Add selected clips
    for item_id in get_app().window.selected_clips:
        clip = Clip.get(id=item_id)
        item_name = clip.title()
        item_icon = QIcon(QPixmap(clip.data.get('image')))
        action = menu.addAction(item_name)
        action.setIcon(item_icon)
        action.setData({'item_id': item_id, 'item_type': 'clip'})
        action.triggered.connect(self.Action_Triggered)

        # Add effects for these clips (if any)
        for effect in clip.data.get('effects'):
            item_name = Effect.get(id=effect.get('id')).title()
            item_icon = QIcon(QPixmap(os.path.join(info.PATH, "effects", "icons", "%s.png" % effect.get('class_name').lower())))
            action = menu.addAction(' > %s' % _(item_name))
            action.setIcon(item_icon)
            action.setData({'item_id': effect.get('id'), 'item_type': 'effect'})
            action.triggered.connect(self.Action_Triggered)

    # Add selected transitions
    for item_id in get_app().window.selected_transitions:
        trans = Transition.get(id=item_id)
        item_name = _(trans.title())
        item_icon = QIcon(QPixmap(trans.data.get('reader', {}).get('path')))
        action = menu.addAction(_(item_name))
        action.setIcon(item_icon)
        action.setData({'item_id': item_id, 'item_type': 'transition'})
        action.triggered.connect(self.Action_Triggered)

    # Add selected effects
    for item_id in get_app().window.selected_effects:
        effect = Effect.get(id=item_id)
        item_name = _(effect.title())
        item_icon = QIcon(QPixmap(os.path.join(info.PATH, "effects", "icons", "%s.png" % effect.data.get('class_name').lower())))
        action = menu.addAction(_(item_name))
        action.setIcon(item_icon)
        action.setData({'item_id': item_id, 'item_type': 'effect'})
        action.triggered.connect(self.Action_Triggered)

    # Return the menu object
    return menu
def test_filter_clip(self):
    """ Test the Clip.filter method """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find the first clip created in setUpClass
    clips = Clip.filter(id=TestQueryClass.clip_ids[0])
    self.assertTrue(clips)

    # Do not find a clip
    clips = Clip.filter(id="invalidID")
    self.assertEqual(len(clips), 0)
def test_get_clip(self):
    """ Test the Clip.get method """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find a clip created in setUpClass
    clip = Clip.get(id=TestQueryClass.clip_ids[1])
    self.assertTrue(clip)

    # Do not find a clip
    clip = Clip.get(id="invalidID")
    self.assertEqual(clip, None)
def updateProperty(self, id, frame_number, property_key, new_value):
    """Update a keyframe property to a new value, adding or updating keyframes as needed"""
    found_point = False
    clip_updated = False

    c = Clip.get(id=id)
    if not c:
        # No clip found
        return

    for point in c.data[property_key]["Points"]:
        log.info("looping points: co.X = %s" % point["co"]["X"])
        if point["co"]["X"] == frame_number:
            found_point = True
            clip_updated = True
            point["interpolation"] = openshot.BEZIER
            point["co"]["Y"] = float(new_value)

    if not found_point and new_value is not None:
        clip_updated = True
        log.info("Created new point at X=%s" % frame_number)
        c.data[property_key]["Points"].append({'co': {'X': frame_number, 'Y': new_value}, 'interpolation': openshot.BEZIER})

    # Reduce # of clip properties we are saving (performance boost)
    c.data = {property_key: c.data.get(property_key)}

    # Save changes
    if clip_updated:
        # Save
        c.save()

        # Update the preview
        get_app().window.refreshFrameSignal.emit()
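# Hedged usage sketch for updateProperty above (values are hypothetical):
# move the "alpha" keyframe at frame 30 to 0.5, creating a Bezier point if
# none exists at that frame.
#
#   self.updateProperty(clip_id, 30, "alpha", 0.5)
#
# Note that a new point is only appended when new_value is not None, so
# passing None for a frame with no existing point is effectively a no-op.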
def CutsToClips(cuts):
    clips = []
    position = 0
    video_length = 0
    log.info(cuts)
    for cut in cuts:
        c = Clip.filter(id=cut["clip"])
        path = c[0].data["reader"]["path"]
        offset = float(c[0].data["position"])
        start = float(cut["start"]) - offset
        end = float(cut["end"]) - offset
        log.info("start: %s, end: %s, position: %s, path: %s" % (start, end, position, path))
        try:
            clip = openshot.Clip(path)
            clip.Start(start)
            clip.End(end)
            clip.Position(position)
            clips.append(clip)
        except Exception:
            log.error('Failed to load media file into preview player: %s' % path)
            return clips, video_length
        position = position + (end - start) - offset
        video_length = video_length + cut["video_length"]
    return clips, video_length
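# Worked example of the offset math above (illustrative numbers): for a cut
# with start=12.0 and end=15.0 on a clip whose project position is 10.0,
# offset=10.0, so the preview clip is trimmed to Start(2.0)..End(5.0) --
# i.e. cut times are converted from timeline time to clip-local time.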
def transformTriggered(self, clip_id):
    """Handle the transform signal when it's emitted"""
    need_refresh = False

    # Disable Transform UI
    if self and self.transforming_clip:
        # Is this the same clip_id already being transformed?
        if not clip_id:
            # Clear transform
            self.transforming_clip = None
            need_refresh = True

    # Get new clip for transform
    if clip_id:
        self.transforming_clip = Clip.get(id=clip_id)
        if self.transforming_clip:
            self.transforming_clip_object = None
            clips = get_app().window.timeline_sync.timeline.Clips()
            for clip in clips:
                if clip.Id() == self.transforming_clip.id:
                    self.transforming_clip_object = clip
                    need_refresh = True
                    break

    # Update the preview and reselect current frame in properties
    if need_refresh:
        get_app().window.refreshFrameSignal.emit()
        get_app().window.propertyTableView.select_frame(
            get_app().window.preview_thread.player.Position())
def actionRemoveEffect_trigger(self, event):
    log.info('actionRemoveEffect_trigger')

    # Loop through selected effects
    for effect_id in self.selected_effects:
        log.info("effect id: %s" % effect_id)

        # Find the clip that contains this effect
        clips = Clip.filter()
        found_effect = None
        for c in clips:
            found_effect = False
            log.info("c.data[effects]: %s" % c.data["effects"])

            for effect in c.data["effects"]:
                if effect["id"] == effect_id:
                    found_effect = effect
                    break

            if found_effect:
                # Remove found effect from clip data and save clip
                c.data["effects"].remove(found_effect)
                c.save()

        # Clear selected effects
        self.removeSelection(effect_id, "effect")
def updateProperty(self, id, frame_number, property_key, new_value):
    """Update a keyframe property to a new value, adding or updating keyframes as needed"""
    found_point = False
    clip_updated = False

    c = Clip.get(id=id)

    for point in c.data[property_key]["Points"]:
        log.info("looping points: co.X = %s" % point["co"]["X"])
        if point["co"]["X"] == frame_number:
            found_point = True
            clip_updated = True
            point["interpolation"] = openshot.BEZIER
            point["co"]["Y"] = float(new_value)

    if not found_point and new_value is not None:
        clip_updated = True
        log.info("Created new point at X=%s" % frame_number)
        c.data[property_key]["Points"].append({'co': {'X': frame_number, 'Y': new_value}, 'interpolation': openshot.BEZIER})

    # Reduce # of clip properties we are saving (performance boost)
    c.data = {property_key: c.data.get(property_key)}

    # Save changes
    if clip_updated:
        # Save
        c.save()

        # Update the preview
        get_app().window.refreshFrameSignal.emit()
def test_delete_clip(self):
    """ Test the Clip.delete method """
    delete_id = self.clip_ids[4]
    clip = Clip.get(id=delete_id)
    self.assertTrue(clip)
    clip.delete()

    # Verify deleted data
    deleted_clip = Clip.get(id=delete_id)
    self.assertFalse(deleted_clip)

    # Delete clip again (should do nothing)
    clip.delete()
    deleted_clip = Clip.get(id=delete_id)
    self.assertFalse(deleted_clip)
def rect_select_clicked(self, widget, param):
    """Rect select button clicked"""
    self.context[param["setting"]].update({"button-clicked": True})

    # Show dialog
    from windows.region import SelectRegion
    from classes.query import File, Clip

    c = Clip.get(id=self.clip_id)
    reader_path = c.data.get('reader', {}).get('path', '')
    f = File.get(path=reader_path)
    if f:
        win = SelectRegion(f, self.clip_instance)
        # Run the dialog event loop - blocking interaction on this window during that time
        result = win.exec_()
        if result == QDialog.Accepted:
            # Region selected (get coordinates if any)
            topLeft = win.videoPreview.regionTopLeftHandle
            bottomRight = win.videoPreview.regionBottomRightHandle
            viewPortSize = win.viewport_rect
            curr_frame_size = win.videoPreview.curr_frame_size

            x1 = topLeft.x() / curr_frame_size.width()
            y1 = topLeft.y() / curr_frame_size.height()
            x2 = bottomRight.x() / curr_frame_size.width()
            y2 = bottomRight.y() / curr_frame_size.height()

            # Get QImage of region
            if win.videoPreview.region_qimage:
                region_qimage = win.videoPreview.region_qimage

                # Resize QImage to match button size
                resized_qimage = region_qimage.scaled(widget.size(), Qt.IgnoreAspectRatio, Qt.SmoothTransformation)

                # Draw QImage onto QPushButton (to display region selection to user)
                palette = widget.palette()
                palette.setBrush(widget.backgroundRole(), QBrush(resized_qimage))
                widget.setFlat(True)
                widget.setAutoFillBackground(True)
                widget.setPalette(palette)

                # Remove button text (so region QImage is more visible)
                widget.setText("")

            # If data found, add to context
            if topLeft and bottomRight:
                self.context[param["setting"]].update({
                    "normalized_x": x1,
                    "normalized_y": y1,
                    "normalized_width": x2 - x1,
                    "normalized_height": y2 - y1,
                    "first-frame": win.current_frame,
                })
                log.info(self.context)
    else:
        log.error('No file found with path: %s' % reader_path)
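# The region coordinates above are normalized to the preview frame size.
# Illustrative numbers: selecting (160, 90)..(480, 270) on a 640x360 frame
# yields normalized_x=0.25, normalized_y=0.25, normalized_width=0.50,
# normalized_height=0.50.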
def test_update_clip(self):
    """ Test the Clip.save method """
    update_id = self.clip_ids[0]
    clip = Clip.get(id=update_id)
    self.assertTrue(clip)

    # Update clip
    clip.data["layer"] = 2
    clip.data["title"] = "My Title"
    clip.save()

    # Verify updated data
    clip = Clip.get(id=update_id)
    self.assertEqual(clip.data["layer"], 2)
    self.assertEqual(clip.data["title"], "My Title")

    clips = Clip.filter(layer=2)
    self.assertEqual(len(clips), 1)
def test_add_clip(self):
    # Find number of clips in project
    num_clips = len(Clip.filter())

    # Create clip
    c = openshot.Clip(os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
    clip_data = json.loads(c.Json())

    # Insert into project data
    query_clip = Clip()
    query_clip.data = clip_data
    query_clip.save()

    self.assertTrue(query_clip)
    self.assertEqual(len(Clip.filter()), num_clips + 1)

    # Save the clip again (which should not change the total # of clips)
    query_clip.save()
    self.assertEqual(len(Clip.filter()), num_clips + 1)
def test_update_clip(self):
    """ Test the Clip.save method """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find the first clip created in setUpClass
    update_id = TestQueryClass.clip_ids[0]
    clip = Clip.get(id=update_id)
    self.assertTrue(clip)

    # Update clip
    clip.data["layer"] = 2
    clip.data["title"] = "My Title"
    clip.save()

    # Verify updated data (get clip again)
    clip = Clip.get(id=update_id)
    self.assertEqual(clip.data["layer"], 2)
    self.assertEqual(clip.data["title"], "My Title")
def update_item_timeout(self):
    # Get the next item id, and type
    self.item_id = self.next_item_id
    self.item_type = self.next_item_type
    self.item_name = None
    self.item_icon = None

    # Stop timer
    self.update_timer.stop()

    # Get translation object
    _ = get_app()._tr

    # Look up item for more info
    if self.item_type == "clip":
        clip = Clip.get(id=self.item_id)
        if clip:
            self.item_name = clip.title()
            self.item_icon = QIcon(QPixmap(clip.data.get('image')))
    elif self.item_type == "transition":
        trans = Transition.get(id=self.item_id)
        if trans:
            self.item_name = _(trans.title())
            self.item_icon = QIcon(
                QPixmap(trans.data.get('reader', {}).get('path')))
    elif self.item_type == "effect":
        effect = Effect.get(id=self.item_id)
        if effect:
            self.item_name = _(effect.title())
            self.item_icon = QIcon(
                QPixmap(
                    os.path.join(
                        info.PATH, "effects", "icons",
                        "%s.png" % effect.data.get('class_name').lower())))

    # Truncate long text
    if self.item_name and len(self.item_name) > 25:
        self.item_name = "%s..." % self.item_name[:22]

    # Set label
    if self.item_id:
        self.lblSelection.setText("<strong>%s</strong>" % _("Selection:"))
        self.btnSelectionName.setText(self.item_name)
        self.btnSelectionName.setVisible(True)
        if self.item_icon:
            self.btnSelectionName.setIcon(self.item_icon)
    else:
        self.lblSelection.setText("<strong>%s</strong>" % _("No Selection"))
        self.btnSelectionName.setVisible(False)

    # Set the menu on the button
    self.btnSelectionName.setMenu(self.getMenu())
def test_intersect(self):
    """ Test special filter argument 'intersect' """
    trans = Transition.get(id=self.transition_ids[0])
    self.assertTrue(trans)
    pos = trans.data.get("position", -1.0)
    duration = trans.data.get("duration", -1.0)
    self.assertTrue(pos >= 0.0)
    self.assertTrue(duration >= 0.0)
    time = pos + (duration / 2)

    def get_times(item):
        pos = item.data.get("position", -1.0)
        end = pos + item.data.get("duration", -1.0)
        return (pos, end)

    t_intersect = Transition.filter(intersect=time)
    t_ids = [t.id for t in t_intersect]
    t_all = Transition.filter()
    t_rest = [x for x in t_all if x.id not in t_ids]

    c_intersect = Clip.filter(intersect=time)
    c_ids = [c.id for c in c_intersect]
    c_all = Clip.filter()
    c_rest = [x for x in c_all if x.id not in c_ids]

    for item in t_intersect + c_intersect:
        item_id = item.id
        pos, end = get_times(item)
        self.assertTrue(pos <= time)
        self.assertTrue(time <= end)

    for item in t_rest + c_rest:
        item_id = item.id
        pos, end = get_times(item)
        if pos < time:
            self.assertTrue(end <= time)
        if end > time:
            self.assertTrue(pos >= time)
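# The 'intersect' filter matches items whose [position, position + duration]
# interval contains the given time, which is what the assertions above check
# from both directions. Illustrative call (values hypothetical):
#
#   overlapping = Clip.filter(intersect=3.0)  # clips covering t = 3.0 s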
def actionRemoveClip_trigger(self, event):
    log.info('actionRemoveClip_trigger')

    # Loop through selected clips
    for clip_id in self.selected_clips:
        # Find matching clip
        clips = Clip.filter(id=clip_id)
        for c in clips:
            # Clear selected clips
            self.removeSelection(clip_id, "clip")

            # Remove clip
            c.delete()
def test_delete_clip(self):
    """ Test the Clip.delete method """

    # Import additional classes that need the app defined first
    from classes.query import Clip

    # Find the last clip created in setUpClass
    delete_id = TestQueryClass.clip_ids[4]
    clip = Clip.get(id=delete_id)
    self.assertTrue(clip)

    # Delete clip
    clip.delete()

    # Verify deleted data
    deleted_clip = Clip.get(id=delete_id)
    self.assertFalse(deleted_clip)

    # Delete clip again (should do nothing)
    clip.delete()

    # Verify deleted data
    deleted_clip = Clip.get(id=delete_id)
    self.assertFalse(deleted_clip)
def update_item_timeout(self):
    # Get the next item id, and type
    self.item_id = self.next_item_id
    self.item_type = self.next_item_type
    self.item_name = None
    self.item_icon = None

    # Stop timer
    self.update_timer.stop()

    # Get translation object
    _ = get_app()._tr

    # Look up item for more info
    if self.item_type == "clip":
        clip = Clip.get(id=self.item_id)
        self.item_name = clip.title()
        self.item_icon = QIcon(QPixmap(clip.data.get('image')))
    elif self.item_type == "transition":
        trans = Transition.get(id=self.item_id)
        self.item_name = _(trans.title())
        self.item_icon = QIcon(QPixmap(trans.data.get('reader', {}).get('path')))
    elif self.item_type == "effect":
        effect = Effect.get(id=self.item_id)
        self.item_name = _(effect.title())
        self.item_icon = QIcon(QPixmap(os.path.join(info.PATH, "effects", "icons", "%s.png" % effect.data.get('class_name').lower())))

    # Truncate long text
    if self.item_name and len(self.item_name) > 25:
        self.item_name = "%s..." % self.item_name[:22]

    # Set label
    if self.item_id:
        self.lblSelection.setText("<strong>%s</strong>" % _("Selection:"))
        self.btnSelectionName.setText(self.item_name)
        self.btnSelectionName.setVisible(True)
        if self.item_icon:
            self.btnSelectionName.setIcon(self.item_icon)
    else:
        self.lblSelection.setText("<strong>%s</strong>" % _("No Selection"))
        self.btnSelectionName.setVisible(False)

    # Set the menu on the button
    self.btnSelectionName.setMenu(self.getMenu())
def actionRemove_from_Project_trigger(self, event):
    log.info("actionRemove_from_Project_trigger")

    # Loop through selected files
    for file_id in self.selected_files:
        # Find matching file
        f = File.get(id=file_id)
        if f:
            # Remove file
            f.delete()

            # Find matching clips (if any)
            clips = Clip.filter(file_id=file_id)
            for c in clips:
                # Remove clip
                c.delete()

    # Clear selected files
    self.selected_files = []
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt": info.VERSION, "libopenshot": v.ToString()}

    # Get FPS from project
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file

                    except Exception:
                        # Handle exception quietly
                        msg = ("%s is not a valid video, audio, or image file." % item.name)
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder)
                        file_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(file_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json())
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed)
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json())
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        if clip.audio_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json())
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json())
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json()),
                            "contrast": json.loads(contrast.Json()),
                            "reader": json.loads(transition_reader.Json()),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            msg = _("Failed to load project file %(path)s: %(error)s" % {"path": file_path, "error": ex})
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s" % ", ".join(failed_files)))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
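# Keyframe frame numbers above are 1-based: a time of t seconds maps to frame
# round(t * fps_float) + 1. Illustrative numbers: at 24 fps, a 1.5 s fade-in
# on a clip starting at 0.0 s spans frames 1 through 37 (round(1.5 * 24) + 1).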
def mouseMoveEvent(self, event):
    # Get data model and selection
    model = self.clip_properties_model.model

    # Do not change selected row during mouse move
    if self.lock_selection and self.prev_row:
        row = self.prev_row
    else:
        row = self.indexAt(event.pos()).row()
        self.prev_row = row
        self.lock_selection = True

    if row is None:
        return

    if model.item(row, 0):
        self.selected_label = model.item(row, 0)
        self.selected_item = model.item(row, 1)

    # Is the user dragging on the value column
    if self.selected_label and self.selected_item:
        # Get the position of the cursor and % value
        value_column_x = self.columnViewportPosition(1)
        cursor_value = event.x() - value_column_x
        cursor_value_percent = cursor_value / self.columnWidth(1)

        try:
            cur_property = self.selected_label.data()
        except Exception:
            # If item is deleted during this drag... an exception can occur
            # Just ignore, since this is harmless
            return

        property_key = cur_property[0]
        property_name = cur_property[1]["name"]
        property_type = cur_property[1]["type"]
        property_max = cur_property[1]["max"]
        property_min = cur_property[1]["min"]
        readonly = cur_property[1]["readonly"]
        item_id, item_type = self.selected_item.data()

        # Bail if readonly
        if readonly:
            return

        # Get the original data of this item (prior to any updates, for the undo/redo system)
        if not self.original_data:
            # Ignore undo/redo history temporarily (to avoid a huge pile of undo/redo history)
            get_app().updates.ignore_history = True

            # Find this clip
            c = None
            if item_type == "clip":
                # Get clip object
                c = Clip.get(id=item_id)
            elif item_type == "transition":
                # Get transition object
                c = Transition.get(id=item_id)
            elif item_type == "effect":
                # Get effect object
                c = Effect.get(id=item_id)

            if c:
                if property_key in c.data:
                    # Grab the original data for this item/property
                    self.original_data = c.data

        # For numeric values, apply percentage within parameter's allowable range
        if property_type in ["float", "int"] and property_name != "Track":

            if self.previous_x == -1:
                # Start tracking movement (init diff_length and previous_x)
                self.diff_length = 10
                self.previous_x = event.x()

            # Calculate # of pixels dragged
            drag_diff = self.previous_x - event.x()

            # Update previous x
            self.previous_x = event.x()

            # Ignore small initial movements
            if abs(drag_diff) < self.diff_length:
                # Lower threshold to 0 incrementally, to guarantee it'll eventually be exceeded
                self.diff_length = max(0, self.diff_length - 1)
                return

            # Compute size of property's possible values range
            min_max_range = float(property_max) - float(property_min)

            if min_max_range < 1000.0:
                # Small range - use cursor to calculate new value as percentage of total range
                self.new_value = property_min + (min_max_range * cursor_value_percent)
            else:
                # Range is unreasonably long (such as position, start, end, etc.... which can be huge #'s)
                # Get the current value and apply fixed adjustments in response to motion
                self.new_value = QLocale().system().toDouble(self.selected_item.text())[0]

                if drag_diff > 0:
                    # Move to the left by a small amount
                    self.new_value -= 0.50
                elif drag_diff < 0:
                    # Move to the right by a small amount
                    self.new_value += 0.50

            # Clamp value between min and max (just in case user drags too far)
            self.new_value = max(property_min, self.new_value)
            self.new_value = min(property_max, self.new_value)

            # Update value of this property
            self.clip_properties_model.value_updated(self.selected_item, -1, self.new_value)

            # Repaint
            self.viewport().update()
def value_updated(self, item, interpolation=-1, value=None, interpolation_details=[]):
    """ Table cell change event - also handles context menu to update interpolation value """
    if self.ignore_update_signal:
        return

    # Get translation method
    _ = get_app()._tr

    # Determine what was changed
    property = self.model.item(item.row(), 0).data()
    property_name = property[1]["name"]
    closest_point_x = property[1]["closest_point_x"]
    previous_point_x = property[1]["previous_point_x"]
    property_type = property[1]["type"]
    property_key = property[0]
    clip_id, item_type = item.data()

    # Get value (if any)
    if item.text():
        # Set and format value based on property type
        if value is not None:
            # Override value
            new_value = value
        elif property_type == "string":
            # Use string value
            new_value = item.text()
        elif property_type == "bool":
            # Use boolean value
            if item.text() == _("False"):
                new_value = False
            else:
                new_value = True
        elif property_type == "int":
            # Use int value
            new_value = QLocale().system().toInt(item.text())[0]
        else:
            # Use decimal value
            new_value = QLocale().system().toFloat(item.text())[0]
    else:
        new_value = None

    log.info("%s for %s changed to %s at frame %s with interpolation: %s at closest x: %s"
             % (property_key, clip_id, new_value, self.frame_number, interpolation, closest_point_x))

    # Find this clip
    c = None
    clip_updated = False

    if item_type == "clip":
        # Get clip object
        c = Clip.get(id=clip_id)
    elif item_type == "transition":
        # Get transition object
        c = Transition.get(id=clip_id)
    elif item_type == "effect":
        # Get effect object
        c = Effect.get(id=clip_id)

    if c:
        # Update clip attribute
        if property_key in c.data:
            log.info("value updated: %s" % c.data)

            # Check the type of property (some are keyframe, and some are not)
            if property_type != "reader" and type(c.data[property_key]) == dict:
                # Keyframe
                # Loop through points, find a matching point on this frame
                found_point = False
                point_to_delete = None
                for point in c.data[property_key]["Points"]:
                    log.info("looping points: co.X = %s" % point["co"]["X"])
                    if interpolation == -1 and point["co"]["X"] == self.frame_number:
                        # Found point, update value
                        found_point = True
                        clip_updated = True
                        # Update or delete point
                        if new_value is not None:
                            point["co"]["Y"] = float(new_value)
                            log.info("updating point: co.X = %s to value: %s" % (point["co"]["X"], float(new_value)))
                        else:
                            point_to_delete = point
                        break

                    elif interpolation > -1 and point["co"]["X"] == previous_point_x:
                        # Only update interpolation type (and the LEFT side of the curve)
                        found_point = True
                        clip_updated = True
                        point["interpolation"] = interpolation
                        if interpolation == 0:
                            point["handle_right"] = point.get("handle_right") or {"Y": 0.0, "X": 0.0}
                            point["handle_right"]["X"] = interpolation_details[0]
                            point["handle_right"]["Y"] = interpolation_details[1]

                        log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                        log.info("use interpolation preset: %s" % str(interpolation_details))

                    elif interpolation > -1 and point["co"]["X"] == closest_point_x:
                        # Only update interpolation type (and the RIGHT side of the curve)
                        found_point = True
                        clip_updated = True
                        point["interpolation"] = interpolation
                        if interpolation == 0:
                            point["handle_left"] = point.get("handle_left") or {"Y": 0.0, "X": 0.0}
                            point["handle_left"]["X"] = interpolation_details[2]
                            point["handle_left"]["Y"] = interpolation_details[3]

                        log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                        log.info("use interpolation preset: %s" % str(interpolation_details))

                # Delete point (if needed)
                if point_to_delete:
                    clip_updated = True
                    log.info("Found point to delete at X=%s" % point_to_delete["co"]["X"])
                    c.data[property_key]["Points"].remove(point_to_delete)

                # Create new point (if needed)
                elif not found_point and new_value is not None:
                    clip_updated = True
                    log.info("Created new point at X=%s" % self.frame_number)
                    c.data[property_key]["Points"].append({'co': {'X': self.frame_number, 'Y': new_value}, 'interpolation': 1})

            if not clip_updated:
                # If no keyframe was found, set a basic property
                if property_type == "int":
                    # Integer
                    clip_updated = True
                    c.data[property_key] = int(new_value)

                elif property_type == "float":
                    # Float
                    clip_updated = True
                    c.data[property_key] = new_value

                elif property_type == "bool":
                    # Boolean
                    clip_updated = True
                    c.data[property_key] = bool(new_value)

                elif property_type == "string":
                    # String
                    clip_updated = True
                    c.data[property_key] = str(new_value)

                elif property_type == "reader":
                    # Reader
                    clip_updated = True
                    try:
                        clip_object = openshot.Clip(value)
                        clip_object.Open()
                        c.data[property_key] = json.loads(clip_object.Reader().Json())
                        clip_object.Close()
                        clip_object = None
                    except Exception:
                        log.info('Failed to load %s into Clip object for reader property' % value)

        # Reduce # of clip properties we are saving (performance boost)
        c.data = {property_key: c.data.get(property_key)}

        # Save changes
        if clip_updated:
            # Save
            c.save()

            # Update the preview
            get_app().window.refreshFrameSignal.emit()

        # Clear selection
        self.parent.clearSelection()
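# Shape of a keyframe property as manipulated above: a dict holding a list of
# points, where interpolation 0 is Bezier (openshot.BEZIER, with optional
# handle_left/handle_right), 1 is linear, and 2 is constant. E.g.:
#
#   {"Points": [{"co": {"X": 30, "Y": 0.5}, "interpolation": 1}]}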
def refreshTriggered(self):
    """Signal to refresh viewport (i.e. a property might have changed that affects the preview)"""

    # Update reference to clip
    if self and self.transforming_clip:
        self.transforming_clip = Clip.get(id=self.transforming_clip.id)
def import_xml():
    """Import final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get XML path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = info.HOME_PATH
    else:
        recommended_path = os.path.dirname(recommended_path)
    file_path = QFileDialog.getOpenFileName(app.window, _("Import XML..."), recommended_path,
                                            _("Final Cut Pro (*.xml)"), _("Final Cut Pro (*.xml)"))[0]
    if not file_path or not os.path.exists(file_path):
        # User canceled dialog
        return

    # Parse XML file
    xmldoc = minidom.parse(file_path)

    # Get video tracks
    video_tracks = []
    for video_element in xmldoc.getElementsByTagName("video"):
        for video_track in video_element.getElementsByTagName("track"):
            video_tracks.append(video_track)
    audio_tracks = []
    for audio_element in xmldoc.getElementsByTagName("audio"):
        for audio_track in audio_element.getElementsByTagName("track"):
            audio_tracks.append(audio_track)

    # Loop through tracks
    track_index = 0
    for tracks in [audio_tracks, video_tracks]:
        for track_element in tracks:
            # Get clipitems on this track (if any)
            clips_on_track = track_element.getElementsByTagName("clipitem")
            if not clips_on_track:
                continue

            # Get # of tracks
            track_index += 1
            all_tracks = app.project.get("layers")
            track_number = list(reversed(sorted(all_tracks, key=itemgetter('number'))))[0].get("number") + 1000000

            # Create new track above existing layer(s)
            track = Track()
            is_locked = False
            if track_element.getElementsByTagName("locked")[0].childNodes[0].nodeValue == "TRUE":
                is_locked = True
            track.data = {"number": track_number, "y": 0, "label": "XML Import %s" % track_index, "lock": is_locked}
            track.save()

            # Loop through clips
            for clip_element in clips_on_track:
                # Get clip path
                xml_file_id = clip_element.getElementsByTagName("file")[0].getAttribute("id")
                clip_path = ""
                if clip_element.getElementsByTagName("pathurl"):
                    clip_path = clip_element.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue
                else:
                    # Skip clipitem if no pathurl node found
                    # This usually happens for linked audio clips (which OpenShot combines with video, and thus ignores)
                    continue

                clip_path, is_modified, is_skipped = find_missing_file(clip_path)
                if is_skipped:
                    continue

                # Check for this path in our existing project data
                file = File.get(path=clip_path)

                # Load filepath in libopenshot clip object (which will try multiple readers to open it)
                clip_obj = openshot.Clip(clip_path)

                if not file:
                    # Get the JSON for the clip's internal reader
                    try:
                        reader = clip_obj.Reader()
                        file_data = json.loads(reader.Json())

                        # Determine media type
                        if file_data["has_video"] and not is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data

                        # Save file
                        file.save()
                    except Exception:
                        # Ignore errors for now
                        pass

                if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                    # Determine thumb path
                    thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                else:
                    # Audio file
                    thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                # Create Clip object
                clip = Clip()
                clip.data = json.loads(clip_obj.Json())
                clip.data["file_id"] = file.id
                clip.data["title"] = clip_element.getElementsByTagName("name")[0].childNodes[0].nodeValue
                clip.data["layer"] = track.data.get("number", 1000000)
                clip.data["image"] = thumb_path
                clip.data["position"] = float(clip_element.getElementsByTagName("start")[0].childNodes[0].nodeValue) / fps_float
                clip.data["start"] = float(clip_element.getElementsByTagName("in")[0].childNodes[0].nodeValue) / fps_float
                clip.data["end"] = float(clip_element.getElementsByTagName("out")[0].childNodes[0].nodeValue) / fps_float

                # Loop through clip's effects
                for effect_element in clip_element.getElementsByTagName("effect"):
                    effectid = effect_element.getElementsByTagName("effectid")[0].childNodes[0].nodeValue
                    keyframes = effect_element.getElementsByTagName("keyframe")
                    if effectid == "opacity":
                        clip.data["alpha"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["alpha"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })
                    elif effectid == "audiolevels":
                        clip.data["volume"] = {"Points": []}
                        for keyframe_element in keyframes:
                            keyframe_time = float(keyframe_element.getElementsByTagName("when")[0].childNodes[0].nodeValue)
                            keyframe_value = float(keyframe_element.getElementsByTagName("value")[0].childNodes[0].nodeValue) / 100.0
                            clip.data["volume"]["Points"].append({
                                "co": {
                                    "X": round(keyframe_time),
                                    "Y": keyframe_value
                                },
                                "interpolation": 1  # linear
                            })

                # Save clip
                clip.save()

    # Update the preview and reselect current frame in properties
    app.window.refreshFrameSignal.emit()
    app.window.propertyTableView.select_frame(app.window.preview_thread.player.Position())
def export_xml():
    """Export final cut pro XML file"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = get_app().project.get("fps").get("num", 24)
    fps_den = get_app().project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Ticks (final cut pro value)
    ticks = 254016000000

    # Get path
    recommended_path = get_app().project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.xml" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".xml")
    file_path = QFileDialog.getSaveFileName(app.window, _("Export XML..."), recommended_path,
                                            _("Final Cut Pro (*.xml)"))[0]
    if not file_path:
        # User canceled dialog
        return

    # Append .xml if needed
    if not file_path.endswith(".xml"):
        file_path = "%s.xml" % file_path

    # Get filename with no path
    file_name = os.path.basename(file_path)

    # Determine max frame (based on clips)
    duration = 0.0
    for clip in Clip.filter():
        clip_last_frame = clip.data.get("position") + (clip.data.get("end") - clip.data.get("start"))
        if clip_last_frame > duration:
            # Set max length of timeline
            duration = clip_last_frame

    # XML template path
    xmldoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-project-template.xml'))

    # Set Project Details
    xmldoc.getElementsByTagName("name")[0].childNodes[0].nodeValue = file_name
    xmldoc.getElementsByTagName("uuid")[0].childNodes[0].nodeValue = str(uuid1())
    xmldoc.getElementsByTagName("duration")[0].childNodes[0].nodeValue = duration
    xmldoc.getElementsByTagName("width")[0].childNodes[0].nodeValue = app.project.get("width")
    xmldoc.getElementsByTagName("height")[0].childNodes[0].nodeValue = app.project.get("height")
    xmldoc.getElementsByTagName("samplerate")[0].childNodes[0].nodeValue = app.project.get("sample_rate")
    xmldoc.getElementsByTagName("sequence")[0].setAttribute("id", app.project.get("id"))
    for childNode in xmldoc.getElementsByTagName("timebase"):
        childNode.childNodes[0].nodeValue = fps_float

    # Get parent audio node
    parentAudioNode = xmldoc.getElementsByTagName("audio")[0]

    # Loop through tracks
    all_tracks = get_app().project.get("layers")
    track_count = 1
    for track in sorted(all_tracks, key=itemgetter('number')):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error, fail silently, and continue
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track details
        track_locked = track.get("lock", False)
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Create video track node
        trackTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-track-video-template.xml'))
        videoTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        xmldoc.getElementsByTagName("video")[0].appendChild(videoTrackNode)

        # Create audio track nodes (1 for each channel)
        trackTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-track-audio-template.xml'))
        audioTrackNode = trackTemplateDoc.getElementsByTagName('track')[0]
        parentAudioNode.appendChild(audioTrackNode)
        audioTrackNode.getElementsByTagName("outputchannelindex")[0].childNodes[0].nodeValue = track_count

        # Is Track Locked?
        if track_locked:
            videoTrackNode.getElementsByTagName("locked")[0].childNodes[0].nodeValue = "TRUE"
            audioTrackNode.getElementsByTagName("locked")[0].childNodes[0].nodeValue = "TRUE"

        # Loop through clips on this track
        for clip in clips_on_track:
            # Create VIDEO clip node
            clipNode = None
            if clip.data.get("reader", {}).get("has_video"):
                clipTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-clip-video-template.xml'))
                clipNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                videoTrackNode.appendChild(clipNode)

                # Update clip properties
                clipNode.setAttribute('id', clip.data.get('id'))
                clipNode.getElementsByTagName("file")[0].setAttribute('id', clip.data.get('file_id'))
                clipNode.getElementsByTagName("name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("name")[1].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipNode.getElementsByTagName("in")[0].childNodes[0].nodeValue = clip.data.get('start') * fps_float
                clipNode.getElementsByTagName("out")[0].childNodes[0].nodeValue = clip.data.get('end') * fps_float
                clipNode.getElementsByTagName("start")[0].childNodes[0].nodeValue = clip.data.get('position') * fps_float
                clipNode.getElementsByTagName("end")[0].childNodes[0].nodeValue = (clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))) * fps_float
                clipNode.getElementsByTagName("duration")[0].childNodes[0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipNode.getElementsByTagName("pproTicksIn")[0].childNodes[0].nodeValue = (clip.data.get('start') * fps_float) * ticks
                clipNode.getElementsByTagName("pproTicksOut")[0].childNodes[0].nodeValue = (clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Opacity", clipNode, clip.data.get('alpha', {}).get('Points', []), 100.0)

            # Create AUDIO clip nodes
            if clip.data.get("reader", {}).get("has_audio"):
                clipTemplateDoc = minidom.parse(os.path.join(info.RESOURCES_PATH, 'export-clip-audio-template.xml'))
                clipAudioNode = clipTemplateDoc.getElementsByTagName('clipitem')[0]
                audioTrackNode.appendChild(clipAudioNode)

                # Update audio characteristics
                if clipNode:
                    clipNode.getElementsByTagName("samplerate")[0].childNodes[0].nodeValue = clip.data.get("reader", {}).get("sample_rate")
                    clipNode.getElementsByTagName("channelcount")[0].childNodes[0].nodeValue = clip.data.get("reader", {}).get("channels")
                    clipAudioNode.getElementsByTagName("file")[0].childNodes.clear()
                else:
                    clipAudioNode.getElementsByTagName("name")[1].childNodes[0].nodeValue = clip.data.get('title')
                    clipAudioNode.getElementsByTagName("pathurl")[0].childNodes[0].nodeValue = clip.data.get('title')

                # Update audio clip properties
                clipAudioNode.setAttribute('id', "%s-audio" % clip.data.get('id'))
                clipAudioNode.getElementsByTagName("file")[0].setAttribute('id', clip.data.get('file_id'))
                clipAudioNode.getElementsByTagName("trackindex")[0].childNodes[0].nodeValue = track_count
                clipAudioNode.getElementsByTagName("name")[0].childNodes[0].nodeValue = clip.data.get('title')
                clipAudioNode.getElementsByTagName("in")[0].childNodes[0].nodeValue = clip.data.get('start') * fps_float
                clipAudioNode.getElementsByTagName("out")[0].childNodes[0].nodeValue = clip.data.get('end') * fps_float
                clipAudioNode.getElementsByTagName("start")[0].childNodes[0].nodeValue = clip.data.get('position') * fps_float
                clipAudioNode.getElementsByTagName("end")[0].childNodes[0].nodeValue = (clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))) * fps_float
                clipAudioNode.getElementsByTagName("duration")[0].childNodes[0].nodeValue = (clip.data.get('end') - clip.data.get('start')) * fps_float
                clipAudioNode.getElementsByTagName("pproTicksIn")[0].childNodes[0].nodeValue = (clip.data.get('start') * fps_float) * ticks
                clipAudioNode.getElementsByTagName("pproTicksOut")[0].childNodes[0].nodeValue = (clip.data.get('end') * fps_float) * ticks

                # Add Keyframes (if any)
                createEffect(xmldoc, "Audio Levels", clipAudioNode, clip.data.get('volume', {}).get('Points', []), 1.0)

            else:
                # No audio, remove audio characteristics from the video clip node
                if clipNode:
                    audio_node = clipNode.getElementsByTagName("audio")[0]
                    audio_node.parentNode.removeChild(audio_node)

        # Update counter
        track_count += 1

    try:
        file = open(os.fsencode(file_path), "wb")  # wb needed for windows support
        file.write(bytes(xmldoc.toxml(), 'UTF-8'))
        file.close()
    except IOError as inst:
        log.error("Error writing XML export: {}".format(str(inst)))
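# pproTicksIn/pproTicksOut are Premiere-style tick values: frames * ticks,
# with ticks = 254016000000 per frame as defined above. Illustrative numbers:
# a clip 'start' of 2.0 s at 24 fps is 48 frames, i.e.
# 48 * 254016000000 = 12192768000000 ticks.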
def color_update(self, item, new_color, interpolation=-1, interpolation_details=[]):
    """Insert/Update a color keyframe for the selected row"""

    # Determine what was changed
    property = self.model.item(item.row(), 0).data()
    property_type = property[1]["type"]
    closest_point_x = property[1]["closest_point_x"]
    previous_point_x = property[1]["previous_point_x"]
    property_key = property[0]
    clip_id, item_type = item.data()

    if property_type == "color":
        # Find this clip
        c = None
        clip_updated = False

        if item_type == "clip":
            # Get clip object
            c = Clip.get(id=clip_id)
        elif item_type == "transition":
            # Get transition object
            c = Transition.get(id=clip_id)
        elif item_type == "effect":
            # Get effect object
            c = Effect.get(id=clip_id)

        if c:
            # Update clip attribute
            if property_key in c.data:
                log.info("color update: %s" % c.data)

                # Loop through each keyframe (red, blue, and green)
                for color, new_value in [("red", new_color.red()), ("blue", new_color.blue()), ("green", new_color.green())]:

                    # Keyframe
                    # Loop through points, find a matching point on this frame
                    found_point = False
                    for point in c.data[property_key][color]["Points"]:
                        log.info("looping points: co.X = %s" % point["co"]["X"])
                        if interpolation == -1 and point["co"]["X"] == self.frame_number:
                            # Found point, update value
                            found_point = True
                            clip_updated = True
                            # Update point
                            point["co"]["Y"] = new_value
                            log.info("updating point: co.X = %s to value: %s" % (point["co"]["X"], float(new_value)))
                            break

                        elif interpolation > -1 and point["co"]["X"] == previous_point_x:
                            # Only update interpolation type (and the LEFT side of the curve)
                            found_point = True
                            clip_updated = True
                            point["interpolation"] = interpolation
                            if interpolation == 0:
                                point["handle_right"] = point.get("handle_right") or {"Y": 0.0, "X": 0.0}
                                point["handle_right"]["X"] = interpolation_details[0]
                                point["handle_right"]["Y"] = interpolation_details[1]

                            log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                            log.info("use interpolation preset: %s" % str(interpolation_details))

                        elif interpolation > -1 and point["co"]["X"] == closest_point_x:
                            # Only update interpolation type (and the RIGHT side of the curve)
                            found_point = True
                            clip_updated = True
                            point["interpolation"] = interpolation
                            if interpolation == 0:
                                point["handle_left"] = point.get("handle_left") or {"Y": 0.0, "X": 0.0}
                                point["handle_left"]["X"] = interpolation_details[2]
                                point["handle_left"]["Y"] = interpolation_details[3]

                            log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                            log.info("use interpolation preset: %s" % str(interpolation_details))

                    # Create new point (if needed)
                    if not found_point:
                        clip_updated = True
                        log.info("Created new point at X=%s" % self.frame_number)
                        c.data[property_key][color]["Points"].append({'co': {'X': self.frame_number, 'Y': new_value}, 'interpolation': 1})

            # Reduce # of clip properties we are saving (performance boost)
            c.data = {property_key: c.data[property_key]}

            # Save changes
            if clip_updated:
                # Save
                c.save()

                # Update the preview
                get_app().window.refreshFrameSignal.emit()

            # Clear selection
            self.parent.clearSelection()
def color_update(self, item, new_color, interpolation=-1):
    """Insert/Update a color keyframe for the selected row"""

    # Determine what was changed
    property = self.model.item(item.row(), 0).data()
    property_type = property[1]["type"]
    closest_point_x = property[1]["closest_point_x"]
    property_key = property[0]
    clip_id, item_type = item.data()

    if property_type == "color":
        # Find this clip
        c = None
        clip_updated = False

        if item_type == "clip":
            # Get clip object
            c = Clip.get(id=clip_id)
        elif item_type == "transition":
            # Get transition object
            c = Transition.get(id=clip_id)
        elif item_type == "effect":
            # Get effect object
            c = Effect.get(id=clip_id)

        if c:
            # Update clip attribute
            if property_key in c.data:
                log.info(c.data)

                # Loop through each keyframe (red, blue, and green)
                for color, new_value in [("red", new_color.red()), ("blue", new_color.blue()), ("green", new_color.green())]:

                    # Keyframe
                    # Loop through points, find a matching point on this frame
                    found_point = False
                    for point in c.data[property_key][color]["Points"]:
                        log.info("looping points: co.X = %s" % point["co"]["X"])
                        if interpolation == -1 and point["co"]["X"] == self.frame_number:
                            # Found point, update value
                            found_point = True
                            clip_updated = True
                            # Update point
                            point["co"]["Y"] = new_value
                            log.info("updating point: co.X = %s to value: %s" % (point["co"]["X"], float(new_value)))
                            break
                        elif interpolation > -1 and point["co"]["X"] == closest_point_x:
                            # Only update interpolation type
                            found_point = True
                            clip_updated = True
                            point["interpolation"] = interpolation
                            log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                            break

                    # Create new point (if needed)
                    if not found_point:
                        clip_updated = True
                        log.info("Created new point at X=%s" % self.frame_number)
                        c.data[property_key][color]["Points"].append({'co': {'X': self.frame_number, 'Y': new_value}, 'interpolation': 1})

            # Reduce # of clip properties we are saving (performance boost)
            c.data = {property_key: c.data[property_key]}

            # Save changes
            if clip_updated:
                # Save
                c.save()

                # Update the preview
                get_app().window.preview_thread.refreshFrame()

            # Clear selection
            self.parent.clearSelection()
def mouseMoveEvent(self, event):
    # Get data model and selection
    model = self.clip_properties_model.model
    row = self.indexAt(event.pos()).row()
    column = self.indexAt(event.pos()).column()
    if model.item(row, 0):
        self.selected_label = model.item(row, 0)
        self.selected_item = model.item(row, 1)

    # Is the user dragging on the value column
    if self.selected_label and self.selected_item:
        frame_number = self.clip_properties_model.frame_number

        # Get the position of the cursor and % value
        value_column_x = self.columnViewportPosition(1)
        value_column_y = value_column_x + self.columnWidth(1)
        cursor_value = event.x() - value_column_x
        cursor_value_percent = cursor_value / self.columnWidth(1)

        property = self.selected_label.data()
        property_key = property[0]
        property_name = property[1]["name"]
        property_type = property[1]["type"]
        property_max = property[1]["max"]
        property_min = property[1]["min"]
        property_value = property[1]["value"]
        readonly = property[1]["readonly"]
        item_id, item_type = self.selected_item.data()

        # Bail if readonly
        if readonly:
            return

        # Get the original data of this item (prior to any updates, for the undo/redo system)
        if not self.original_data:
            # Ignore undo/redo history temporarily (to avoid a huge pile of undo/redo history)
            get_app().updates.ignore_history = True

            # Find this clip
            c = None
            if item_type == "clip":
                # Get clip object
                c = Clip.get(id=item_id)
            elif item_type == "transition":
                # Get transition object
                c = Transition.get(id=item_id)
            elif item_type == "effect":
                # Get effect object
                c = Effect.get(id=item_id)

            if c:
                if property_key in c.data:
                    # Grab the original data for this item/property
                    self.original_data = c.data

        # Calculate percentage value
        if property_type in ["float", "int"]:
            min_max_range = float(property_max) - float(property_min)

            # Determine if range is unreasonably long (such as position, start, end, etc.... which can be huge #'s)
            if min_max_range > 1000.0:
                # Get the current value
                self.new_value = QLocale().system().toDouble(self.selected_item.text())[0]

                # Huge range - increment / decrement slowly
                if self.previous_x == -1:
                    # Init previous_x for the first time
                    self.previous_x = event.x()

                # Calculate # of pixels dragged
                drag_diff = self.previous_x - event.x()
                if drag_diff > 0:
                    # Move to the left by a small amount
                    self.new_value -= 0.50
                elif drag_diff < 0:
                    # Move to the right by a small amount
                    self.new_value += 0.50

                # Update previous x
                self.previous_x = event.x()
            else:
                # Small range - use cursor % to calculate new value
                self.new_value = property_min + (min_max_range * cursor_value_percent)

            # Clamp value between min and max (just in case user drags too far)
            self.new_value = max(property_min, self.new_value)
            self.new_value = min(property_max, self.new_value)

            # Update value of this property
            self.clip_properties_model.value_updated(self.selected_item, -1, self.new_value)

            # Repaint
            self.viewport().update()
def create_clip(context, track):
    """Create a new clip based on this context dict"""
    app = get_app()
    _ = app._tr

    # Get FPS info
    fps_num = app.project.get("fps").get("num", 24)
    fps_den = app.project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get clip path (and prompt user if path not found)
    clip_path, is_modified, is_skipped = find_missing_file(context.get("clip_path", ""))
    if is_skipped:
        return

    # Get video/audio context
    video_ctx = context.get("AX", {}).get("V", {})
    audio_ctx = context.get("AX", {}).get("A", {})

    # Check for this path in our existing project data
    file = File.get(path=clip_path)

    # Load filepath in libopenshot clip object (which will try multiple readers to open it)
    clip_obj = openshot.Clip(clip_path)

    if not file:
        # Get the JSON for the clip's internal reader
        try:
            reader = clip_obj.Reader()
            file_data = json.loads(reader.Json())

            # Determine media type
            if file_data["has_video"] and not is_image(file_data):
                file_data["media_type"] = "video"
            elif file_data["has_video"] and is_image(file_data):
                file_data["media_type"] = "image"
            elif file_data["has_audio"] and not file_data["has_video"]:
                file_data["media_type"] = "audio"

            # Save new file to the project data
            file = File()
            file.data = file_data

            # Save file
            file.save()
        except Exception:
            log.warning('Error building File object for %s' % clip_path, exc_info=1)

    # Bail if the File object could not be built (the original code fell through
    # here and would raise AttributeError on file.data below)
    if not file:
        return

    if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
        # Determine thumb path
        thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
    else:
        # Audio file
        thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

    # Create Clip object
    clip = Clip()
    clip.data = json.loads(clip_obj.Json())
    clip.data["file_id"] = file.id
    clip.data["title"] = context.get("clip_path", "")
    clip.data["layer"] = track.data.get("number", 1000000)

    if video_ctx and not audio_ctx:
        # Only video
        clip.data["position"] = timecodeToSeconds(video_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_audio"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable audio
                },
                "interpolation": 2
            }]
        }
    elif audio_ctx and not video_ctx:
        # Only audio
        clip.data["position"] = timecodeToSeconds(audio_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(audio_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(audio_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["has_video"] = {
            "Points": [{
                "co": {
                    "X": 1.0,
                    "Y": 0.0  # Disable video
                },
                "interpolation": 2
            }]
        }
    else:
        # Both video and audio
        clip.data["position"] = timecodeToSeconds(video_ctx.get("timeline_position", "00:00:00:00"), fps_num, fps_den)
        clip.data["start"] = timecodeToSeconds(video_ctx.get("clip_start_time", "00:00:00:00"), fps_num, fps_den)
        clip.data["end"] = timecodeToSeconds(video_ctx.get("clip_end_time", "00:00:00:00"), fps_num, fps_den)

    # Add volume keyframes
    if context.get("volume"):
        clip.data["volume"] = {"Points": []}
        for keyframe in context.get("volume", []):
            clip.data["volume"]["Points"].append({
                "co": {
                    "X": round(timecodeToSeconds(keyframe.get("time", 0.0), fps_num, fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Add alpha keyframes
    if context.get("opacity"):
        clip.data["alpha"] = {"Points": []}
        for keyframe in context.get("opacity", []):
            clip.data["alpha"]["Points"].append({
                "co": {
                    "X": round(timecodeToSeconds(keyframe.get("time", 0.0), fps_num, fps_den) * fps_float),
                    "Y": keyframe.get("value", 0.0)
                },
                "interpolation": 1  # linear
            })

    # Save clip
    clip.save()
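# A minimal sketch of the timecode math create_clip() relies on, assuming the
# "HH:MM:SS:FF" non-drop-frame format used in the contexts above. This is a
# simplified stand-in for the real timecodeToSeconds() helper, not its source.
def timecode_to_seconds_sketch(timecode, fps_num, fps_den):
    hours, minutes, seconds, frames = [int(part) for part in timecode.split(":")]
    fps = float(fps_num) / float(fps_den)
    return (hours * 3600) + (minutes * 60) + seconds + (frames / fps)

# e.g. timecode_to_seconds_sketch("00:00:01:12", 24, 1) -> 1.5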
def read_legacy_project_file(self, file_path):
    """Attempt to read a legacy version 1.x openshot project file"""
    import sys, pickle
    from classes.query import File, Track, Clip, Transition
    from classes.app import get_app
    import openshot

    try:
        import json
    except ImportError:
        import simplejson as json

    # Get translation method
    _ = get_app()._tr

    # Append version info
    v = openshot.GetVersion()
    project_data = {}
    project_data["version"] = {"openshot-qt": info.VERSION, "libopenshot": v.ToString()}

    # Get FPS from project
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Import legacy openshot classes (from version 1.X)
    from classes.legacy.openshot import classes as legacy_classes
    from classes.legacy.openshot.classes import project as legacy_project
    from classes.legacy.openshot.classes import sequences as legacy_sequences
    from classes.legacy.openshot.classes import track as legacy_track
    from classes.legacy.openshot.classes import clip as legacy_clip
    from classes.legacy.openshot.classes import keyframe as legacy_keyframe
    from classes.legacy.openshot.classes import files as legacy_files
    from classes.legacy.openshot.classes import transition as legacy_transition
    from classes.legacy.openshot.classes import effect as legacy_effect
    from classes.legacy.openshot.classes import marker as legacy_marker

    # Alias the legacy module paths, so pickle can resolve the old class names
    sys.modules['openshot.classes'] = legacy_classes
    sys.modules['classes.project'] = legacy_project
    sys.modules['classes.sequences'] = legacy_sequences
    sys.modules['classes.track'] = legacy_track
    sys.modules['classes.clip'] = legacy_clip
    sys.modules['classes.keyframe'] = legacy_keyframe
    sys.modules['classes.files'] = legacy_files
    sys.modules['classes.transition'] = legacy_transition
    sys.modules['classes.effect'] = legacy_effect
    sys.modules['classes.marker'] = legacy_marker

    # Keep track of files that failed to load
    failed_files = []

    with open(file_path.encode('UTF-8'), 'rb') as f:
        try:
            # Unpickle legacy openshot project file
            v1_data = pickle.load(f, fix_imports=True, encoding="UTF-8")
            file_lookup = {}

            # Loop through files
            for item in v1_data.project_folder.items:
                # Is this item a File (i.e. ignore folders)
                if isinstance(item, legacy_files.OpenShotFile):
                    # Create file
                    try:
                        clip = openshot.Clip(item.name)
                        reader = clip.Reader()
                        file_data = json.loads(reader.Json(), strict=False)

                        # Determine media type
                        if file_data["has_video"] and not self.is_image(file_data):
                            file_data["media_type"] = "video"
                        elif file_data["has_video"] and self.is_image(file_data):
                            file_data["media_type"] = "image"
                        elif file_data["has_audio"] and not file_data["has_video"]:
                            file_data["media_type"] = "audio"

                        # Save new file to the project data
                        file = File()
                        file.data = file_data
                        file.save()

                        # Keep track of new ids and old ids
                        file_lookup[item.unique_id] = file
                    except Exception:
                        # Handle exception quietly
                        msg = "%s is not a valid video, audio, or image file." % item.name
                        log.error(msg)
                        failed_files.append(item.name)

            # Delete all tracks
            track_list = copy.deepcopy(Track.filter())
            for track in track_list:
                track.delete()

            # Create new tracks
            track_counter = 0
            for legacy_t in reversed(v1_data.sequences[0].tracks):
                t = Track()
                t.data = {"number": track_counter, "y": 0, "label": legacy_t.name}
                t.save()
                track_counter += 1

            # Loop through clips
            track_counter = 0
            for sequence in v1_data.sequences:
                for track in reversed(sequence.tracks):
                    for clip in track.clips:
                        # Get associated file for this clip
                        if clip.file_object.unique_id in file_lookup.keys():
                            file = file_lookup[clip.file_object.unique_id]
                        else:
                            # Skip missing file
                            log.info("Skipping importing missing file: %s" % clip.file_object.unique_id)
                            continue

                        # Create clip
                        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
                            # Determine thumb path
                            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
                        else:
                            # Audio file
                            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

                        # Get file name
                        path, filename = os.path.split(file.data["path"])

                        # Convert path to the correct relative path (based on this folder).
                        # NOTE: use a new name here; the original code reused file_path,
                        # which shadowed the function argument used in the error message below.
                        media_path = file.absolute_path()

                        # Create clip object for this file
                        c = openshot.Clip(media_path)

                        # Append missing attributes to Clip JSON
                        new_clip = json.loads(c.Json(), strict=False)
                        new_clip["file_id"] = file.id
                        new_clip["title"] = filename
                        new_clip["image"] = thumb_path

                        # Check for optional start and end attributes
                        new_clip["start"] = clip.start_time
                        new_clip["end"] = clip.end_time
                        new_clip["position"] = clip.position_on_track
                        new_clip["layer"] = track_counter

                        # Clear alpha (if needed)
                        if clip.video_fade_in or clip.video_fade_out:
                            new_clip["alpha"]["Points"] = []

                        # Video Fade IN
                        if clip.video_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.video_fade_in_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Video Fade OUT
                        if clip.video_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.video_fade_out_amount) * fps_float) + 1, 1.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["alpha"]["Points"].append(start_object)
                            new_clip["alpha"]["Points"].append(end_object)

                        # Clear Audio (if needed)
                        if clip.audio_fade_in or clip.audio_fade_out:
                            new_clip["volume"]["Points"] = []
                        else:
                            p = openshot.Point(1, clip.volume / 100.0, openshot.BEZIER)
                            p_object = json.loads(p.Json(), strict=False)
                            new_clip["volume"] = {"Points": [p_object]}

                        # Audio Fade IN
                        # NOTE: the original code used clip.video_fade_in_amount here, which
                        # looks like a copy/paste slip; the legacy clip class also defines
                        # audio_fade_in_amount, which appears to be the intended value.
                        if clip.audio_fade_in:
                            # Add keyframes
                            start = openshot.Point(round(clip.start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round((clip.start_time + clip.audio_fade_in_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Audio Fade OUT (same note as above: audio_fade_out_amount
                        # replaces the original video_fade_out_amount)
                        if clip.audio_fade_out:
                            # Add keyframes
                            start = openshot.Point(round((clip.end_time - clip.audio_fade_out_amount) * fps_float) + 1, clip.volume / 100.0, openshot.BEZIER)
                            start_object = json.loads(start.Json(), strict=False)
                            end = openshot.Point(round(clip.end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                            end_object = json.loads(end.Json(), strict=False)
                            new_clip["volume"]["Points"].append(start_object)
                            new_clip["volume"]["Points"].append(end_object)

                        # Save clip
                        clip_object = Clip()
                        clip_object.data = new_clip
                        clip_object.save()

                    # Loop through transitions
                    for trans in track.transitions:
                        # Fix default transition
                        if not trans.resource or not os.path.exists(trans.resource):
                            trans.resource = os.path.join(info.PATH, "transitions", "common", "fade.svg")

                        # Open up QtImageReader for transition Image
                        transition_reader = openshot.QtImageReader(trans.resource)

                        trans_begin_value = 1.0
                        trans_end_value = -1.0
                        if trans.reverse:
                            trans_begin_value = -1.0
                            trans_end_value = 1.0

                        brightness = openshot.Keyframe()
                        brightness.AddPoint(1, trans_begin_value, openshot.BEZIER)
                        brightness.AddPoint(round(trans.length * fps_float) + 1, trans_end_value, openshot.BEZIER)
                        contrast = openshot.Keyframe(trans.softness * 10.0)

                        # Create transition dictionary
                        transitions_data = {
                            "id": get_app().project.generate_id(),
                            "layer": track_counter,
                            "title": "Transition",
                            "type": "Mask",
                            "position": trans.position_on_track,
                            "start": 0,
                            "end": trans.length,
                            "brightness": json.loads(brightness.Json(), strict=False),
                            "contrast": json.loads(contrast.Json(), strict=False),
                            "reader": json.loads(transition_reader.Json(), strict=False),
                            "replace_image": False
                        }

                        # Save transition
                        t = Transition()
                        t.data = transitions_data
                        t.save()

                    # Increment track counter
                    track_counter += 1

        except Exception as ex:
            # Error parsing legacy contents
            # (translate the format string first, then interpolate)
            msg = _("Failed to load project file %(path)s: %(error)s") % {"path": file_path, "error": ex}
            log.error(msg)
            raise Exception(msg)

    # Show warning if some files failed to load
    if failed_files:
        # Throw exception
        raise Exception(_("Failed to load the following files:\n%s") % ", ".join(failed_files))

    # Return mostly empty project_data dict (with just the current version #)
    log.info("Successfully loaded legacy project file: %s" % file_path)
    return project_data
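# Why the sys.modules aliasing above works: pickle stores classes by module
# path, so unpickling a legacy project needs the old paths to resolve. A
# self-contained sketch of the same trick, using hypothetical module names:
import pickle
import sys
import types

legacy_module = types.ModuleType("old_pkg")

class LegacyThing:
    pass

# Point the class at the fake legacy module, then register that module
LegacyThing.__module__ = "old_pkg"
legacy_module.LegacyThing = LegacyThing
sys.modules["old_pkg"] = legacy_module

# pickle now resolves "old_pkg.LegacyThing" even though no old_pkg exists on disk
data = pickle.dumps(LegacyThing())
obj = pickle.loads(data)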
def mouseMoveEvent(self, event):
    # Get data model and selection
    model = self.clip_properties_model.model
    row = self.indexAt(event.pos()).row()
    column = self.indexAt(event.pos()).column()
    if model.item(row, 0):
        self.selected_label = model.item(row, 0)
        self.selected_item = model.item(row, 1)

    # Is the user dragging on the value column
    if self.selected_label and self.selected_item:
        frame_number = self.clip_properties_model.frame_number

        # Get the position of the cursor and % value
        value_column_x = self.columnViewportPosition(1)
        value_column_y = value_column_x + self.columnWidth(1)
        cursor_value = event.x() - value_column_x
        cursor_value_percent = cursor_value / self.columnWidth(1)

        try:
            property = self.selected_label.data()
        except Exception:
            # If the item is deleted during this drag, an exception can occur.
            # Just ignore it, since it is harmless.
            return

        property_key = property[0]
        property_name = property[1]["name"]
        property_type = property[1]["type"]
        property_max = property[1]["max"]
        property_min = property[1]["min"]
        property_value = property[1]["value"]
        readonly = property[1]["readonly"]
        item_id, item_type = self.selected_item.data()

        # Bail if readonly
        if readonly:
            return

        # Get the original data of this item (prior to any updates, for the undo/redo system)
        if not self.original_data:
            # Ignore undo/redo history temporarily (to avoid a huge pile of undo/redo history)
            get_app().updates.ignore_history = True

            # Find this clip
            c = None
            if item_type == "clip":
                # Get clip object
                c = Clip.get(id=item_id)
            elif item_type == "transition":
                # Get transition object
                c = Transition.get(id=item_id)
            elif item_type == "effect":
                # Get effect object
                c = Effect.get(id=item_id)

            if c:
                if property_key in c.data:
                    # Grab the original data for this item/property
                    self.original_data = c.data

        # Calculate percentage value
        if property_type in ["float", "int"]:
            min_max_range = float(property_max) - float(property_min)

            # Determine if range is unreasonably long (such as position, start, end, etc.... which can be huge #'s)
            if min_max_range > 1000.0:
                # Get the current value
                self.new_value = QLocale().system().toDouble(self.selected_item.text())[0]

                # Huge range - increment / decrement slowly
                if self.previous_x == -1:
                    # init previous_x for the first time
                    self.previous_x = event.x()
                # calculate # of pixels dragged
                drag_diff = self.previous_x - event.x()
                if drag_diff > 0:
                    # Move to the left by a small amount
                    self.new_value -= 0.50
                elif drag_diff < 0:
                    # Move to the right by a small amount
                    self.new_value += 0.50
                # update previous x
                self.previous_x = event.x()
            else:
                # Small range - use cursor % to calculate new value
                self.new_value = property_min + (min_max_range * cursor_value_percent)

            # Clamp value between min and max (just in case the user drags too far)
            self.new_value = max(property_min, self.new_value)
            self.new_value = min(property_max, self.new_value)

            # Update value of this property
            self.clip_properties_model.value_updated(self.selected_item, -1, self.new_value)

            # Repaint
            self.viewport().update()
def accept(self):
    """ Ok button clicked """
    log.info('accept')

    # Get settings from form
    start_position = self.txtStartTime.value()
    track_num = self.cmbTrack.currentData()
    fade_value = self.cmbFade.currentData()
    fade_length = self.txtFadeLength.value()
    transition_path = self.cmbTransition.currentData()
    transition_length = self.txtTransitionLength.value()
    image_length = self.txtImageLength.value()
    zoom_value = self.cmbZoom.currentData()

    # Init position
    position = start_position

    random_transition = False
    if transition_path == "random":
        random_transition = True

    # Get frames per second
    fps = get_app().project.get(["fps"])
    fps_float = float(fps["num"]) / float(fps["den"])

    # Loop through each file (in the current order)
    for file in self.treeFiles.timeline_model.files:
        # Create a clip
        clip = Clip()
        clip.data = {}

        if (file.data["media_type"] == "video" or file.data["media_type"] == "image"):
            # Determine thumb path
            thumb_path = os.path.join(info.THUMBNAIL_PATH, "%s.png" % file.data["id"])
        else:
            # Audio file
            thumb_path = os.path.join(info.PATH, "images", "AudioThumbnail.png")

        # Get file name
        path, filename = os.path.split(file.data["path"])

        # Convert path to the correct relative path (based on this folder)
        file_path = file.absolute_path()

        # Create clip object for this file
        c = openshot.Clip(file_path)

        # Append missing attributes to Clip JSON
        new_clip = json.loads(c.Json())
        new_clip["position"] = position
        new_clip["layer"] = track_num
        new_clip["file_id"] = file.id
        new_clip["title"] = filename
        new_clip["image"] = thumb_path

        # Skip any clips that are missing a 'reader' attribute
        # TODO: Determine why this even happens, as it shouldn't be possible
        if not new_clip.get("reader"):
            continue  # Skip to next file

        # Overwrite frame rate (in case the user changed it in the File Properties)
        file_properties_fps = float(file.data["fps"]["num"]) / float(file.data["fps"]["den"])
        file_fps = float(new_clip["reader"]["fps"]["num"]) / float(new_clip["reader"]["fps"]["den"])
        fps_diff = file_fps / file_properties_fps
        new_clip["reader"]["fps"]["num"] = file.data["fps"]["num"]
        new_clip["reader"]["fps"]["den"] = file.data["fps"]["den"]
        # Scale duration / length / and end properties
        new_clip["reader"]["duration"] *= fps_diff
        new_clip["end"] *= fps_diff
        new_clip["duration"] *= fps_diff

        # Check for optional start and end attributes
        start_time = 0
        end_time = new_clip["reader"]["duration"]
        if 'start' in file.data.keys():
            start_time = file.data['start']
            new_clip["start"] = start_time
        if 'end' in file.data.keys():
            end_time = file.data['end']
            new_clip["end"] = end_time

        # Adjust clip duration, start, and end
        new_clip["duration"] = new_clip["reader"]["duration"]
        if file.data["media_type"] == "image":
            end_time = image_length
            new_clip["end"] = end_time

        # Adjust Fade of Clips (if no transition is chosen)
        if not transition_path:
            if fade_value is not None:
                # Overlap this clip with the previous one (if any)
                position = max(start_position, new_clip["position"] - fade_length)
                new_clip["position"] = position

                if fade_value == 'Fade In' or fade_value == 'Fade In & Out':
                    start = openshot.Point(round(start_time * fps_float) + 1, 0.0, openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(
                        min(round((start_time + fade_length) * fps_float) + 1,
                            round(end_time * fps_float) + 1),
                        1.0, openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

                if fade_value == 'Fade Out' or fade_value == 'Fade In & Out':
                    start = openshot.Point(
                        max(round((end_time * fps_float) + 1) - (round(fade_length * fps_float) + 1),
                            round(start_time * fps_float) + 1),
                        1.0, openshot.BEZIER)
                    start_object = json.loads(start.Json())
                    end = openshot.Point(round(end_time * fps_float) + 1, 0.0, openshot.BEZIER)
                    end_object = json.loads(end.Json())
                    new_clip['alpha']["Points"].append(start_object)
                    new_clip['alpha']["Points"].append(end_object)

        # Adjust zoom amount
        if zoom_value is not None:
            # Location animation
            if zoom_value == "Random":
                animate_start_x = uniform(-0.5, 0.5)
                animate_end_x = uniform(-0.15, 0.15)
                animate_start_y = uniform(-0.5, 0.5)
                animate_end_y = uniform(-0.15, 0.15)
                # Scale animation
                start_scale = uniform(0.5, 1.5)
                end_scale = uniform(0.85, 1.15)
            elif zoom_value == "Zoom In":
                animate_start_x = 0.0
                animate_end_x = 0.0
                animate_start_y = 0.0
                animate_end_y = 0.0
                # Scale animation
                start_scale = 1.0
                end_scale = 1.25
            elif zoom_value == "Zoom Out":
                animate_start_x = 0.0
                animate_end_x = 0.0
                animate_start_y = 0.0
                animate_end_y = 0.0
                # Scale animation
                start_scale = 1.25
                end_scale = 1.0

            # Add scale keyframes
            start = openshot.Point(round(start_time * fps_float) + 1, start_scale, openshot.BEZIER)
            start_object = json.loads(start.Json())
            end = openshot.Point(round(end_time * fps_float) + 1, end_scale, openshot.BEZIER)
            end_object = json.loads(end.Json())
            new_clip["gravity"] = openshot.GRAVITY_CENTER
            new_clip["scale_x"]["Points"].append(start_object)
            new_clip["scale_x"]["Points"].append(end_object)
            new_clip["scale_y"]["Points"].append(start_object)
            new_clip["scale_y"]["Points"].append(end_object)

            # Add location keyframes
            start_x = openshot.Point(round(start_time * fps_float) + 1, animate_start_x, openshot.BEZIER)
            start_x_object = json.loads(start_x.Json())
            end_x = openshot.Point(round(end_time * fps_float) + 1, animate_end_x, openshot.BEZIER)
            end_x_object = json.loads(end_x.Json())
            start_y = openshot.Point(round(start_time * fps_float) + 1, animate_start_y, openshot.BEZIER)
            start_y_object = json.loads(start_y.Json())
            end_y = openshot.Point(round(end_time * fps_float) + 1, animate_end_y, openshot.BEZIER)
            end_y_object = json.loads(end_y.Json())
            new_clip["gravity"] = openshot.GRAVITY_CENTER
            new_clip["location_x"]["Points"].append(start_x_object)
            new_clip["location_x"]["Points"].append(end_x_object)
            new_clip["location_y"]["Points"].append(start_y_object)
            new_clip["location_y"]["Points"].append(end_y_object)

        if transition_path:
            # Add transition for this clip (if any)
            # Open up QtImageReader for transition Image
            if random_transition:
                # NOTE: randint() is inclusive on both ends, so the upper bound must
                # be len(self.transitions) - 1 to avoid an occasional IndexError
                random_index = randint(0, len(self.transitions) - 1)
                transition_path = self.transitions[random_index]

            # Get reader for transition
            transition_reader = openshot.QtImageReader(transition_path)

            brightness = openshot.Keyframe()
            brightness.AddPoint(1, 1.0, openshot.BEZIER)
            brightness.AddPoint(
                round(min(transition_length, end_time - start_time) * fps_float) + 1,
                -1.0, openshot.BEZIER)
            contrast = openshot.Keyframe(3.0)

            # Create transition dictionary
            transitions_data = {
                "layer": track_num,
                "title": "Transition",
                "type": "Mask",
                "start": 0,
                "end": min(transition_length, end_time - start_time),
                "brightness": json.loads(brightness.Json()),
                "contrast": json.loads(contrast.Json()),
                "reader": json.loads(transition_reader.Json()),
                "replace_image": False
            }

            # Overlap this clip with the previous one (if any)
            position = max(start_position, position - transition_length)
            transitions_data["position"] = position
            new_clip["position"] = position

            # Create transition
            tran = Transition()
            tran.data = transitions_data
            tran.save()

        # Save Clip
        clip.data = new_clip
        clip.save()

        # Increment position by length of clip
        position += (end_time - start_time)

    # Accept dialog
    super(AddToTimeline, self).accept()
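# Worked example of the fade-in frame math used in accept() above, with
# hypothetical values (24 fps, a 10 second clip, and a 2 second fade):
fps_float = 24.0
start_time = 0.0
end_time = 10.0
fade_length = 2.0
fade_in_start_frame = round(start_time * fps_float) + 1                     # -> 1
fade_in_end_frame = min(round((start_time + fade_length) * fps_float) + 1,
                        round(end_time * fps_float) + 1)                    # -> 49
# Alpha therefore animates 0.0 -> 1.0 across frames 1..49 (the first 2 seconds).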
def export_edl():
    """Export EDL File"""
    app = get_app()
    _ = app._tr

    # EDL Export format
    edl_string = "%03d %-9s%-6s%-9s%11s %11s %11s %11s\n"

    # Get FPS info
    fps_num = get_app().project.get("fps").get("num", 24)
    fps_den = get_app().project.get("fps").get("den", 1)
    fps_float = float(fps_num / fps_den)

    # Get EDL path
    recommended_path = app.project.current_filepath or ""
    if not recommended_path:
        recommended_path = os.path.join(info.HOME_PATH, "%s.edl" % _("Untitled Project"))
    else:
        recommended_path = recommended_path.replace(".osp", ".edl")
    file_path = QFileDialog.getSaveFileName(app.window, _("Export EDL..."), recommended_path,
                                            _("Edit Decision Lists (*.edl)"))[0]
    if not file_path:
        return

    # Append .edl if needed
    if not file_path.endswith(".edl"):
        file_path = "%s.edl" % file_path

    # Get filename with no extension
    file_name_with_ext = os.path.basename(file_path)
    file_name = os.path.splitext(file_name_with_ext)[0]

    all_tracks = get_app().project.get("layers")
    track_count = len(all_tracks)
    for track in reversed(sorted(all_tracks, key=itemgetter('number'))):
        existing_track = Track.get(number=track.get("number"))
        if not existing_track:
            # Log error and fail silently, and continue
            log.error('No track object found with number: %s' % track.get("number"))
            continue

        # Track name
        track_name = track.get("label") or "TRACK %s" % track_count
        clips_on_track = Clip.filter(layer=track.get("number"))
        if not clips_on_track:
            continue

        # Generate EDL File (1 per track - limitation of EDL format)
        # TODO: Improve and move this into its own class
        with open("%s-%s.edl" % (file_path.replace(".edl", ""), track_name), 'w', encoding="utf8") as f:
            # Add Header
            f.write("TITLE: %s - %s\n" % (file_name, track_name))
            f.write("FCM: NON-DROP FRAME\n\n")

            # Loop through clips on this track
            edit_index = 1
            export_position = 0.0
            for clip in clips_on_track:
                # Do we need a blank clip?
                if clip.data.get('position', 0.0) > export_position:
                    # Blank clip (i.e. 00:00:00:00)
                    clip_start_time = secondsToTimecode(0.0, fps_num, fps_den)
                    clip_end_time = secondsToTimecode(clip.data.get('position') - export_position, fps_num, fps_den)
                    timeline_start_time = secondsToTimecode(export_position, fps_num, fps_den)
                    timeline_end_time = secondsToTimecode(clip.data.get('position'), fps_num, fps_den)

                    # Write blank clip
                    f.write(edl_string % (edit_index, "BL"[:9], "V"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))

                # Format clip start/end and timeline start/end values (i.e. 00:00:00:00)
                clip_start_time = secondsToTimecode(clip.data.get('start'), fps_num, fps_den)
                clip_end_time = secondsToTimecode(clip.data.get('end'), fps_num, fps_den)
                timeline_start_time = secondsToTimecode(clip.data.get('position'), fps_num, fps_den)
                timeline_end_time = secondsToTimecode(
                    clip.data.get('position') + (clip.data.get('end') - clip.data.get('start')),
                    fps_num, fps_den)

                has_video = clip.data.get("reader", {}).get("has_video", False)
                has_audio = clip.data.get("reader", {}).get("has_audio", False)
                if has_video:
                    # Video Track
                    f.write(edl_string % (edit_index, "AX"[:9], "V"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))
                if has_audio:
                    # Audio Track
                    f.write(edl_string % (edit_index, "AX"[:9], "A"[:6], "C",
                                          clip_start_time, clip_end_time,
                                          timeline_start_time, timeline_end_time))
                f.write("* FROM CLIP NAME: %s\n" % clip.data.get('title'))

                # Add opacity data (if any)
                alpha_points = clip.data.get('alpha', {}).get('Points', [])
                if len(alpha_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in alpha_points:
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = point.get('co', {}).get('Y', 0.0) * 100.0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for opacity_time in sorted(keyframes.keys()):
                        opacity_value = keyframes.get(opacity_time)
                        f.write("* OPACITY LEVEL AT %s IS %0.2f%% (REEL AX)\n" % (
                            secondsToTimecode(opacity_time, fps_num, fps_den), opacity_value))

                # Add volume data (if any)
                volume_points = clip.data.get('volume', {}).get('Points', [])
                if len(volume_points) > 1:
                    # Loop through Points (remove duplicates)
                    keyframes = {}
                    for point in volume_points:
                        keyframeTime = (point.get('co', {}).get('X', 1.0) - 1) / fps_float
                        keyframeValue = (point.get('co', {}).get('Y', 0.0) * 99.0) - 99  # Scaling 0-1 to -99-0
                        keyframes[keyframeTime] = keyframeValue
                    # Write keyframe values to EDL
                    for volume_time in sorted(keyframes.keys()):
                        volume_value = keyframes.get(volume_time)
                        f.write("* AUDIO LEVEL AT %s IS %0.2f DB (REEL AX A1)\n" % (
                            secondsToTimecode(volume_time, fps_num, fps_den), volume_value))

                # Update export position
                export_position = clip.data.get('position') + (clip.data.get('end') - clip.data.get('start'))
                f.write("\n")
                edit_index += 1

        # Update counters
        track_count -= 1
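# Sample of the event line the edl_string format above produces. The edit
# number, reel, and timecodes here are hypothetical; the layout follows the
# CMX3600-style columns this exporter writes:
edl_string = "%03d %-9s%-6s%-9s%11s %11s %11s %11s\n"
print(edl_string % (1, "AX", "V", "C",
                    "00:00:01:00", "00:00:05:00", "00:00:02:00", "00:00:06:00"))
# -> 001 AX       V     C        00:00:01:00 00:00:05:00 00:00:02:00 00:00:06:00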
def remove_keyframe(self, item):
    """Remove an existing keyframe (if any)"""

    # Determine what was changed
    property = self.model.item(item.row(), 0).data()
    property_name = property[1]["name"]
    property_type = property[1]["type"]
    closest_point_x = property[1]["closest_point_x"]
    property_key = property[0]
    clip_id, item_type = item.data()

    # Find this clip
    c = None
    clip_updated = False
    if item_type == "clip":
        # Get clip object
        c = Clip.get(id=clip_id)
    elif item_type == "transition":
        # Get transition object
        c = Transition.get(id=clip_id)
    elif item_type == "effect":
        # Get effect object
        c = Effect.get(id=clip_id)

    if c:
        # Update clip attribute
        if property_key in c.data:
            log.info("remove keyframe: %s" % c.data)

            # Determine type of keyframe (normal or color)
            keyframe_list = []
            if property_type == "color":
                keyframe_list = [
                    c.data[property_key]["red"],
                    c.data[property_key]["blue"],
                    c.data[property_key]["green"]
                ]
            else:
                keyframe_list = [c.data[property_key]]

            # Loop through each keyframe (red, blue, and green)
            for keyframe in keyframe_list:
                # Loop through points, find a matching point on this frame
                closest_point = None
                point_to_delete = None
                for point in keyframe["Points"]:
                    if point["co"]["X"] == self.frame_number:
                        # Found point on this frame - mark it for deletion
                        clip_updated = True
                        point_to_delete = point
                        break
                    if point["co"]["X"] == closest_point_x:
                        closest_point = point

                # If no point found, use closest point x
                if not point_to_delete:
                    point_to_delete = closest_point

                # Delete point (if needed)
                if point_to_delete:
                    clip_updated = True
                    log.info("Found point to delete at X=%s" % point_to_delete["co"]["X"])
                    keyframe["Points"].remove(point_to_delete)

            # Reduce # of clip properties we are saving (performance boost)
            c.data = {property_key: c.data[property_key]}

        # Save changes
        if clip_updated:
            # Save
            c.save()

            # Update the preview
            get_app().window.refreshFrameSignal.emit()

        # Clear selection
        self.parent.clearSelection()
def value_updated(self, item, interpolation=-1, value=None, interpolation_details=[]):
    """ Table cell change event - also handles context menu to update interpolation value """
    if self.ignore_update_signal:
        return

    # Get translation method
    _ = get_app()._tr

    # Determine what was changed
    property = self.model.item(item.row(), 0).data()
    property_name = property[1]["name"]
    closest_point_x = property[1]["closest_point_x"]
    previous_point_x = property[1]["previous_point_x"]
    property_type = property[1]["type"]
    property_key = property[0]
    clip_id, item_type = item.data()

    # Get value (if any)
    if item.text():
        # Set and format value based on property type
        if value is not None:
            # Override value
            new_value = value
        elif property_type == "string":
            # Use string value
            new_value = item.text()
        elif property_type == "bool":
            # Use boolean value
            if item.text() == _("False"):
                new_value = False
            else:
                new_value = True
        elif property_type == "int":
            # Use int value
            new_value = QLocale().system().toInt(item.text())[0]
        else:
            # Use decimal value
            new_value = QLocale().system().toFloat(item.text())[0]
    else:
        new_value = None

    log.info("%s for %s changed to %s at frame %s with interpolation: %s at closest x: %s" %
             (property_key, clip_id, new_value, self.frame_number, interpolation, closest_point_x))

    # Find this clip
    c = None
    clip_updated = False
    if item_type == "clip":
        # Get clip object
        c = Clip.get(id=clip_id)
    elif item_type == "transition":
        # Get transition object
        c = Transition.get(id=clip_id)
    elif item_type == "effect":
        # Get effect object
        c = Effect.get(id=clip_id)

    if c:
        # Update clip attribute
        if property_key in c.data:
            log.info("value updated: %s" % c.data)

            # Check the type of property (some are keyframe, and some are not)
            if type(c.data[property_key]) == dict:
                # Keyframe
                # Loop through points, find a matching point on this frame
                found_point = False
                point_to_delete = None
                for point in c.data[property_key]["Points"]:
                    log.info("looping points: co.X = %s" % point["co"]["X"])
                    if interpolation == -1 and point["co"]["X"] == self.frame_number:
                        # Found point, update value
                        found_point = True
                        clip_updated = True
                        # Update or delete point
                        if new_value is not None:
                            point["co"]["Y"] = float(new_value)
                            log.info("updating point: co.X = %s to value: %s" % (point["co"]["X"], float(new_value)))
                        else:
                            point_to_delete = point
                        break
                    elif interpolation > -1 and point["co"]["X"] == previous_point_x:
                        # Only update interpolation type (and the LEFT side of the curve)
                        found_point = True
                        clip_updated = True
                        point["interpolation"] = interpolation
                        if interpolation == 0:
                            point["handle_right"] = point.get("handle_right") or {"Y": 0.0, "X": 0.0}
                            point["handle_right"]["X"] = interpolation_details[0]
                            point["handle_right"]["Y"] = interpolation_details[1]
                        log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                        log.info("use interpolation preset: %s" % str(interpolation_details))
                    elif interpolation > -1 and point["co"]["X"] == closest_point_x:
                        # Only update interpolation type (and the RIGHT side of the curve)
                        found_point = True
                        clip_updated = True
                        point["interpolation"] = interpolation
                        if interpolation == 0:
                            point["handle_left"] = point.get("handle_left") or {"Y": 0.0, "X": 0.0}
                            point["handle_left"]["X"] = interpolation_details[2]
                            point["handle_left"]["Y"] = interpolation_details[3]
                        log.info("updating interpolation mode point: co.X = %s to %s" % (point["co"]["X"], interpolation))
                        log.info("use interpolation preset: %s" % str(interpolation_details))

                # Delete point (if needed)
                if point_to_delete:
                    clip_updated = True
                    log.info("Found point to delete at X=%s" % point_to_delete["co"]["X"])
                    c.data[property_key]["Points"].remove(point_to_delete)

                # Create new point (if needed)
                elif not found_point and new_value is not None:
                    clip_updated = True
                    log.info("Created new point at X=%s" % self.frame_number)
                    c.data[property_key]["Points"].append({
                        'co': {
                            'X': self.frame_number,
                            'Y': new_value
                        },
                        'interpolation': 1
                    })

            elif property_type == "int":
                # Integer
                clip_updated = True
                c.data[property_key] = int(new_value)
            elif property_type == "float":
                # Float
                clip_updated = True
                c.data[property_key] = new_value
            elif property_type == "bool":
                # Boolean
                clip_updated = True
                c.data[property_key] = bool(new_value)
            elif property_type == "string":
                # String
                clip_updated = True
                c.data[property_key] = str(new_value)

            # Reduce # of clip properties we are saving (performance boost)
            c.data = {property_key: c.data.get(property_key)}

        # Save changes
        if clip_updated:
            # Save
            c.save()

            # Update the preview
            get_app().window.refreshFrameSignal.emit()

        # Clear selection
        self.parent.clearSelection()
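# Illustrative only: the point layout value_updated() above edits when a
# bezier preset (interpolation == 0) is applied. handle_left / handle_right
# are the curve control handles; the values below are hypothetical.
example_bezier_point = {
    "co": {"X": 48, "Y": 1.0},            # frame 48, value 1.0
    "interpolation": 0,                   # 0 = bezier, 1 = linear, 2 = constant
    "handle_left": {"X": 0.5, "Y": 0.0},  # shapes the curve entering this point
    "handle_right": {"X": 0.5, "Y": 0.0}, # shapes the curve leaving this point
}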