def __init__(self, timeline_data=None):
    """Initialize the timeline model with the given settings.

    This class is a singleton, so it can only be initialized once.

    @param timeline_data: dict with timeline settings (fps, width, height,
        sample_rate, channels, channel_layout ...). Defaults to
        TIMELINE_DEFAULT_SETTINGS when omitted.
    @raises Exception: if an instance already exists (singleton guard).
    """
    # Fix: avoid a mutable (dict) default argument; resolve the module-level
    # defaults at call time instead. Passing nothing behaves exactly as before.
    if timeline_data is None:
        timeline_data = TIMELINE_DEFAULT_SETTINGS

    # Singleton guard: only one TimelineModel may ever be constructed
    if TimelineModel.__instance is not None:
        raise Exception("singleton!")
    TimelineModel.__instance = self

    fps = timeline_data["fps"]
    width = timeline_data["width"]
    height = timeline_data["height"]
    sample_rate = timeline_data["sample_rate"]
    channels = timeline_data["channels"]
    channel_layout = timeline_data["channel_layout"]

    # create openshot timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps["num"], fps["den"]),
        sample_rate, channels, channel_layout)
    self.timeline.Open()

    # id -> group mapping for timeline groups
    self.groups = dict()
def __init__(self, window):
    """Create a libopenshot Timeline mirroring the project settings, and
    register this object as the first listener for project data updates.

    @param window: the main window this timeline is synced with
    """
    self.app = get_app()
    self.window = window
    project = self.app.project
    # Fix: removed unused local `s = settings.get_settings()` (never read)

    # Get some settings from the project
    fps = project.get(["fps"])
    width = project.get(["width"])
    height = project.get(["height"])
    sample_rate = project.get(["sample_rate"])
    channels = project.get(["channels"])
    channel_layout = project.get(["channel_layout"])

    # Create an instance of a libopenshot Timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps["num"], fps["den"]),
        sample_rate, channels, channel_layout)
    self.timeline.info.channel_layout = channel_layout
    self.timeline.info.has_audio = True
    self.timeline.info.has_video = True
    # Presumably placeholder length/duration so the reader keeps producing
    # frames as the project grows — TODO confirm
    self.timeline.info.video_length = 99999
    self.timeline.info.duration = 999.99
    self.timeline.info.sample_rate = sample_rate
    self.timeline.info.channels = channels

    # Open the timeline reader
    self.timeline.Open()

    # Add self as listener to project data updates (at the beginning of the list)
    # This listener will receive events before others.
    self.app.updates.add_listener(self, 0)
def init_timeline(self):
    """Create and open a default preview timeline (1280x720, 24 fps,
    44.1 kHz stereo) and store it on ``self.timeline``."""
    # Hard-coded preview defaults
    fps_num, fps_den = 24, 1
    width, height = 1280, 720
    sample_rate = 44100
    channels = 2
    channel_layout = 3  # value used by the original code for stereo layout

    # Build the libopenshot Timeline reader
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps_num, fps_den),
        sample_rate, channels, channel_layout)

    # Mirror the reader metadata onto the timeline's info struct
    self.timeline.info.channel_layout = channel_layout
    self.timeline.info.has_audio = True
    self.timeline.info.has_video = True
    self.timeline.info.video_length = 99999
    self.timeline.info.duration = 999.99
    self.timeline.info.sample_rate = sample_rate
    self.timeline.info.channels = channels

    # Open the timeline reader so frames can be requested
    self.timeline.Open()
def __init__(self, cuts_json, clips_json, preview=False):
    """Cutting/preview dialog.

    NOTE(review): this body is broken as written — it references a name
    ``file`` that is neither a parameter nor defined anywhere visible
    (``#self.file = file`` below is commented out), so ``file.absolute_path()``
    raises NameError at runtime. ``cuts_json`` and ``clips_json`` are never
    used. The intended parameter list needs to be confirmed against callers
    before this can be fixed.

    @param cuts_json: unused in this body — TODO confirm intent
    @param clips_json: unused in this body — TODO confirm intent
    @param preview: when True, hide the cutting controls and show a
        "Preview" window title
    """
    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # If preview, hide cutting controls
    if preview:
        self.lblInstructions.setVisible(False)
        self.widgetControls.setVisible(False)
        self.setWindowTitle(_("Preview"))

    # Cut-selection state (frame numbers and captured thumbnails)
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None

    project = get_app().project

    # Keep track of file object
    # NOTE(review): `file` is undefined here (see docstring) — every line
    # below that reads `file.data` will fail with NameError.
    #self.file = file
    self.file_path = file.absolute_path()
    self.video_length = int(file.data['video_length'])
    self.fps_num = int(file.data['fps']['num'])
    self.fps_den = int(file.data['fps']['den'])
    self.fps = float(self.fps_num) / float(self.fps_den)
    self.width = int(file.data['width'])
    self.height = int(file.data['height'])
    self.sample_rate = int(file.data['sample_rate'])
    self.channels = int(file.data['channels'])
    self.channel_layout = int(file.data['channel_layout'])

    # Open video file with Reader
    log.info(self.file_path)

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.width, self.height,
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout

    try:
        # Add clip for current preview file
        self.clip = openshot.Clip(self.file_path)

        # Show waveform for audio files
        if not self.clip.Reader().info.has_video and self.clip.Reader(
        ).info.has_audio:
            self.clip.Waveform(True)

        # Set has_audio property
        self.r.info.has_audio = self.clip.Reader().info.has_audio

        if preview:
            # Display frame #'s during preview
            self.clip.display = openshot.FRAME_DISPLAY_CLIP

        self.r.AddClip(self.clip)
    # NOTE(review): bare except hides the real failure (including the
    # NameError above) — should be `except Exception:` at minimum
    except:
        log.error('Failed to load media file into preview player: %s' %
                  self.file_path)
        return

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred,
                                    QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set max size of video preview (for speed)
    viewport_rect = self.videoPreview.centeredViewport(
        self.videoPreview.width(), self.videoPreview.height())
    self.r.SetMaxSize(viewport_rect.width(), viewport_rect.height())

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    self.sliderVideo.setSingleStep(1)
    # NOTE(review): duplicated call — one setSingleStep(1) is enough
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Determine if a start or end attribute is in this file
    start_frame = 1
    if 'start' in self.file.data.keys():
        start_frame = (float(self.file.data['start']) * self.fps) + 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.btnStart.clicked.connect(self.btnStart_clicked)
    self.btnEnd.clicked.connect(self.btnEnd_clicked)
    self.btnClear.clicked.connect(self.btnClear_clicked)
    self.btnAddClip.clicked.connect(self.btnAddClip_clicked)
    self.initialized = True
def __init__(self):
    """Export dialog: builds the UI, clones the project timeline into a
    private "export" Timeline reader, and populates the export-path,
    profile, preset and channel-layout controls."""
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # get translations
    app = get_app()
    _ = app._tr

    # Get settings
    self.s = settings.get_settings()

    # Track metrics
    track_metric_screen("export-screen")

    # Dynamically load tabs from settings data
    self.settings_data = settings.get_settings().get_all_settings()

    # Add buttons to interface
    self.export_button = QPushButton(_('Export Video'))
    self.buttonBox.addButton(self.export_button, QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(QPushButton(_('Cancel')),
                             QDialogButtonBox.RejectRole)
    self.exporting = False

    # Clear timeline preview cache (to get more available memory)
    if get_app().window.cache_object:
        get_app().window.cache_object.Clear()

    # Hide audio channels
    self.lblChannels.setVisible(False)
    self.txtChannels.setVisible(False)

    # Get the original timeline settings (hoisted: one lookup of the long
    # attribute chain instead of a dozen)
    source_info = get_app().window.timeline_sync.timeline.info
    width = source_info.width
    height = source_info.height
    fps = source_info.fps
    sample_rate = source_info.sample_rate
    channels = source_info.channels
    channel_layout = source_info.channel_layout

    # Create new "export" openshot.Timeline object
    self.timeline = openshot.Timeline(width, height,
                                      openshot.Fraction(fps.num, fps.den),
                                      sample_rate, channels, channel_layout)
    # Init various properties from the real timeline
    self.timeline.info.channel_layout = source_info.channel_layout
    self.timeline.info.has_audio = source_info.has_audio
    self.timeline.info.has_video = source_info.has_video
    self.timeline.info.video_length = source_info.video_length
    self.timeline.info.duration = source_info.duration
    self.timeline.info.sample_rate = source_info.sample_rate
    self.timeline.info.channels = source_info.channels

    # Load the "export" Timeline reader with the JSON from the real timeline
    json_timeline = json.dumps(get_app().project._data)
    self.timeline.SetJson(json_timeline)

    # Open the "export" Timeline reader
    self.timeline.Open()

    # Default export path (fix: removed duplicated
    # `recommended_path = recommended_path = ...` assignment)
    recommended_path = os.path.join(info.HOME_PATH)
    if app.project.current_filepath:
        recommended_path = os.path.dirname(app.project.current_filepath)
    export_path = get_app().project.get(["export_path"])
    # Fix: guard against a None export_path (os.path.exists(None) raises
    # TypeError); empty/None falls through to the recommended default
    if export_path and os.path.exists(export_path):
        # Use last selected export path
        self.txtExportFolder.setText(export_path)
    else:
        # Default to home dir
        self.txtExportFolder.setText(recommended_path)

    # Is this a saved project?
    if not get_app().project.current_filepath:
        # Not saved yet
        self.txtFileName.setText(_("Untitled Project"))
    else:
        # Yes, project is saved
        # Get just the filename
        parent_path, filename = os.path.split(
            get_app().project.current_filepath)
        filename, ext = os.path.splitext(filename)
        self.txtFileName.setText(
            filename.replace("_", " ").replace("-", " ").capitalize())

    # Default image sequence pattern
    # Fix: was "%05.png" — missing the `d` conversion, which is not a valid
    # printf-style frame-number pattern
    self.txtImageFormat.setText("%05d.png")

    # Loop through Export To options
    export_options = [_("Video & Audio"), _("Image Sequence")]
    for option in export_options:
        # append profile to list
        self.cboExportTo.addItem(option)

    # Add channel layouts
    self.channel_layout_choices = []
    for layout_value, layout_name in [
            (openshot.LAYOUT_MONO, _("Mono (1 Channel)")),
            (openshot.LAYOUT_STEREO, _("Stereo (2 Channel)")),
            (openshot.LAYOUT_SURROUND, _("Surround (3 Channel)")),
            (openshot.LAYOUT_5POINT1, _("Surround (5.1 Channel)")),
            (openshot.LAYOUT_7POINT1, _("Surround (7.1 Channel)"))]:
        log.info((layout_value, layout_name))
        self.channel_layout_choices.append(layout_value)
        self.cboChannelLayout.addItem(layout_name, layout_value)

    # Connect signals
    self.btnBrowse.clicked.connect(
        functools.partial(self.btnBrowse_clicked))
    self.cboSimpleProjectType.currentIndexChanged.connect(
        functools.partial(self.cboSimpleProjectType_index_changed,
                          self.cboSimpleProjectType))
    self.cboProfile.currentIndexChanged.connect(
        functools.partial(self.cboProfile_index_changed, self.cboProfile))
    self.cboSimpleTarget.currentIndexChanged.connect(
        functools.partial(self.cboSimpleTarget_index_changed,
                          self.cboSimpleTarget))
    self.cboSimpleVideoProfile.currentIndexChanged.connect(
        functools.partial(self.cboSimpleVideoProfile_index_changed,
                          self.cboSimpleVideoProfile))
    self.cboSimpleQuality.currentIndexChanged.connect(
        functools.partial(self.cboSimpleQuality_index_changed,
                          self.cboSimpleQuality))
    self.cboChannelLayout.currentIndexChanged.connect(self.updateChannels)
    get_app().window.ExportFrame.connect(self.updateProgressBar)

    # ********* Advanced Profile List **********
    # Loop through profiles
    self.profile_names = []
    self.profile_paths = {}
    for profile_folder in [info.USER_PROFILES_PATH, info.PROFILES_PATH]:
        # Fix: loop variable renamed from `file` (shadowed a builtin)
        for profile_file in os.listdir(profile_folder):
            # Load Profile
            profile_path = os.path.join(profile_folder, profile_file)
            profile = openshot.Profile(profile_path)

            # Add description of Profile to list
            self.profile_names.append(profile.info.description)
            self.profile_paths[profile.info.description] = profile_path

    # Sort list
    self.profile_names.sort()

    # Loop through sorted profiles
    self.selected_profile_index = 0
    for box_index, profile_name in enumerate(self.profile_names):
        # Add to dropdown
        self.cboProfile.addItem(profile_name,
                                self.profile_paths[profile_name])

        # Set default (if it matches the project)
        if app.project.get(['profile']) == profile_name:
            self.selected_profile_index = box_index

    # ********* Simple Project Type **********
    # load the simple project type dropdown
    presets = []
    for preset_file in os.listdir(info.EXPORT_PRESETS_DIR):
        xmldoc = xml.parse(os.path.join(info.EXPORT_PRESETS_DIR, preset_file))
        # Fix: renamed from `type` (shadowed a builtin)
        type_nodes = xmldoc.getElementsByTagName("type")
        presets.append(_(type_nodes[0].childNodes[0].data))

    # Exclude duplicates
    type_index = 0
    selected_type = 0
    presets = list(set(presets))
    for item in sorted(presets):
        self.cboSimpleProjectType.addItem(item, item)
        if item == _("All Formats"):
            selected_type = type_index
        type_index += 1

    # Always select 'All Formats' option
    self.cboSimpleProjectType.setCurrentIndex(selected_type)

    # Populate all profiles
    self.populateAllProfiles(app.project.get(['profile']))

    # Connect framerate signals
    self.txtFrameRateNum.valueChanged.connect(self.updateFrameRate)
    self.txtFrameRateDen.valueChanged.connect(self.updateFrameRate)
    self.txtWidth.valueChanged.connect(self.updateFrameRate)
    self.txtHeight.valueChanged.connect(self.updateFrameRate)
    self.txtSampleRate.valueChanged.connect(self.updateFrameRate)
    self.txtChannels.valueChanged.connect(self.updateFrameRate)
    self.cboChannelLayout.currentIndexChanged.connect(self.updateFrameRate)

    # Determine the length of the timeline (in frames)
    self.updateFrameRate()
def LoadFile(self, path=None):
    """Load a media file into the video player.

    When ``path`` is falsy, switch the player back to the main timeline
    reader and restore the last timeline position; otherwise build a
    throw-away single-clip Timeline for previewing ``path``.
    Keeps the 3 most recent preview readers alive and closes older ones.

    @param path: absolute path of the media file, or None/"" to return to
        the timeline
    """
    # Check to see if this path is already loaded
    # TODO: Determine why path is passed in as an empty string instead of None
    if path == self.clip_path or (not path and not self.clip_path):
        return

    log.info("LoadFile %s", path)

    # Determine the current frame of the timeline (when switching to a clip)
    seek_position = 1
    if path and not self.clip_path:
        # Track the current frame
        self.original_position = self.player.Position()

    # If blank path, switch back to self.timeline reader
    if not path:
        # Return to self.timeline reader
        log.debug("Set timeline reader again in player: %s", self.timeline)
        self.player.Reader(self.timeline)

        # Clear clip reader reference
        self.clip_reader = None
        self.clip_path = None

        # Switch back to last timeline position
        seek_position = self.original_position
    else:
        # Create new timeline reader (to preview selected clip)
        project = get_app().project

        # Get some settings from the project
        fps = project.get("fps")
        width = project.get("width")
        height = project.get("height")
        sample_rate = project.get("sample_rate")
        channels = project.get("channels")
        channel_layout = project.get("channel_layout")

        # Create an instance of a libopenshot Timeline object
        self.clip_reader = openshot.Timeline(
            width, height, openshot.Fraction(fps["num"], fps["den"]),
            sample_rate, channels, channel_layout)
        self.clip_reader.info.channel_layout = channel_layout
        self.clip_reader.info.has_audio = True
        self.clip_reader.info.has_video = True
        self.clip_reader.info.video_length = 999999
        self.clip_reader.info.duration = 999999
        self.clip_reader.info.sample_rate = sample_rate
        self.clip_reader.info.channels = channels

        try:
            # Add clip for current preview file
            new_clip = openshot.Clip(path)
            self.clip_reader.AddClip(new_clip)
        # Fix: was a bare `except:` — narrow to Exception so system exits
        # and keyboard interrupts are not swallowed
        except Exception:
            log.error('Failed to load media file into video player: %s' %
                      path)
            return

        # Assign new clip_reader
        self.clip_path = path

        # Keep track of previous clip readers (so we can Close them later)
        self.previous_clips.append(new_clip)
        self.previous_clip_readers.append(self.clip_reader)

        # Open and set reader
        self.clip_reader.Open()
        self.player.Reader(self.clip_reader)

    # Close and destroy old clip readers (leaving the 3 most recent)
    while len(self.previous_clip_readers) > 3:
        log.debug('Removing old clips from preview: %s',
                  self.previous_clip_readers[0])
        previous_clip = self.previous_clips.pop(0)
        previous_clip.Close()
        previous_reader = self.previous_clip_readers.pop(0)
        previous_reader.Close()

    # Seek to the computed position, and resume speed
    self.Seek(seek_position)
def initTimeline(self, cuts, clips_json):
    """Build and return an opened preview Timeline containing a single
    hard-coded demo clip, or None on failure.

    Fix: the original body contained a large unreachable region after the
    first ``return timeline`` (a second timeline build, a cuts/clips loop,
    debug prints, and several no-op triple-quoted string blocks); that dead
    code has been removed. Behavior of the reachable path is unchanged.

    @param cuts: unused in the reachable code — kept for interface
        compatibility, TODO confirm intent
    @param clips_json: unused in the reachable code — kept for interface
        compatibility, TODO confirm intent
    @return: an opened openshot.Timeline, or None if the clip failed to load
    """
    project = get_app().project
    fps = project.get(["fps"])
    width = project.get(["width"])
    height = project.get(["height"])
    sample_rate = project.get(["sample_rate"])
    channels = project.get(["channels"])
    channel_layout = project.get(["channel_layout"])

    # Create an instance of a libopenshot Timeline object
    timeline = openshot.Timeline(width, height,
                                 openshot.Fraction(fps["num"], fps["den"]),
                                 sample_rate, channels, channel_layout)

    # TODO(review): hard-coded absolute media path — parameterize this
    preview_path = "/Users/admin/Downloads/BLACKPINK_Kill_This_Love.mp4"
    try:
        # Add clip for current preview file
        clip = openshot.Clip(preview_path)

        # Show waveform for audio files
        if not clip.Reader().info.has_video and clip.Reader().info.has_audio:
            clip.Waveform(True)

        timeline.AddClip(clip)
    # Fix: was a bare `except:`; also, the handler previously logged
    # `self.file_path`, which may not exist on this object (an AttributeError
    # inside the handler would mask the real error) — log the actual path
    except Exception:
        log.error('Failed to load media file into preview player: %s' %
                  preview_path)
        return None

    timeline.Open()
    return timeline
def __init__(self, cuts=None, preview=False):
    """Cutting/preview dialog built from a list of cut dicts.

    Converts ``cuts`` into clips via CutsToClips, loads them into a private
    Timeline reader sized from the main timeline, and starts the preview
    player. Fix: the mutable default argument ``cuts=[]`` was replaced with
    a None sentinel, and the large no-op triple-quoted string blocks and
    commented-out code were removed (behavior unchanged).

    @param cuts: list of cut dicts (as consumed by CutsToClips); defaults
        to an empty list
    @param preview: when True, hide the cutting controls and show a
        "Preview" window title
    """
    if cuts is None:
        cuts = []

    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # If preview, hide cutting controls
    if preview:
        self.lblInstructions.setVisible(False)
        self.widgetControls.setVisible(False)
        self.setWindowTitle(_("Preview"))

    # Cut-selection state
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None

    # Get the original timeline settings
    self.width = get_app().window.timeline_sync.timeline.info.width
    self.height = get_app().window.timeline_sync.timeline.info.height
    self.fps = get_app().window.timeline_sync.timeline.info.fps
    self.sample_rate = get_app(
    ).window.timeline_sync.timeline.info.sample_rate
    self.channels = get_app().window.timeline_sync.timeline.info.channels
    self.channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout
    self.fps_num = int(self.fps.num)
    self.fps_den = int(self.fps.den)

    self.video_length = 0

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.width, self.height,
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout

    # Convert cuts into preview clips and the total preview length (frames)
    self.clips, self.video_length = CutsToClips(cuts)
    for clip in self.clips:
        # Show waveform for audio files
        if not clip.Reader().info.has_video and clip.Reader(
        ).info.has_audio:
            clip.Waveform(True)
        if preview:
            # Display frame #'s during preview
            clip.display = openshot.FRAME_DISPLAY_CLIP
        self.r.AddClip(clip)

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred,
                                    QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set max size of video preview (for speed)
    viewport_rect = self.videoPreview.centeredViewport(
        self.videoPreview.width(), self.videoPreview.height())
    self.r.SetMaxSize(viewport_rect.width(), viewport_rect.height())

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    # Fix: removed a duplicated setSingleStep(1) call
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Always start previewing at frame 1
    start_frame = 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.btnStart.clicked.connect(self.btnStart_clicked)
    self.btnEnd.clicked.connect(self.btnEnd_clicked)
    self.btnClear.clicked.connect(self.btnClear_clicked)
    self.btnAddClip.clicked.connect(self.btnAddClip_clicked)
    self.initialized = True
def __init__(self, file=None, clip=None):
    """Region-select dialog: previews ``clip`` (with its effects) inside a
    private Timeline reader and emits SelectRegionSignal when ready.

    NOTE(review): ``file`` and ``clip`` default to None but are dereferenced
    unconditionally below — callers must always pass both; confirm before
    tightening the signature.

    @param file: project File object for the media being regioned
    @param clip: timeline Clip whose reader/effects drive the preview
    """
    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # Selection state
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None
    self.current_frame = 1

    # Create region clip with Reader
    self.clip = openshot.Clip(clip.Reader())
    self.clip.Open()

    # Set region clip start and end
    self.clip.Start(clip.Start())
    self.clip.End(clip.End())
    self.clip.Id(get_app().project.generate_id())
    # Fix: debug print() replaced with a debug-level log entry
    log.debug("IDS {} {}".format(clip.Id(), self.clip.Id()))

    # Keep track of file object
    self.file = file
    self.file_path = file.absolute_path()

    # Media properties, taken from the source clip's reader
    c_info = clip.Reader().info
    self.fps = c_info.fps.ToInt()
    self.fps_num = self.fps
    self.fps_den = 1
    self.width = c_info.width
    self.height = c_info.height
    self.sample_rate = c_info.sample_rate
    self.channels = c_info.channels
    self.channel_layout = c_info.channel_layout
    self.video_length = int(self.clip.Duration() * self.fps) + 1

    # Apply effects to region frames
    for effect in clip.Effects():
        self.clip.AddEffect(effect)

    # Open video file with Reader
    log.info(self.clip.Reader())

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred,
                                    QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set aspect ratio to match source content
    aspect_ratio = openshot.Fraction(self.width, self.height)
    aspect_ratio.Reduce()
    self.videoPreview.aspect_ratio = aspect_ratio

    # Set max size of video preview (for speed)
    self.viewport_rect = self.videoPreview.centeredViewport(
        self.width, self.height)

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.viewport_rect.width(), self.viewport_rect.height(),
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout
    self.r.SetMaxSize(self.viewport_rect.width(),
                      self.viewport_rect.height())

    try:
        # Add clip for current preview file
        self.clip = openshot.Clip(self.file_path)

        # Show waveform for audio files
        if not self.clip.Reader().info.has_video and self.clip.Reader(
        ).info.has_audio:
            self.clip.Waveform(True)

        # Set has_audio property
        self.r.info.has_audio = self.clip.Reader().info.has_audio

        # Update video_length property of the Timeline object
        self.r.info.video_length = self.video_length

        self.r.AddClip(self.clip)
    # Fix: was a bare `except:` — narrow to Exception
    except Exception:
        log.error(
            'Failed to load media file into region select player: %s' %
            self.file_path)
        return

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    # Fix: removed a duplicated setSingleStep(1) call
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Always start at frame 1
    start_frame = 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Add buttons
    self.cancel_button = QPushButton(_('Cancel'))
    self.process_button = QPushButton(_('Select Region'))
    self.buttonBox.addButton(self.process_button,
                             QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(self.cancel_button,
                             QDialogButtonBox.RejectRole)

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.initialized = True

    get_app().window.SelectRegionSignal.emit(clip.Id())
def __init__(self):
    """Upload dialog: wires its buttons, snapshots the currently available
    clips, and builds a private "export" Timeline cloned from the real
    project timeline."""
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer and initialize it
    ui_util.load_ui(self, self.ui_path)
    ui_util.init_ui(self)

    # get translations
    _ = get_app()._tr

    # Get settings
    self.s = settings.get_settings()

    # Wire the cancel button to the cancel handler
    self.cancel_btn.clicked.connect(self.cancel)
    # Wire the upload button to the uploadSequence handler
    self.upload_btn.clicked.connect(self.uploadSequence)

    # Keep the currently available clips on the instance
    self.avail_clips = get_app().window.timeline_sync.timeline.Clips()

    # Snapshot the original timeline settings (one lookup of the chain)
    source_info = get_app().window.timeline_sync.timeline.info

    # Create new "export" openshot.Timeline object with the same geometry
    # and audio settings as the real timeline
    self.timeline = openshot.Timeline(
        source_info.width, source_info.height,
        openshot.Fraction(source_info.fps.num, source_info.fps.den),
        source_info.sample_rate, source_info.channels,
        source_info.channel_layout)

    # Mirror the remaining reader properties
    self.timeline.info.channel_layout = source_info.channel_layout
    self.timeline.info.has_audio = source_info.has_audio
    self.timeline.info.has_video = source_info.has_video
    self.timeline.info.video_length = source_info.video_length
    self.timeline.info.duration = source_info.duration
    self.timeline.info.sample_rate = source_info.sample_rate
    self.timeline.info.channels = source_info.channels

    # Load the "export" Timeline reader with the JSON from the real
    # timeline, then open it
    self.timeline.SetJson(json.dumps(get_app().project._data))
    self.timeline.Open()

    self.updateFrameRate()
import openshot # Create an empty timeline t = openshot.Timeline(720, 480, openshot.Fraction(24, 1), 44100, 2, openshot.LAYOUT_STEREO) t.Open() # lower layer lower = openshot.QtImageReader("back.png") c1 = openshot.Clip(lower) c1.Layer(1) t.AddClip(c1) # higher layer higher = openshot.QtImageReader("front3.png") c2 = openshot.Clip(higher) c2.Layer(2) #c2.alpha = openshot.Keyframe(0.5) t.AddClip(c2) # Wipe / Transition brightness = openshot.Keyframe() brightness.AddPoint(1, 1.0, openshot.BEZIER) brightness.AddPoint(24, -1.0, openshot.BEZIER) contrast = openshot.Keyframe() contrast.AddPoint(1, 20.0, openshot.BEZIER) contrast.AddPoint(24, 20.0, openshot.BEZIER) reader = openshot.QtImageReader("mask.png") e = openshot.Mask(reader, brightness, contrast)
import openshot

# All demo assets live in the libopenshot examples folder; factor the
# duplicated absolute prefix into one constant (resulting paths are
# identical to the originals).
EXAMPLES_DIR = "/home/jonathan/apps/libopenshot/src/examples/"

# Create an empty timeline.
# BUG FIX: openshot.Timeline() requires a channel-layout sixth argument
# (width, height, fps, sample_rate, channels, channel_layout); the
# original call omitted it and would fail.
t = openshot.Timeline(720, 480, openshot.Fraction(24, 1), 44100, 2,
                      openshot.LAYOUT_STEREO)

# lower layer
lower = openshot.ImageReader(EXAMPLES_DIR + "back.png")
c1 = openshot.Clip(lower)
c1.Layer(1)
t.AddClip(c1)

# higher layer
higher = openshot.ImageReader(EXAMPLES_DIR + "front3.png")
c2 = openshot.Clip(higher)
c2.Layer(2)
t.AddClip(c2)

# Wipe / Transition: sweep brightness from +100 down to -100 over
# frames 1..24 while holding contrast at 20
brightness = openshot.Keyframe()
brightness.AddPoint(1, 100.0, openshot.BEZIER)
brightness.AddPoint(24, -100.0, openshot.BEZIER)
contrast = openshot.Keyframe()
contrast.AddPoint(1, 20.0, openshot.BEZIER)
contrast.AddPoint(24, 20.0, openshot.BEZIER)

e = openshot.Wipe(EXAMPLES_DIR + "mask.png", brightness, contrast)
def __init__(self, cuts):
    """Initialize the export dialog for the given list of cuts.

    Builds a private "export" openshot.Timeline mirroring the main
    timeline's settings, loads it with the project JSON (minus the
    "cuts" key, which libopenshot does not understand), and populates
    the export-path, profile, preset, and channel-layout widgets.

    @param cuts: list of cut dicts to export
    """
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # get translations
    app = get_app()
    _ = app._tr

    # Get settings
    self.s = settings.get_settings()
    self.cuts = cuts

    # Track metrics
    track_metric_screen("export-screen")

    # Dynamically load tabs from settings data
    self.settings_data = settings.get_settings().get_all_settings()

    # Add buttons to interface
    self.export_button = QPushButton(_('Export Video'))
    self.buttonBox.addButton(self.export_button, QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(QPushButton(_('Cancel')),
                             QDialogButtonBox.RejectRole)
    self.exporting = False

    # Timer to delay applying new profile/fps data
    # (so we don't spam libopenshot on every keystroke)
    self.delayed_fps_timer = QTimer()
    self.delayed_fps_timer.setInterval(200)
    self.delayed_fps_timer.timeout.connect(self.delayed_fps_callback)
    self.delayed_fps_timer.stop()

    # Pause playback (to prevent crash since we are fixing to change
    # the timeline's max size)
    app.window.actionPlay_trigger(None, force="pause")

    # Clear timeline preview cache (to get more available memory)
    app.window.timeline_sync.timeline.ClearAllCache()

    # Hide audio channels
    self.lblChannels.setVisible(False)
    self.txtChannels.setVisible(False)

    # Set OMP thread disabled flag (for stability)
    openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = True
    openshot.Settings.Instance().HIGH_QUALITY_SCALING = True

    # Get the original timeline settings (hoist the repeated
    # get_app().window.timeline_sync.timeline.info chain)
    src_info = app.window.timeline_sync.timeline.info
    width = src_info.width
    height = src_info.height
    fps = src_info.fps
    sample_rate = src_info.sample_rate
    channels = src_info.channels
    channel_layout = src_info.channel_layout

    # No keyframe rescaling has happened yet (due to differences in FPS)
    self.keyframes_rescaled = False

    # Create new "export" openshot.Timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps.num, fps.den),
        sample_rate, channels, channel_layout)

    # Init various properties from the real timeline
    self.timeline.info.channel_layout = channel_layout
    self.timeline.info.has_audio = src_info.has_audio
    self.timeline.info.has_video = src_info.has_video
    self.timeline.info.video_length = src_info.video_length
    self.timeline.info.duration = src_info.duration
    self.timeline.info.sample_rate = sample_rate
    self.timeline.info.channels = channels

    # Load the "export" Timeline reader with the JSON from the real
    # timeline, minus the "cuts" key.
    json_object = json.loads(json.dumps(app.project._data))
    json_object.pop("cuts", None)  # tolerate projects without a "cuts" key
    self.timeline.SetJson(json.dumps(json_object))

    # Open the "export" Timeline reader
    self.timeline.Open()

    # Default export path: last used folder, else the project's folder,
    # else the user's home directory.
    recommended_path = info.HOME_PATH
    if app.project.current_filepath:
        recommended_path = os.path.dirname(app.project.current_filepath)

    export_path = app.project.get(["export_path"])
    # Guard against a missing/None export_path (os.path.exists(None)
    # would raise TypeError)
    if export_path and os.path.exists(export_path):
        # Use last selected export path
        self.txtExportFolder.setText(export_path)
    else:
        # Default to home dir
        self.txtExportFolder.setText(recommended_path)

    # Is this a saved project?
    if not app.project.current_filepath:
        # Not saved yet
        self.txtFileName.setText(_("Untitled Project"))
    else:
        # Yes, project is saved - prettify just the filename
        parent_path, filename = os.path.split(app.project.current_filepath)
        filename, ext = os.path.splitext(filename)
        self.txtFileName.setText(
            filename.replace("_", " ").replace("-", " ").capitalize())

    # Default image type
    self.txtImageFormat.setText("-%05d.png")

    # Loop through Export To options
    export_options = [_("Video & Audio"), _("Video Only"),
                      _("Audio Only"), _("Image Sequence")]
    for option in export_options:
        # append option to list
        self.cboExportTo.addItem(option)

    # Add channel layouts
    self.channel_layout_choices = []
    for layout in [(openshot.LAYOUT_MONO, _("Mono (1 Channel)")),
                   (openshot.LAYOUT_STEREO, _("Stereo (2 Channel)")),
                   (openshot.LAYOUT_SURROUND, _("Surround (3 Channel)")),
                   (openshot.LAYOUT_5POINT1, _("Surround (5.1 Channel)")),
                   (openshot.LAYOUT_7POINT1, _("Surround (7.1 Channel)"))]:
        log.info(layout)
        self.channel_layout_choices.append(layout[0])
        self.cboChannelLayout.addItem(layout[1], layout[0])

    # Connect signals
    self.btnBrowse.clicked.connect(
        functools.partial(self.btnBrowse_clicked))
    self.cboSimpleProjectType.currentIndexChanged.connect(
        functools.partial(self.cboSimpleProjectType_index_changed,
                          self.cboSimpleProjectType))
    self.cboProfile.currentIndexChanged.connect(
        functools.partial(self.cboProfile_index_changed, self.cboProfile))
    self.cboSimpleTarget.currentIndexChanged.connect(
        functools.partial(self.cboSimpleTarget_index_changed,
                          self.cboSimpleTarget))
    self.cboSimpleVideoProfile.currentIndexChanged.connect(
        functools.partial(self.cboSimpleVideoProfile_index_changed,
                          self.cboSimpleVideoProfile))
    self.cboSimpleQuality.currentIndexChanged.connect(
        functools.partial(self.cboSimpleQuality_index_changed,
                          self.cboSimpleQuality))
    self.cboChannelLayout.currentIndexChanged.connect(self.updateChannels)
    app.window.ExportFrame.connect(self.updateProgressBar)

    # ********* Advanced Profile List **********
    # Loop through profiles
    self.profile_names = []
    self.profile_paths = {}
    for profile_folder in [info.USER_PROFILES_PATH, info.PROFILES_PATH]:
        for file in os.listdir(profile_folder):
            # Load Profile
            profile_path = os.path.join(profile_folder, file)
            profile = openshot.Profile(profile_path)

            # Add description of Profile to list
            profile_name = "%s (%sx%s)" % (profile.info.description,
                                           profile.info.width,
                                           profile.info.height)
            self.profile_names.append(profile_name)
            self.profile_paths[profile_name] = profile_path

    # Sort list
    self.profile_names.sort()

    # Loop through sorted profiles
    box_index = 0
    self.selected_profile_index = 0
    for profile_name in self.profile_names:
        # Add to dropdown
        self.cboProfile.addItem(
            self.getProfileName(self.getProfilePath(profile_name)),
            self.getProfilePath(profile_name))

        # Set default (if it matches the project)
        if app.project.get(['profile']) in profile_name:
            self.selected_profile_index = box_index

        # increment item counter
        box_index += 1

    # ********* Simple Project Type **********
    # load the simple project type dropdown
    presets = []
    for preset_path in [info.EXPORT_PRESETS_PATH, info.USER_PRESETS_PATH]:
        for file in os.listdir(preset_path):
            xmldoc = xml.parse(os.path.join(preset_path, file))
            # renamed from "type" to avoid shadowing the builtin
            type_nodes = xmldoc.getElementsByTagName("type")
            presets.append(_(type_nodes[0].childNodes[0].data))

    # Exclude duplicates
    type_index = 0
    selected_type = 0
    presets = list(set(presets))
    for item in sorted(presets):
        self.cboSimpleProjectType.addItem(item, item)
        if item == _("All Formats"):
            selected_type = type_index
        type_index += 1

    # Always select 'All Formats' option
    self.cboSimpleProjectType.setCurrentIndex(selected_type)

    # Populate all profiles
    self.populateAllProfiles(app.project.get(['profile']))

    # Connect framerate signals
    self.txtFrameRateNum.valueChanged.connect(self.updateFrameRate)
    self.txtFrameRateDen.valueChanged.connect(self.updateFrameRate)
    self.txtWidth.valueChanged.connect(self.updateFrameRate)
    self.txtHeight.valueChanged.connect(self.updateFrameRate)
    self.txtSampleRate.valueChanged.connect(self.updateFrameRate)
    self.txtChannels.valueChanged.connect(self.updateFrameRate)
    self.cboChannelLayout.currentIndexChanged.connect(self.updateFrameRate)

    # Determine the length of the timeline (in frames)
    self.updateFrameRate()
def initTimeline(self, native_timeline, cuts, num, den): # Get some settings from the project fps = native_timeline.info.fps width = native_timeline.info.width height = native_timeline.info.height sample_rate = native_timeline.info.sample_rate channels = native_timeline.info.channels channel_layout = native_timeline.info.channel_layout # Create new "export" openshot.Timeline object self.timeline = openshot.Timeline(width, height, openshot.Fraction(fps.num, fps.den), sample_rate, channels, channel_layout) self.timeline.info.channel_layout = native_timeline.info.channel_layout self.timeline.info.has_audio = native_timeline.info.has_audio self.timeline.info.has_video = native_timeline.info.has_video self.timeline.info.video_length = native_timeline.info.video_length self.timeline.info.duration = native_timeline.info.duration self.timeline.info.sample_rate = native_timeline.info.sample_rate self.timeline.info.channels = native_timeline.info.channels #json_timeline = json.dumps(get_app().project._data) #self.timeline.SetJson(json_timeline) # Open the timeline reader #self.timeline.Open() #return timeline fps = float(num) / float(den) clips = self.getNativeClips() for cut in cuts: start = (int(cut["start"]) - 1) / fps end = (int(cut["end"])) / fps print("cut start-end", cut["start"], "-", cut["end"], "[", start, "-", end, "]") intersecting_clips = self.getIntersectClips(clips, start) if intersecting_clips: for clip in intersecting_clips: path = clip["reader"]["path"] c = openshot.Clip(path) self.clips.append(c) #c.Position(clip["position"]) c.Layer(0) c.Position(0) c.Start(start) c.End(end) self.video_length = end - start print("=======self.video_length:", self.video_length) #self.timeline.info.video_length = str(self.video_length) self.timeline.info.duration = end - start #data = {"start": start, "end": end} #c.SetJsonValue(json.dumps(data)) try: c.display = openshot.FRAME_DISPLAY_CLIP self.timeline.AddClip(c) print("---------", c.Json()) except: log.error( 'Failed to 
add into preview video player: %s' % c.Json()) # Get list of clip ids #clip_ids = [c.id for c in intersecting_clips] #self.timeline.Slice_Triggered(0, clip_ids, trans_ids, playhead_position) # Open and set reader self.timeline.Open() # Connect to signal #self.window.MaxSizeChanged.connect(self.MaxSizeChangedCB) ''' if action.type == "load": # This JSON is initially loaded to libopenshot to update the timeline self.timeline.SetJson(action.json(only_value=True)) self.timeline.Open() # Re-Open the Timeline reader # The timeline's profile changed, so update all clips self.timeline.ApplyMapperToClips() # Refresh current frame (since the entire timeline was updated) self.window.refreshFrameSignal.emit() else: # This JSON DIFF is passed to libopenshot to update the timeline self.timeline.ApplyJsonDiff(action.json(is_array=True)) ''' '''