def __init__(self, *args):
    """Initialize the video preview widget with default ratios and styling."""
    QWidget.__init__(self, *args)

    # Default to a 16:9 display aspect ratio with square (1:1) pixels
    self.aspect_ratio = openshot.Fraction()
    self.aspect_ratio.num = 16
    self.aspect_ratio.den = 9
    self.pixel_ratio = openshot.Fraction()
    self.pixel_ratio.num = 1
    self.pixel_ratio.den = 1

    # Paint on a solid black, opaque background
    widget_palette = QPalette()
    widget_palette.setColor(QPalette.Window, Qt.black)
    super().setPalette(widget_palette)
    super().setAttribute(Qt.WA_OpaquePaintEvent)
    super().setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)

    # Receive mouse-move events even when no button is pressed
    self.setMouseTracking(True)

    # No frame has been rendered yet
    self.current_image = None

    # Cache a reference to the main window
    self.win = get_app().window
def __init__(self, *args):
    """Initialize the transform-capable video preview widget."""
    QWidget.__init__(self, *args)

    # Default to a 16:9 display aspect ratio with square (1:1) pixels
    self.aspect_ratio = openshot.Fraction()
    self.aspect_ratio.num = 16
    self.aspect_ratio.den = 9
    self.pixel_ratio = openshot.Fraction()
    self.pixel_ratio.num = 1
    self.pixel_ratio.den = 1

    # Clip-transform state (nothing selected yet)
    self.transforming_clip = None
    self.transforming_clip_object = None
    self.transform = None

    # Resize/drag handles around the selected clip (populated on demand)
    for handle_name in (
        "topLeftHandle", "topRightHandle", "bottomLeftHandle",
        "bottomRightHandle", "topHandle", "bottomHandle",
        "leftHandle", "rightHandle", "centerHandle",
    ):
        setattr(self, handle_name, None)

    # Mouse interaction state
    self.mouse_pressed = False
    self.mouse_dragging = False
    self.mouse_position = None
    self.transform_mode = None
    self.corner_offset_x = None
    self.corner_offset_y = None

    # Geometry of the clip being transformed
    self.clipRect = None
    self.gravity_point = None
    self.original_clip_data = None

    # Mutex lock
    self.mutex = QMutex()

    # Qt style properties (dark background, opaque painting)
    widget_palette = QPalette()
    widget_palette.setColor(QPalette.Window, QColor("#191919"))
    super().setPalette(widget_palette)
    super().setAttribute(Qt.WA_OpaquePaintEvent)
    super().setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)

    # Receive mouse-move events even when no button is pressed
    self.setMouseTracking(True)

    # No frame has been rendered yet
    self.current_image = None

    # Cache a reference to the main window
    self.win = get_app().window

    # React to transform and refresh requests from the main window
    self.win.TransformSignal.connect(self.transformTriggered)
    self.win.refreshFrameSignal.connect(self.refreshTriggered)
def test_add_file(self):
    """Verify File.save inserts a new record once; re-saving must not duplicate it."""
    # Import here: this query class requires the app to exist first
    from classes.query import File

    file_count_before = len(File.filter())

    # Build file metadata from a dummy reader (24 fps, 640x480, stereo audio)
    dummy_reader = openshot.DummyReader(
        openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
    reader_properties = json.loads(dummy_reader.Json())

    # Persist the file record into the project data
    new_file = File()
    new_file.data = reader_properties
    new_file.data["path"] = os.path.join(PATH, "images", "openshot.png")
    new_file.data["media_type"] = "image"
    new_file.save()

    self.assertTrue(new_file)
    self.assertEqual(len(File.filter()), file_count_before + 1)

    # Saving the same record again must update, not insert
    new_file.save()
    self.assertEqual(len(File.filter()), file_count_before + 1)
def __init__(self, timeline_data=TIMELINE_DEFAULT_SETTINGS):
    """Initialize the timeline model with the given settings.

    This class is a singleton, so it can only be initialized once.

    @param timeline_data: dict with timeline settings
        (fps, width, height, sample_rate, channels, channel_layout)
    @raise RuntimeError: if an instance already exists
        (RuntimeError is a subclass of Exception, so existing callers
        catching Exception are unaffected)
    """
    if TimelineModel.__instance is not None:
        raise RuntimeError(
            "TimelineModel is a singleton and is already initialized")
    TimelineModel.__instance = self

    # Unpack the timeline settings
    fps = timeline_data["fps"]
    width = timeline_data["width"]
    height = timeline_data["height"]
    sample_rate = timeline_data["sample_rate"]
    channels = timeline_data["channels"]
    channel_layout = timeline_data["channel_layout"]

    # Create openshot timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps["num"], fps["den"]),
        sample_rate, channels, channel_layout)
    self.timeline.Open()

    # Named clip groups (empty until groups are created)
    self.groups = dict()
def __init__(self, window):
    """Create a libopenshot Timeline matching the project settings.

    Registers itself as the first project-data listener so the timeline
    is updated before any other listener.

    @param window: the main window object (kept for later reference)
    """
    self.app = get_app()
    self.window = window
    project = self.app.project

    # The original bound this to an unused local; keep the call in case
    # it lazily initializes the settings singleton, but drop the binding.
    settings.get_settings()

    # Get some settings from the project
    fps = project.get(["fps"])
    width = project.get(["width"])
    height = project.get(["height"])
    sample_rate = project.get(["sample_rate"])
    channels = project.get(["channels"])
    channel_layout = project.get(["channel_layout"])

    # Create an instance of a libopenshot Timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps["num"], fps["den"]),
        sample_rate, channels, channel_layout)
    self.timeline.info.channel_layout = channel_layout
    self.timeline.info.has_audio = True
    self.timeline.info.has_video = True
    # Effectively "infinite" length; the clips define the real duration
    self.timeline.info.video_length = 99999
    self.timeline.info.duration = 999.99
    self.timeline.info.sample_rate = sample_rate
    self.timeline.info.channels = channels

    # Open the timeline reader
    self.timeline.Open()

    # Add self as listener to project data updates (at the beginning of the
    # list) so this listener receives events before others.
    self.app.updates.add_listener(self, 0)
def init_timeline(self, fps=None, width=1280, height=720,
                  sample_rate=44100, channels=2, channel_layout=3):
    """Create and open the libopenshot Timeline used by this object.

    All parameters default to the previously hard-coded values, so
    existing ``init_timeline()`` callers behave exactly as before.

    @param fps: frame rate dict with 'num' and 'den' keys (default 24/1)
    @param width: frame width in pixels
    @param height: frame height in pixels
    @param sample_rate: audio sample rate in Hz
    @param channels: number of audio channels
    @param channel_layout: libopenshot channel-layout id
        (3 presumably corresponds to stereo — TODO confirm against
        openshot.LAYOUT_STEREO)
    """
    if fps is None:
        # Mutable default handled via None sentinel
        fps = {'num': 24, 'den': 1}

    # Create an instance of a libopenshot Timeline object
    self.timeline = openshot.Timeline(
        width, height, openshot.Fraction(fps['num'], fps['den']),
        sample_rate, channels, channel_layout)
    self.timeline.info.channel_layout = channel_layout
    self.timeline.info.has_audio = True
    self.timeline.info.has_video = True
    # Effectively "infinite" length; the clips define the real duration
    self.timeline.info.video_length = 99999
    self.timeline.info.duration = 999.99
    self.timeline.info.sample_rate = sample_rate
    self.timeline.info.channels = channels

    # Open the timeline reader
    self.timeline.Open()
def setUpClass(cls):
    """Init unit test data: seed the project with clips, files, and transitions."""
    # Create Qt application
    cls.app = OpenShotApp(sys.argv, mode="unittest")
    cls.clip_ids = []
    cls.file_ids = []
    cls.transition_ids = []

    # These query classes require the app to exist first
    from classes.query import Clip, File, Transition

    # Insert some clips into the project data
    for _ in range(5):
        source_clip = openshot.Clip(
            os.path.join(info.IMAGES_PATH, "AboutLogo.png"))
        clip_record = Clip()
        clip_record.data = json.loads(source_clip.Json())
        clip_record.save()
        # Keep track of the ids
        cls.clip_ids.append(clip_record.id)

    # Insert some files into the project data
    for _ in range(5):
        dummy_reader = openshot.DummyReader(
            openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
        file_record = File()
        file_record.data = json.loads(dummy_reader.Json())
        file_record.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
        file_record.data["media_type"] = "image"
        file_record.save()
        # Keep track of the ids
        cls.file_ids.append(file_record.id)

    # Insert some transitions into the project data
    for _ in range(5):
        mask_effect = openshot.Mask()
        transition_record = Transition()
        transition_record.data = json.loads(mask_effect.Json())
        transition_record.save()
        # Keep track of the ids
        cls.transition_ids.append(transition_record.id)
def __init__(self, *args):
    """Initialize the preview widget: default ratios and Qt view properties."""
    QWidget.__init__(self, *args)

    # 16:9 display aspect ratio with square (1:1) pixels by default
    self.aspect_ratio = openshot.Fraction()
    self.aspect_ratio.num = 16
    self.aspect_ratio.den = 9
    self.pixel_ratio = openshot.Fraction()
    self.pixel_ratio.num = 1
    self.pixel_ratio.den = 1

    # View properties: opaque painting, preferred sizing
    super().setAttribute(Qt.WA_OpaquePaintEvent)
    super().setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)

    # No frame has been rendered yet
    self.current_image = None
def changed(self, action):
    """Handle project-data changes that affect the aspect or pixel ratio.

    Reacts to full project loads and to updates of the 'display_ratio' /
    'pixel_ratio' keys, then resizes the timeline's max size to the
    preview viewport.

    @param action: an update-action object exposing .type, .key and .values
    """
    display_ratio_changed = False
    pixel_ratio_changed = False

    # Parenthesized for clarity; same precedence as the original condition
    if (action.key and action.key[0] in ["display_ratio", "pixel_ratio"]) \
            or action.type in ["load"]:
        # Update display ratio (if found)
        if action.type == "load" and action.values.get("display_ratio"):
            display_ratio_changed = True
            self.aspect_ratio = openshot.Fraction(
                action.values.get("display_ratio", {}).get("num", 16),
                action.values.get("display_ratio", {}).get("den", 9))
            log.info("Load: Set video widget display aspect ratio to: %s"
                     % self.aspect_ratio.ToFloat())
        elif action.key and action.key[0] == "display_ratio":
            display_ratio_changed = True
            self.aspect_ratio = openshot.Fraction(
                action.values.get("num", 16), action.values.get("den", 9))
            log.info("Update: Set video widget display aspect ratio to: %s"
                     % self.aspect_ratio.ToFloat())

        # Update pixel ratio (if found)
        if action.type == "load" and action.values.get("pixel_ratio"):
            pixel_ratio_changed = True
            # BUGFIX: pixel-ratio defaults are 1/1 (square pixels, matching
            # the widget's init), not the 16/9 display-ratio defaults the
            # original copy-pasted here.
            self.pixel_ratio = openshot.Fraction(
                action.values.get("pixel_ratio").get("num", 1),
                action.values.get("pixel_ratio").get("den", 1))
            log.info("Set video widget pixel aspect ratio to: %s"
                     % self.pixel_ratio.ToFloat())
        elif action.key and action.key[0] == "pixel_ratio":
            pixel_ratio_changed = True
            # BUGFIX: default to square pixels (1/1) here as well
            self.pixel_ratio = openshot.Fraction(
                action.values.get("num", 1), action.values.get("den", 1))
            log.info("Update: Set video widget pixel aspect ratio to: %s"
                     % self.pixel_ratio.ToFloat())

        # Update max size (to size of video preview viewport)
        if display_ratio_changed or pixel_ratio_changed:
            get_app().window.timeline_sync.timeline.SetMaxSize(
                round(self.width() * self.pixel_ratio.ToFloat()),
                round(self.height() * self.pixel_ratio.ToFloat()))
def export(self):
    """Export the timeline to a WebM test file (libvpx video + libvorbis audio).

    Writes frames of ``self.timeline`` to ``THE_EXPORT_FILE_PATH`` using
    hard-coded 24 fps, 720x480 settings; any writer failure is printed
    instead of raised (best-effort demo/test code).
    """
    # $ dpkg -L openshot-qt | grep export
    # /usr/lib/python3/dist-packages/openshot_qt/windows/export.py
    # /usr/lib/python3/dist-packages/openshot_qt/windows/ui/export.ui

    # Set MaxSize (so we don't have any downsampling)
    #self.timeline.SetMaxSize(video_settings.get("width"), video_settings.get("height"))

    # Set lossless cache settings (temporarily)
    export_cache_object = openshot.CacheMemory(250)
    self.timeline.SetCache(export_cache_object)

    export_file_path = THE_EXPORT_FILE_PATH
    #print(self.timeline.Json())
    #sys.exit(0)

    # FFmpegWriter API reference:
    # /usr/share/doc/libopenshot-doc/html/classopenshot_1_1FFmpegWriter.html
    try:
        w = openshot.FFmpegWriter(export_file_path)
        # Video: enabled, libvpx codec, 24/1 fps, 720x480, 1:1 pixel ratio,
        # no interlacing flags, 300 kbit/s
        w.SetVideoOptions(True, "libvpx", openshot.Fraction(24, 1), 720, 480,
                          openshot.Fraction(1, 1), False, False, 300000)
        # Audio: enabled, libvorbis, 44.1 kHz, 2 channels, layout id 3, 128 kbit/s
        w.SetAudioOptions(True, "libvorbis", 44100, 2, 3, 128000)

        # Open the writer
        w.Open()

        #240 = 24fps x 10s; #300 = 30fps x 10s;
        # NOTE(review): range(1, 240) writes frames 1..239 — one frame short
        # of a full 10 s at 24 fps; confirm whether frame 240 should be included.
        for frame in range(1, 240):
            w.WriteFrame(self.timeline.GetFrame(frame))

        # Close writer
        w.Close()
    except Exception as e:
        print(e)
def test_add_file(self):
    """Adding a file increases the count once; re-saving must not duplicate it."""
    file_count_before = len(File.filter())

    # Build file metadata from a dummy reader (24 fps, 640x480, stereo audio)
    dummy_reader = openshot.DummyReader(
        openshot.Fraction(24, 1), 640, 480, 44100, 2, 30.0)
    reader_properties = json.loads(dummy_reader.Json())

    # Persist the file record into the project data
    new_file = File()
    new_file.data = reader_properties
    new_file.data["path"] = os.path.join(info.IMAGES_PATH, "AboutLogo.png")
    new_file.data["media_type"] = "image"
    new_file.save()

    self.assertTrue(new_file)
    self.assertEqual(len(File.filter()), file_count_before + 1)

    # Saving the same record again must update, not insert
    new_file.save()
    self.assertEqual(len(File.filter()), file_count_before + 1)
def export_video(self):
    """Exports the timeline"""
    options = self.view.get_data()

    # Get the shared timeline model and its libopenshot timeline
    model = TimelineModel.get_instance()
    timeline = model.timeline

    # Audio settings: enabled flag, codec, then timeline-derived parameters
    audio_options = [
        options["has_audio"],
        options["audio_codec"],
        timeline.info.sample_rate,
        timeline.info.channels,
        timeline.info.channel_layout,
        options["audio_bitrate"],
    ]
    # Video settings: 4:3 pixel ratio, both interlacing flags off
    video_options = [
        options["has_video"],
        options["video_codec"],
        timeline.info.fps,
        options["width"],
        options["height"],
        openshot.Fraction(4, 3),
        False,
        False,
        options["video_bitrate"],
    ]
    start_frame = options["start_frame"]
    end_frame = options["end_frame"]

    # Ensure the output path carries the chosen container extension
    path = options["path"]
    video_format = options["video_format"]
    if os.path.splitext(path)[1] != ("." + video_format):
        path = "{}.{}".format(path, video_format)

    # Try to start the export; show a window with the message on any error
    try:
        model.export(path, audio_options, video_options,
                     start_frame, end_frame, self.view)
    except Exception as e:
        ExportErrorView(str(e))
    self.view.accept()
def __init__(self, *args):
    """Initialize the video preview widget, its transform state, and signals."""
    QWidget.__init__(self, *args)

    # 16:9 display aspect ratio with square (1:1) pixels by default
    self.aspect_ratio = openshot.Fraction()
    self.aspect_ratio.num = 16
    self.aspect_ratio.den = 9
    self.pixel_ratio = openshot.Fraction()
    self.pixel_ratio.num = 1
    self.pixel_ratio.den = 1

    # Clip-transform state (nothing selected yet)
    self.transforming_clip = None
    self.transforming_clip_object = None
    self.transform = None

    # Resize/drag handles around the selected clip (populated on demand)
    for handle_name in (
        "topLeftHandle", "topRightHandle", "bottomLeftHandle",
        "bottomRightHandle", "topHandle", "bottomHandle",
        "leftHandle", "rightHandle", "centerHandle",
    ):
        setattr(self, handle_name, None)

    # Mouse interaction state
    self.mouse_pressed = False
    self.mouse_dragging = False
    self.mouse_position = None
    self.transform_mode = None
    self.corner_offset_x = None
    self.corner_offset_y = None

    # Geometry of the clip being transformed
    self.clipRect = None
    self.gravity_point = None
    self.original_clip_data = None

    # Mutex lock
    self.mutex = QMutex()

    # Qt widget properties (opaque repainting, preferred sizing)
    super().setAttribute(Qt.WA_OpaquePaintEvent)
    super().setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)

    # Receive project data updates (used to update the timeline)
    get_app().updates.add_listener(self)

    # Receive mouse-move events even when no button is pressed
    self.setMouseTracking(True)

    # No frame has been rendered yet
    self.current_image = None

    # Cache a reference to the main window
    self.win = get_app().window

    # Delay resize handling so MaxSizeChanged signals don't spam libopenshot
    self.delayed_size = None
    self.delayed_resize_timer = QTimer()
    self.delayed_resize_timer.setInterval(200)
    self.delayed_resize_timer.timeout.connect(self.delayed_resize_callback)
    self.delayed_resize_timer.stop()

    # React to transform and refresh requests from the main window
    self.win.TransformSignal.connect(self.transformTriggered)
    self.win.refreshFrameSignal.connect(self.refreshTriggered)
def accept(self):
    """ Start exporting video, but don't close window.

    Reads all export settings from the dialog widgets, confirms overwrite
    of an existing file, writes every frame in the selected range with an
    FFmpegWriter, and maps known libopenshot error strings to metric
    events plus a user-friendly message box.
    """
    # Get settings
    self.s = settings.get_settings()

    # Disable controls while the export runs
    self.txtFileName.setEnabled(False)
    self.txtExportFolder.setEnabled(False)
    self.tabWidget.setEnabled(False)
    self.export_button.setEnabled(False)
    self.exporting = True

    # Test Succeeded
    # Determine final exported file path from the dialog fields
    file_name_with_ext = "%s.%s" % (self.txtFileName.text().strip(), self.txtVideoFormat.text().strip())
    export_file_path = os.path.join(self.txtExportFolder.text().strip(), file_name_with_ext)
    log.info(export_file_path)

    # Translate object
    _ = get_app()._tr

    # Handle exception
    if os.path.exists(export_file_path):
        # File already exists! Prompt user
        ret = QMessageBox.question(
            self, _("Export Video"),
            _("%s already exists.\nDo you want to replace it?") % file_name_with_ext,
            QMessageBox.No | QMessageBox.Yes)
        if ret == QMessageBox.No:
            # Stop and don't do anything: re-enable controls and bail out
            self.txtFileName.setEnabled(True)
            self.txtExportFolder.setEnabled(True)
            self.tabWidget.setEnabled(True)
            self.export_button.setEnabled(True)
            self.exporting = False
            return

    # Create FFmpegWriter
    try:
        w = openshot.FFmpegWriter(export_file_path)

        # Set video options (codec, fps, size, pixel ratio, bitrate)
        w.SetVideoOptions(
            True, self.txtVideoCodec.text(),
            openshot.Fraction(self.txtFrameRateNum.value(), self.txtFrameRateDen.value()),
            self.txtWidth.value(), self.txtHeight.value(),
            openshot.Fraction(self.txtPixelRatioNum.value(), self.txtPixelRatioDen.value()),
            False, False,
            int(self.convert_to_bytes(self.txtVideoBitRate.text())))

        # Set audio options (codec, sample rate, channels, layout, bitrate)
        w.SetAudioOptions(
            True, self.txtAudioCodec.text(),
            self.txtSampleRate.value(), self.txtChannels.value(),
            self.cboChannelLayout.currentData(),
            int(self.convert_to_bytes(self.txtAudioBitrate.text())))

        # Open the writer
        w.Open()

        # Init progress bar to span the selected frame range
        self.progressExportVideo.setMinimum(self.txtStartFrame.value())
        self.progressExportVideo.setMaximum(self.txtEndFrame.value())

        # Write each frame in the selected range (inclusive)
        for frame in range(self.txtStartFrame.value(), self.txtEndFrame.value() + 1):
            # Update progress bar
            self.progressExportVideo.setValue(frame)

            # Process events (to show the progress bar moving)
            QCoreApplication.processEvents()

            # Write the frame object to the video
            w.WriteFrame(get_app().window.timeline_sync.timeline.GetFrame(frame))

            # Check if we need to bail out (user cancelled the export)
            if not self.exporting:
                break

        # Close writer
        w.Close()
    except Exception as e:
        # TODO: Find a better way to catch the error. This is the only way I have found that
        # does not throw an error
        error_type_str = str(e)
        log.info("Error type string: %s" % error_type_str)

        # Map known libopenshot error markers to metric events
        if "InvalidChannels" in error_type_str:
            log.info("Error setting invalid # of channels (%s)" % (self.txtChannels.value()))
            track_metric_error("invalid-channels-%s-%s-%s-%s" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text(), self.txtChannels.value()))
        elif "InvalidSampleRate" in error_type_str:
            log.info("Error setting invalid sample rate (%s)" % (self.txtSampleRate.value()))
            track_metric_error("invalid-sample-rate-%s-%s-%s-%s" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text(), self.txtSampleRate.value()))
        elif "InvalidFormat" in error_type_str:
            log.info("Error setting invalid format (%s)" % (self.txtVideoFormat.text()))
            track_metric_error("invalid-format-%s" % (self.txtVideoFormat.text()))
        elif "InvalidCodec" in error_type_str:
            log.info("Error setting invalid codec (%s/%s/%s)" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text()))
            track_metric_error("invalid-codec-%s-%s-%s" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text()))
        elif "ErrorEncodingVideo" in error_type_str:
            log.info("Error encoding video frame (%s/%s/%s)" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text()))
            track_metric_error("video-encode-%s-%s-%s" % (self.txtVideoFormat.text(), self.txtVideoCodec.text(), self.txtAudioCodec.text()))

        # Show friendly error (strip the "<class ...> " style prefix)
        friendly_error = error_type_str.split("> ")[0].replace("<", "")

        # Prompt error message
        msg = QMessageBox()
        _ = get_app()._tr
        msg.setWindowTitle(_("Export Error"))
        msg.setText(_("Sorry, there was an error exporting your video: \n%s") % friendly_error)
        msg.exec_()

    # Accept dialog
    super(Export, self).accept()

    # Restore timeline settings
    self.restoreTimeline()
    log.info("End Accept")
def LoadFile(self, path=None):
    """ Load a media file into the video player.

    With a path, builds a throw-away Timeline containing just that clip
    and previews it; with no/blank path, restores the project timeline
    reader and seeks back to the last timeline position.

    @param path: absolute path of the media file, or None/"" to restore
        the project timeline
    """
    # Check to see if this path is already loaded
    # TODO: Determine why path is passed in as an empty string instead of None
    if path == self.clip_path or (not path and not self.clip_path):
        return

    log.info("LoadFile %s" % path)

    # Determine the current frame of the timeline (when switching to a clip)
    seek_position = 1
    if path and not self.clip_path:
        # Track the current frame so we can return to it later
        self.original_position = self.player.Position()

    # If blank path, switch back to self.timeline reader
    if not path:
        # Return to self.timeline reader
        log.debug("Set timeline reader again in player: %s" % self.timeline)
        self.player.Reader(self.timeline)

        # Clear clip reader reference
        self.clip_reader = None
        self.clip_path = None

        # Switch back to last timeline position
        seek_position = self.original_position
    else:
        # Create new timeline reader (to preview selected clip)
        project = get_app().project

        # Get some settings from the project
        fps = project.get("fps")
        width = project.get("width")
        height = project.get("height")
        sample_rate = project.get("sample_rate")
        channels = project.get("channels")
        channel_layout = project.get("channel_layout")

        # Create an instance of a libopenshot Timeline object
        self.clip_reader = openshot.Timeline(
            width, height, openshot.Fraction(fps["num"], fps["den"]),
            sample_rate, channels, channel_layout)
        self.clip_reader.info.channel_layout = channel_layout
        self.clip_reader.info.has_audio = True
        self.clip_reader.info.has_video = True
        # Effectively "infinite" length; the clip defines the real duration
        self.clip_reader.info.video_length = 999999
        self.clip_reader.info.duration = 999999
        self.clip_reader.info.sample_rate = sample_rate
        self.clip_reader.info.channels = channels

        try:
            # Add clip for current preview file
            new_clip = openshot.Clip(path)
            self.clip_reader.AddClip(new_clip)
        except Exception:
            # BUGFIX: narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt
            log.error('Failed to load media file into video player: %s' % path)
            return

        # Assign new clip_reader
        self.clip_path = path

        # Keep track of previous clip readers (so we can Close them later)
        self.previous_clips.append(new_clip)
        self.previous_clip_readers.append(self.clip_reader)

        # Open and set reader
        self.clip_reader.Open()
        self.player.Reader(self.clip_reader)

    # Close and destroy old clip readers (leaving the 3 most recent)
    while len(self.previous_clip_readers) > 3:
        log.debug('Removing old clips from preview: %s' % self.previous_clip_readers[0])
        previous_clip = self.previous_clips.pop(0)
        previous_clip.Close()
        previous_reader = self.previous_clip_readers.pop(0)
        previous_reader.Close()

    # Seek to the determined position, and resume speed
    self.Seek(seek_position)
# Write test video to this path
EXPORT_TESTS = os.path.join(os.path.expanduser("~"), ".openshot_qt", "tests")

# Check for the correct # of arguments
if len(sys.argv) != 15:
    print("Error: %s is not the correct # of arguments (15 expected)" % len(sys.argv))
    # BUGFIX: exit() (the site helper) returned status 0 — success — on a
    # usage error; sys.exit(1) reports the failure to the caller.
    sys.exit(1)

print("Params:")
print(sys.argv)

# Get video params from the arguments passed to this script
# NOTE(review): 'format' shadows the builtin; the name is kept because the
# rest of this script references it (e.g. the export path below).
format = sys.argv[1]
codec = sys.argv[2]
fps = openshot.Fraction(int(sys.argv[3]), int(sys.argv[4]))
width = int(sys.argv[5])
height = int(sys.argv[6])
pixel_ratio = openshot.Fraction(int(sys.argv[7]), int(sys.argv[8]))
bitrate = int(sys.argv[9])

# Get audio params
audio_codec = sys.argv[10]
sample_rate = int(sys.argv[11])
channels = int(sys.argv[12])
channel_layout = int(sys.argv[13])
audio_bitrate = int(sys.argv[14])

# Determine final exported file path
export_file_path = os.path.join(EXPORT_TESTS, "test.%s" % format)
import openshot # Create an empty timeline t = openshot.Timeline(720, 480, openshot.Fraction(24, 1), 44100, 2, openshot.LAYOUT_STEREO) t.Open() # lower layer lower = openshot.QtImageReader("back.png") c1 = openshot.Clip(lower) c1.Layer(1) t.AddClip(c1) # higher layer higher = openshot.QtImageReader("front3.png") c2 = openshot.Clip(higher) c2.Layer(2) #c2.alpha = openshot.Keyframe(0.5) t.AddClip(c2) # Wipe / Transition brightness = openshot.Keyframe() brightness.AddPoint(1, 1.0, openshot.BEZIER) brightness.AddPoint(24, -1.0, openshot.BEZIER) contrast = openshot.Keyframe() contrast.AddPoint(1, 20.0, openshot.BEZIER) contrast.AddPoint(24, 20.0, openshot.BEZIER) reader = openshot.QtImageReader("mask.png") e = openshot.Mask(reader, brightness, contrast)
def __init__(self, file=None, clip=None):
    """Create the cutting/region-select dialog for a clip.

    Builds a region clip from the given clip's reader, mirrors its
    properties (fps, size, audio), previews it in a throw-away Timeline,
    and wires up the slider/buttons.

    @param file: project File object backing the clip
    @param clip: openshot.Clip being cut / region-selected
    """
    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # Selection state
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None
    self.current_frame = 1

    # Create region clip with Reader
    self.clip = openshot.Clip(clip.Reader())
    self.clip.Open()

    # Set region clip start and end
    self.clip.Start(clip.Start())
    self.clip.End(clip.End())
    self.clip.Id(get_app().project.generate_id())
    # BUGFIX: leftover debug print() replaced with a debug-level log entry
    log.debug("IDS {} {}".format(clip.Id(), self.clip.Id()))

    # Keep track of file object
    self.file = file
    self.file_path = file.absolute_path()

    # Mirror the source reader's properties
    c_info = clip.Reader().info
    self.fps = c_info.fps.ToInt()  #float(self.fps_num) / float(self.fps_den)
    self.fps_num = self.fps  #int(file.data['fps']['num'])
    self.fps_den = 1  #int(file.data['fps']['den'])
    self.width = c_info.width  #int(file.data['width'])
    self.height = c_info.height  #int(file.data['height'])
    self.sample_rate = c_info.sample_rate  #int(file.data['sample_rate'])
    self.channels = c_info.channels  #int(file.data['channels'])
    self.channel_layout = c_info.channel_layout  #int(file.data['channel_layout'])
    self.video_length = int(self.clip.Duration() * self.fps) + 1  #int(file.data['video_length'])

    # Apply effects to region frames
    for effect in clip.Effects():
        self.clip.AddEffect(effect)

    # Open video file with Reader
    log.info(self.clip.Reader())

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set aspect ratio to match source content
    aspect_ratio = openshot.Fraction(self.width, self.height)
    aspect_ratio.Reduce()
    self.videoPreview.aspect_ratio = aspect_ratio

    # Set max size of video preview (for speed)
    self.viewport_rect = self.videoPreview.centeredViewport(self.width, self.height)

    # Create an instance of a libopenshot Timeline object for previewing
    self.r = openshot.Timeline(
        self.viewport_rect.width(), self.viewport_rect.height(),
        openshot.Fraction(self.fps_num, self.fps_den),
        self.sample_rate, self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout
    self.r.SetMaxSize(self.viewport_rect.width(), self.viewport_rect.height())

    try:
        # Add clip for current preview file
        self.clip = openshot.Clip(self.file_path)

        # Show waveform for audio files
        if not self.clip.Reader().info.has_video and self.clip.Reader().info.has_audio:
            self.clip.Waveform(True)

        # Set has_audio property
        self.r.info.has_audio = self.clip.Reader().info.has_audio

        # Update video_length property of the Timeline object
        self.r.info.video_length = self.video_length

        self.r.AddClip(self.clip)
    except Exception:
        # BUGFIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt
        log.error('Failed to load media file into region select player: %s' % self.file_path)
        return

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    # BUGFIX: setSingleStep(1) was called twice; one call is sufficient
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Determine if a start or end attribute is in this file
    start_frame = 1
    # if 'start' in self.file.data.keys():
    #     start_frame = (float(self.file.data['start']) * self.fps) + 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Add buttons
    self.cancel_button = QPushButton(_('Cancel'))
    self.process_button = QPushButton(_('Select Region'))
    self.buttonBox.addButton(self.process_button, QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(self.cancel_button, QDialogButtonBox.RejectRole)

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.initialized = True
    get_app().window.SelectRegionSignal.emit(clip.Id())
def accept(self):
    """ Start exporting video.

    Validates the output path, builds video/audio settings from the dialog
    widgets, reloads the "export" timeline from the project JSON, then writes
    every frame of every cut in ``self.cuts`` through an ``openshot.FFmpegWriter``.
    Progress is reported via the main window's ExportStarted / ExportFrame /
    ExportEnded signals; on success or failure the timeline is closed and
    global libopenshot settings are restored.
    """
    # get translations
    app = get_app()
    _ = app._tr

    # Disable controls while the export runs
    self.txtFileName.setEnabled(False)
    self.txtExportFolder.setEnabled(False)
    self.tabWidget.setEnabled(False)
    self.export_button.setEnabled(False)
    self.exporting = True

    # Determine type of export (video+audio, video, audio, image sequences)
    # _("Video & Audio"), _("Video Only"), _("Audio Only"), _("Image Sequence")
    export_type = self.cboExportTo.currentText()

    # Determine final exported file path
    if export_type != _("Image Sequence"):
        # "name.ext" — regular media file
        file_name_with_ext = "%s.%s" % (self.txtFileName.text().strip(),
                                        self.txtVideoFormat.text().strip())
    else:
        # image sequences already carry a frame pattern + extension
        file_name_with_ext = "%s%s" % (self.txtFileName.text().strip(),
                                       self.txtImageFormat.text().strip())
    export_file_path = os.path.join(self.txtExportFolder.text().strip(),
                                    file_name_with_ext)
    log.info(export_file_path)

    # Translate object
    _ = get_app()._tr

    # Refuse to export on top of a file that is an input of this project
    file = File.get(path=export_file_path)
    if file:
        ret = QMessageBox.question(
            self, _("Export Video"),
            _("%s is an input file.\nPlease choose a different name.") %
            file_name_with_ext, QMessageBox.Ok)
        # Re-enable controls and abort
        self.txtFileName.setEnabled(True)
        self.txtExportFolder.setEnabled(True)
        self.tabWidget.setEnabled(True)
        self.export_button.setEnabled(True)
        self.exporting = False
        return

    # Confirm overwriting an existing output file (not for image sequences)
    if os.path.exists(export_file_path) and export_type in [
            _("Video & Audio"), _("Video Only"), _("Audio Only")
    ]:
        # File already exists! Prompt user
        ret = QMessageBox.question(
            self, _("Export Video"),
            _("%s already exists.\nDo you want to replace it?") %
            file_name_with_ext,
            QMessageBox.No | QMessageBox.Yes)
        if ret == QMessageBox.No:
            # Stop and don't do anything
            # Re-enable controls
            self.txtFileName.setEnabled(True)
            self.txtExportFolder.setEnabled(True)
            self.tabWidget.setEnabled(True)
            self.export_button.setEnabled(True)
            self.exporting = False
            return

    # Init export settings (read from the dialog widgets)
    # NOTE(review): "end_frame" is stored +1, apparently so a later range()
    # treats it as exclusive — confirm against the frame loop below.
    video_settings = {
        "vformat": self.txtVideoFormat.text(),
        "vcodec": self.txtVideoCodec.text(),
        "fps": {
            "num": self.txtFrameRateNum.value(),
            "den": self.txtFrameRateDen.value()
        },
        "width": self.txtWidth.value(),
        "height": self.txtHeight.value(),
        "pixel_ratio": {
            "num": self.txtPixelRatioNum.value(),
            "den": self.txtPixelRatioDen.value()
        },
        "video_bitrate":
        int(self.convert_to_bytes(self.txtVideoBitRate.text())),
        "start_frame": self.txtStartFrame.value(),
        "end_frame": self.txtEndFrame.value() + 1
    }

    audio_settings = {
        "acodec": self.txtAudioCodec.text(),
        "sample_rate": self.txtSampleRate.value(),
        "channels": self.txtChannels.value(),
        "channel_layout": self.cboChannelLayout.currentData(),
        "audio_bitrate":
        int(self.convert_to_bytes(self.txtAudioBitrate.text()))
    }

    # Override vcodec and format for Image Sequences
    if export_type == _("Image Sequence"):
        image_ext = os.path.splitext(
            self.txtImageFormat.text().strip())[1].replace(".", "")
        video_settings["vformat"] = image_ext
        if image_ext in ["jpg", "jpeg"]:
            video_settings["vcodec"] = "mjpeg"
        else:
            video_settings["vcodec"] = image_ext

    # Set MaxSize (so we don't have any downsampling)
    self.timeline.SetMaxSize(video_settings.get("width"),
                             video_settings.get("height"))

    # Set lossless cache settings (temporarily)
    export_cache_object = openshot.CacheMemory(250)
    self.timeline.SetCache(export_cache_object)

    # Rescale all keyframes and reload project
    if self.export_fps_factor != 1.0:
        self.keyframes_rescaled = True
        get_app().project.rescale_keyframes(self.export_fps_factor)

    # Load the "export" Timeline reader with the JSON from the real timeline
    json_timeline = json.dumps(get_app().project._data)
    self.timeline.SetJson(json_timeline)

    # Re-update the timeline FPS again (since the timeline just got clobbered)
    self.updateFrameRate()

    # Create FFmpegWriter
    try:
        w = openshot.FFmpegWriter(export_file_path)

        # Set video options
        if export_type in [
                _("Video & Audio"), _("Video Only"), _("Image Sequence")
        ]:
            w.SetVideoOptions(
                True, video_settings.get("vcodec"),
                openshot.Fraction(
                    video_settings.get("fps").get("num"),
                    video_settings.get("fps").get("den")),
                video_settings.get("width"), video_settings.get("height"),
                openshot.Fraction(
                    video_settings.get("pixel_ratio").get("num"),
                    video_settings.get("pixel_ratio").get("den")), False,
                False, video_settings.get("video_bitrate"))

        # Set audio options
        if export_type in [_("Video & Audio"), _("Audio Only")]:
            w.SetAudioOptions(True, audio_settings.get("acodec"),
                              audio_settings.get("sample_rate"),
                              audio_settings.get("channels"),
                              audio_settings.get("channel_layout"),
                              audio_settings.get("audio_bitrate"))

        # Prepare the streams
        w.PrepareStreams()

        # These extra options should be set in an extra method
        # No feedback is given to the user
        # TODO: Tell user if option is not available

        # Set the quality in case crf was selected
        if "crf" in self.txtVideoBitRate.text():
            w.SetOption(openshot.VIDEO_STREAM, "crf",
                        str(int(video_settings.get("video_bitrate"))))

        # Open the writer
        w.Open()

        # Notify window of export started
        # NOTE(review): export_file_path is re-used below as the progress
        # message text, so ExportEnded at the bottom receives that message,
        # not the path — confirm this is intentional.
        export_file_path = ""
        get_app().window.ExportStarted.emit(
            export_file_path, video_settings.get("start_frame"),
            video_settings.get("end_frame"))

        # Old single-range export loop (kept commented-out by the author):
        '''
        progressstep = max(1 , round(( video_settings.get("end_frame") - video_settings.get("start_frame") ) / 1000))
        start_time_export = time.time()
        start_frame_export = video_settings.get("start_frame")
        end_frame_export = video_settings.get("end_frame")
        # Write each frame in the selected range
        for frame in range(video_settings.get("start_frame"), video_settings.get("end_frame")):
            # Update progress bar (emit signal to main window)
            if (frame % progressstep) == 0:
                end_time_export = time.time()
                if ((( frame - start_frame_export ) != 0) & (( end_time_export - start_time_export ) != 0)):
                    seconds_left = round(( start_time_export - end_time_export )*( frame - end_frame_export )/( frame - start_frame_export ))
                    fps_encode = ((frame - start_frame_export)/(end_time_export-start_time_export))
                    export_file_path = _("%(hours)d:%(minutes)02d:%(seconds)02d Remaining (%(fps)5.2f FPS)") % {
                        'hours' : seconds_left / 3600,
                        'minutes': (seconds_left / 60) % 60,
                        'seconds': seconds_left % 60,
                        'fps': fps_encode }
                    get_app().window.ExportFrame.emit(export_file_path, video_settings.get("start_frame"), video_settings.get("end_frame"), frame)
            # Process events (to show the progress bar moving)
            QCoreApplication.processEvents()
            # Write the frame object to the video
            w.WriteFrame(self.timeline.GetFrame(frame))
            # Check if we need to bail out
            if not self.exporting:
                break
        '''

        print("====cuts====", self.cuts)
        fps_num = float(video_settings.get("fps").get("num"))
        fps_den = float(video_settings.get("fps").get("den"))
        # Export each cut as a contiguous frame range, padded by one frame
        # on each side (-1 / +1)
        for cut in self.cuts:
            start_frame_export = round(
                float(cut["start"]) * fps_num / fps_den) - 1
            end_frame_export = round(
                float(cut["end"]) * fps_num / fps_den) + 1
            # NOTE(review): this evaluates to a float (round(...) / 1000),
            # unlike the commented-out loop above — confirm the `%` test
            # below behaves as intended with a float modulus.
            progressstep = max(
                1, round(end_frame_export - start_frame_export) / 1000)
            start_time_export = time.time()

            # Write each frame in the selected range
            for frame in range(start_frame_export, end_frame_export + 1):
                print("---frame:", frame)
                # Update progress bar (emit signal to main window)
                if (frame % progressstep) == 0:
                    end_time_export = time.time()
                    if (((frame - start_frame_export) != 0)
                            & ((end_time_export - start_time_export) != 0)):
                        # Estimated time remaining + encode speed
                        seconds_left = round(
                            (start_time_export - end_time_export) *
                            (frame - end_frame_export) /
                            (frame - start_frame_export))
                        fps_encode = (
                            (frame - start_frame_export) /
                            (end_time_export - start_time_export))
                        export_file_path = _(
                            "%(hours)d:%(minutes)02d:%(seconds)02d Remaining (%(fps)5.2f FPS)"
                        ) % {
                            'hours': seconds_left / 3600,
                            'minutes': (seconds_left / 60) % 60,
                            'seconds': seconds_left % 60,
                            'fps': fps_encode
                        }
                        get_app().window.ExportFrame.emit(
                            export_file_path, start_frame_export,
                            end_frame_export, frame)

                # Process events (to show the progress bar moving)
                QCoreApplication.processEvents()

                # Write the frame object to the video
                w.WriteFrame(self.timeline.GetFrame(frame))

                # Check if we need to bail out
                if not self.exporting:
                    break

        # Close writer
        w.Close()

    except Exception as e:
        # TODO: Find a better way to catch the error. This is the only way
        # I have found that does not throw an error
        error_type_str = str(e)
        log.info("Error type string: %s" % error_type_str)

        # Classify the libopenshot error by substring and report a metric
        if "InvalidChannels" in error_type_str:
            log.info("Error setting invalid # of channels (%s)" %
                     (audio_settings.get("channels")))
            track_metric_error("invalid-channels-%s-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec"),
                                audio_settings.get("channels")))
        elif "InvalidSampleRate" in error_type_str:
            log.info("Error setting invalid sample rate (%s)" %
                     (audio_settings.get("sample_rate")))
            track_metric_error("invalid-sample-rate-%s-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec"),
                                audio_settings.get("sample_rate")))
        elif "InvalidFormat" in error_type_str:
            log.info("Error setting invalid format (%s)" %
                     (video_settings.get("vformat")))
            track_metric_error("invalid-format-%s" %
                               (video_settings.get("vformat")))
        elif "InvalidCodec" in error_type_str:
            log.info("Error setting invalid codec (%s/%s/%s)" %
                     (video_settings.get("vformat"),
                      video_settings.get("vcodec"),
                      audio_settings.get("acodec")))
            track_metric_error("invalid-codec-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec")))
        elif "ErrorEncodingVideo" in error_type_str:
            log.info("Error encoding video frame (%s/%s/%s)" %
                     (video_settings.get("vformat"),
                      video_settings.get("vcodec"),
                      audio_settings.get("acodec")))
            track_metric_error("video-encode-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec")))

        # Show friendly error (strip the "<class '...'> " wrapper)
        friendly_error = error_type_str.split("> ")[0].replace("<", "")

        # Prompt error message
        msg = QMessageBox()
        _ = get_app()._tr
        msg.setWindowTitle(_("Export Error"))
        msg.setText(
            _("Sorry, there was an error exporting your video: \n%s") %
            friendly_error)
        msg.exec_()

    # Notify window that the export ended
    get_app().window.ExportEnded.emit(export_file_path)

    # Close timeline object
    self.timeline.Close()

    # Clear all cache
    self.timeline.ClearAllCache()

    # Re-set OMP thread enabled flag
    if self.s.get("omp_threads_enabled"):
        openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = False
    else:
        openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = True

    # Return scale mode to lower quality scaling (for faster previews)
    openshot.Settings.Instance().HIGH_QUALITY_SCALING = False

    # Return keyframes to preview scaling
    if self.keyframes_rescaled:
        get_app().project.rescale_keyframes(self.original_fps_factor)

    # Accept dialog
    super(Exportting, self).accept()
def __init__(self, cuts=None, preview=False):
    """Initialize the cutting/preview dialog.

    Builds a private ``openshot.Timeline`` mirroring the project's settings,
    converts the given cuts into clips (via ``CutsToClips``), embeds a
    ``VideoWidget`` preview, starts the preview thread, and wires up the
    playback / cutting controls.

    :param cuts: list of cut dicts to preview (default: empty list).
                 The former ``cuts=[]`` mutable default was replaced with the
                 ``None`` sentinel idiom — behavior for all callers is unchanged.
    :param preview: when True, hide the cutting controls and show the dialog
                    as a plain preview window (frame numbers displayed on clips).
    """
    if cuts is None:
        cuts = []

    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # If preview, hide cutting controls
    if preview:
        self.lblInstructions.setVisible(False)
        self.widgetControls.setVisible(False)
        self.setWindowTitle(_("Preview"))

    # Current cut-selection state
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None

    # Get the original timeline settings
    self.width = get_app().window.timeline_sync.timeline.info.width
    self.height = get_app().window.timeline_sync.timeline.info.height
    self.fps = get_app().window.timeline_sync.timeline.info.fps
    self.sample_rate = get_app(
    ).window.timeline_sync.timeline.info.sample_rate
    self.channels = get_app().window.timeline_sync.timeline.info.channels
    self.channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout
    self.fps_num = int(self.fps.num)
    self.fps_den = int(self.fps.den)

    self.video_length = 0

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.width, self.height,
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout

    # Convert the cuts into clips and add them to the preview timeline.
    # NOTE(review): assumes CutsToClips returns (clips, total_length) —
    # confirm the units of total_length against sliderVideo's maximum.
    self.clips, self.video_length = CutsToClips(cuts)
    for clip in self.clips:
        # Show waveform for audio files
        if not clip.Reader().info.has_video and clip.Reader(
        ).info.has_audio:
            clip.Waveform(True)
        if preview:
            # Display frame #'s during preview
            clip.display = openshot.FRAME_DISPLAY_CLIP
        self.r.AddClip(clip)

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred,
                                    QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set max size of video preview (for speed)
    viewport_rect = self.videoPreview.centeredViewport(
        self.videoPreview.width(), self.videoPreview.height())
    self.r.SetMaxSize(viewport_rect.width(), viewport_rect.height())

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Determine if a start or end attribute is in this file
    start_frame = 1

    # Display start frame (and then the previous frame) — the double-set
    # forces the preview to actually render the first frame
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.btnStart.clicked.connect(self.btnStart_clicked)
    self.btnEnd.clicked.connect(self.btnEnd_clicked)
    self.btnClear.clicked.connect(self.btnClear_clicked)
    self.btnAddClip.clicked.connect(self.btnAddClip_clicked)

    self.initialized = True
-16, # x offset -16, # y offset openshot.GRAVITY_BOTTOM_RIGHT, html_code, css_code, "#000000" # background color ) r.Open() # Open the reader r.DisplayInfo() # Display metadata # Set up Writer w = openshot.FFmpegWriter("pyHtmlExample.mp4") w.SetVideoOptions(True, "libx264", openshot.Fraction(30000, 1000), 1280, 720, openshot.Fraction(1, 1), False, False, 3000000) w.info.metadata["title"] = "testtest" w.info.metadata["artist"] = "aaa" w.info.metadata["album"] = "bbb" w.info.metadata["year"] = "2015" w.info.metadata["description"] = "ddd" w.info.metadata["comment"] = "eee" w.info.metadata["comment"] = "comment" w.info.metadata["copyright"] = "copyright OpenShot!" # Open the Writer w.Open() # Grab 30 frames from Reader and encode to Writer
def __init__(self, cuts):
    """Initialize the export dialog for a list of cuts.

    Builds a private "export" ``openshot.Timeline`` mirroring the project
    timeline's settings, loads the project JSON into it (minus the "cuts"
    key), populates the export-path / filename / profile / preset widgets,
    and wires up all dialog signals.

    :param cuts: list of cut dicts, stored on ``self.cuts`` and consumed by
                 the export loop in ``accept``.
    """
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # get translations
    app = get_app()
    _ = app._tr

    # Get settings
    self.s = settings.get_settings()
    self.cuts = cuts

    # Track metrics
    track_metric_screen("export-screen")

    # Dynamically load tabs from settings data
    self.settings_data = settings.get_settings().get_all_settings()

    # Add buttons to interface
    self.export_button = QPushButton(_('Export Video'))
    self.buttonBox.addButton(self.export_button,
                             QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(QPushButton(_('Cancel')),
                             QDialogButtonBox.RejectRole)
    self.exporting = False

    # Update FPS / Profile timer
    # Timer to use a delay before applying new profile/fps data (so we don't spam libopenshot)
    self.delayed_fps_timer = None
    self.delayed_fps_timer = QTimer()
    self.delayed_fps_timer.setInterval(200)
    self.delayed_fps_timer.timeout.connect(self.delayed_fps_callback)
    self.delayed_fps_timer.stop()

    # Pause playback (to prevent crash since we are fixing to change the timeline's max size)
    get_app().window.actionPlay_trigger(None, force="pause")

    # Clear timeline preview cache (to get more available memory)
    get_app().window.timeline_sync.timeline.ClearAllCache()

    # Hide audio channels
    self.lblChannels.setVisible(False)
    self.txtChannels.setVisible(False)

    # Set OMP thread disabled flag (for stability)
    openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = True
    openshot.Settings.Instance().HIGH_QUALITY_SCALING = True

    # Get the original timeline settings
    width = get_app().window.timeline_sync.timeline.info.width
    height = get_app().window.timeline_sync.timeline.info.height
    fps = get_app().window.timeline_sync.timeline.info.fps
    sample_rate = get_app().window.timeline_sync.timeline.info.sample_rate
    channels = get_app().window.timeline_sync.timeline.info.channels
    channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout

    # No keyframe rescaling has happened yet (due to differences in FPS)
    self.keyframes_rescaled = False

    # Create new "export" openshot.Timeline object
    self.timeline = openshot.Timeline(width, height,
                                      openshot.Fraction(fps.num, fps.den),
                                      sample_rate, channels, channel_layout)
    # Init various properties
    self.timeline.info.channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout
    self.timeline.info.has_audio = get_app(
    ).window.timeline_sync.timeline.info.has_audio
    self.timeline.info.has_video = get_app(
    ).window.timeline_sync.timeline.info.has_video
    self.timeline.info.video_length = get_app(
    ).window.timeline_sync.timeline.info.video_length
    self.timeline.info.duration = get_app(
    ).window.timeline_sync.timeline.info.duration
    self.timeline.info.sample_rate = get_app(
    ).window.timeline_sync.timeline.info.sample_rate
    self.timeline.info.channels = get_app(
    ).window.timeline_sync.timeline.info.channels

    # Load the "export" Timeline reader with the JSON from the real
    # timeline, after removing the project-specific "cuts" key
    json_timeline = json.dumps(get_app().project._data)
    json_str = json_timeline
    json_object = json.loads(json_str)
    json_object.pop("cuts")
    #json_object["clips"] = []
    self.timeline.SetJson(json.dumps(json_object))
    #print("======deded===", json.dumps(json_object))

    # Alternative clip-loading path kept commented-out by the author:
    '''
    self.clips, self.video_length = CutsToClips(cuts)
    for clip in self.clips:
        # Show waveform for audio files
        if not clip.Reader().info.has_video and clip.Reader().info.has_audio:
            clip.Waveform(True)
        self.timeline.AddClip(clip)
    self.video_length = self.video_length * fps.num / fps.den
    '''

    # Open the "export" Timeline reader
    self.timeline.Open()

    # Default export path
    # NOTE(review): duplicated assignment ("recommended_path =
    # recommended_path =") — harmless but looks like a typo.
    recommended_path = recommended_path = os.path.join(info.HOME_PATH)
    if app.project.current_filepath:
        recommended_path = os.path.dirname(app.project.current_filepath)
    # NOTE(review): if the project has no "export_path" this may pass a
    # non-string to os.path.exists — confirm project.get's default.
    export_path = get_app().project.get(["export_path"])
    if os.path.exists(export_path):
        # Use last selected export path
        self.txtExportFolder.setText(export_path)
    else:
        # Default to home dir
        self.txtExportFolder.setText(recommended_path)

    # Is this a saved project?
    if not get_app().project.current_filepath:
        # Not saved yet
        self.txtFileName.setText(_("Untitled Project"))
    else:
        # Yes, project is saved
        # Get just the filename
        parent_path, filename = os.path.split(
            get_app().project.current_filepath)
        filename, ext = os.path.splitext(filename)
        self.txtFileName.setText(
            filename.replace("_", " ").replace("-", " ").capitalize())

    # Default image type
    self.txtImageFormat.setText("-%05d.png")

    # Loop through Export To options
    export_options = [
        _("Video & Audio"),
        _("Video Only"),
        _("Audio Only"),
        _("Image Sequence")
    ]
    for option in export_options:
        # append profile to list
        self.cboExportTo.addItem(option)

    # Add channel layouts
    self.channel_layout_choices = []
    for layout in [(openshot.LAYOUT_MONO, _("Mono (1 Channel)")),
                   (openshot.LAYOUT_STEREO, _("Stereo (2 Channel)")),
                   (openshot.LAYOUT_SURROUND, _("Surround (3 Channel)")),
                   (openshot.LAYOUT_5POINT1, _("Surround (5.1 Channel)")),
                   (openshot.LAYOUT_7POINT1, _("Surround (7.1 Channel)"))]:
        log.info(layout)
        self.channel_layout_choices.append(layout[0])
        self.cboChannelLayout.addItem(layout[1], layout[0])

    # Connect signals
    self.btnBrowse.clicked.connect(
        functools.partial(self.btnBrowse_clicked))
    self.cboSimpleProjectType.currentIndexChanged.connect(
        functools.partial(self.cboSimpleProjectType_index_changed,
                          self.cboSimpleProjectType))
    self.cboProfile.currentIndexChanged.connect(
        functools.partial(self.cboProfile_index_changed, self.cboProfile))
    self.cboSimpleTarget.currentIndexChanged.connect(
        functools.partial(self.cboSimpleTarget_index_changed,
                          self.cboSimpleTarget))
    self.cboSimpleVideoProfile.currentIndexChanged.connect(
        functools.partial(self.cboSimpleVideoProfile_index_changed,
                          self.cboSimpleVideoProfile))
    self.cboSimpleQuality.currentIndexChanged.connect(
        functools.partial(self.cboSimpleQuality_index_changed,
                          self.cboSimpleQuality))
    self.cboChannelLayout.currentIndexChanged.connect(self.updateChannels)
    get_app().window.ExportFrame.connect(self.updateProgressBar)

    # ********* Advanced Profile List **********
    # Loop through profiles
    self.profile_names = []
    self.profile_paths = {}
    for profile_folder in [info.USER_PROFILES_PATH, info.PROFILES_PATH]:
        # NOTE(review): `file` shadows the builtin here (pre-existing)
        for file in os.listdir(profile_folder):
            # Load Profile
            profile_path = os.path.join(profile_folder, file)
            profile = openshot.Profile(profile_path)

            # Add description of Profile to list
            profile_name = "%s (%sx%s)" % (profile.info.description,
                                           profile.info.width,
                                           profile.info.height)
            self.profile_names.append(profile_name)
            self.profile_paths[profile_name] = profile_path

    # Sort list
    self.profile_names.sort()

    # Loop through sorted profiles
    box_index = 0
    self.selected_profile_index = 0
    for profile_name in self.profile_names:
        # Add to dropdown
        self.cboProfile.addItem(
            self.getProfileName(self.getProfilePath(profile_name)),
            self.getProfilePath(profile_name))

        # Set default (if it matches the project)
        if app.project.get(['profile']) in profile_name:
            self.selected_profile_index = box_index

        # increment item counter
        box_index += 1

    # ********* Simple Project Type **********
    # load the simple project type dropdown
    presets = []
    for preset_path in [info.EXPORT_PRESETS_PATH, info.USER_PRESETS_PATH]:
        for file in os.listdir(preset_path):
            xmldoc = xml.parse(os.path.join(preset_path, file))
            # NOTE(review): `type` shadows the builtin here (pre-existing)
            type = xmldoc.getElementsByTagName("type")
            presets.append(_(type[0].childNodes[0].data))

    # Exclude duplicates
    type_index = 0
    selected_type = 0
    presets = list(set(presets))
    for item in sorted(presets):
        self.cboSimpleProjectType.addItem(item, item)
        if item == _("All Formats"):
            selected_type = type_index
        type_index += 1

    # Always select 'All Formats' option
    self.cboSimpleProjectType.setCurrentIndex(selected_type)

    # Populate all profiles
    self.populateAllProfiles(app.project.get(['profile']))

    # Connect framerate signals
    self.txtFrameRateNum.valueChanged.connect(self.updateFrameRate)
    self.txtFrameRateDen.valueChanged.connect(self.updateFrameRate)
    self.txtWidth.valueChanged.connect(self.updateFrameRate)
    self.txtHeight.valueChanged.connect(self.updateFrameRate)
    self.txtSampleRate.valueChanged.connect(self.updateFrameRate)
    self.txtChannels.valueChanged.connect(self.updateFrameRate)
    self.cboChannelLayout.currentIndexChanged.connect(self.updateFrameRate)

    # Determine the length of the timeline (in frames)
    self.updateFrameRate()
def uploadSequence(self):
    """ Start exporting video as a hard-coded PNG image sequence.

    Unlike ``accept``, every export setting here is fixed in code: output
    goes to ``~/Desktop/temp/IM-%05d.png`` for frames 1..17 at 1024x576.
    The timeline is optionally rescaled for FPS differences, written out
    frame by frame with an ``openshot.FFmpegWriter``, then closed and the
    global libopenshot settings are restored. A success dialog is shown
    at the end.
    """
    # get translations
    _ = get_app()._tr

    # Init progress bar
    # (only used to display a progress bar — currently disabled)
    # self.progressExportVideo.setMinimum(self.txtStartFrame.value())
    # self.progressExportVideo.setMaximum(self.txtEndFrame.value())
    # self.progressExportVideo.setValue(self.txtStartFrame.value())

    # Default image-sequence file name pattern
    self.image_format = "-%05d.png"

    export_type = _("Image Sequence")

    # Determine final exported file path (and replace blank paths with default ones)
    default_filename = "IM"
    default_folder = os.path.join(info.HOME_PATH, 'Desktop/temp')

    # The exported image sequence uses a fixed naming scheme
    file_name_with_ext = "%s%s" % (default_filename,
                                   self.image_format.strip())
    # Resolve the full (fixed) output path
    export_file_path = os.path.join(default_folder, file_name_with_ext)
    log.info("锁定了的文件保存路径: %s" % export_file_path)

    # Init export settings
    # All of the settings below are hard-coded
    video_settings = {
        "vformat": 'mp4',
        "vcodec": 'libx264',
        "fps": {
            "num": 25,
            "den": 1
        },
        "width": 1024,
        "height": 576,
        "pixel_ratio": {
            "num": 1,
            "den": 1
        },
        "video_bitrate": 15000000,
        "start_frame": 1,
        "end_frame": 17
    }

    audio_settings = {
        "acodec": 'aac',
        "sample_rate": 48000,
        "channels": 2,
        "channel_layout": 3,
        "audio_bitrate": 192000
    }

    # Override vcodec and format for Image Sequences
    image_ext = os.path.splitext(self.image_format.strip())[1].replace(
        ".", "")
    video_settings["vformat"] = image_ext
    if image_ext in ["jpg", "jpeg"]:
        video_settings["vcodec"] = "mjpeg"
    else:
        video_settings["vcodec"] = image_ext

    # Store updated export folder path in project file
    get_app().updates.update_untracked(["export_path"],
                                       os.path.dirname(export_file_path))

    # Mark project file as unsaved
    get_app().project.has_unsaved_changes = True

    # Set MaxSize (so we don't have any downsampling)
    self.timeline.SetMaxSize(video_settings.get("width"),
                             video_settings.get("height"))

    # Set lossless cache settings (temporarily)
    export_cache_object = openshot.CacheMemory(500)
    self.timeline.SetCache(export_cache_object)

    # Rescale all keyframes (if needed)
    if self.export_fps_factor != 1.0:
        log.info("导出文件fps因子不为1")
        # Get a copy of rescaled project data (this does not modify the active project)
        rescaled_app_data = get_app().project.rescale_keyframes(
            self.export_fps_factor)

        # Load the "export" Timeline reader with the JSON from the real timeline
        self.timeline.SetJson(json.dumps(rescaled_app_data))

    # Re-update the timeline FPS again (since the timeline just got clobbered)
    self.updateFrameRate()

    # Create FFmpegWriter
    try:
        w = openshot.FFmpegWriter(export_file_path)

        # Set video options
        if export_type in [
                _("Video & Audio"), _("Video Only"), _("Image Sequence")
        ]:
            w.SetVideoOptions(
                True, video_settings.get("vcodec"),
                openshot.Fraction(
                    video_settings.get("fps").get("num"),
                    video_settings.get("fps").get("den")),
                video_settings.get("width"), video_settings.get("height"),
                openshot.Fraction(
                    video_settings.get("pixel_ratio").get("num"),
                    video_settings.get("pixel_ratio").get("den")), False,
                False, video_settings.get("video_bitrate"))

        # Prepare the streams
        w.PrepareStreams()

        # These extra options should be set in an extra method
        # No feedback is given to the user
        # TODO: Tell user if option is not available

        # Muxing options for mp4/mov
        w.SetOption(openshot.VIDEO_STREAM, "muxing_preset",
                    "mp4_faststart")
        # Set the quality in case crf was selected
        # if "crf" in self.txtVideoBitRate.text():
        #     w.SetOption(openshot.VIDEO_STREAM, "crf", str(int(video_settings.get("video_bitrate"))))
        # # Set the quality in case qp was selected
        # if "qp" in self.txtVideoBitRate.text():
        #     w.SetOption(openshot.VIDEO_STREAM, "qp", str(int(video_settings.get("video_bitrate"))))

        # Open the writer
        w.Open()

        # Notify window of export started
        title_message = ""
        get_app().window.ExportStarted.emit(
            export_file_path, video_settings.get("start_frame"),
            video_settings.get("end_frame"))

        progressstep = max(
            1,
            round((video_settings.get("end_frame") -
                   video_settings.get("start_frame")) / 1000))
        start_time_export = time.time()
        start_frame_export = video_settings.get("start_frame")
        end_frame_export = video_settings.get("end_frame")

        # Write each frame in the selected range
        # This is the core of the export operation
        for frame in range(video_settings.get("start_frame"),
                           video_settings.get("end_frame") + 1):
            # Update progress bar (emit signal to main window)
            if (frame % progressstep) == 0:
                end_time_export = time.time()
                if (((frame - start_frame_export) != 0)
                        & ((end_time_export - start_time_export) != 0)):
                    # Estimated time remaining + encode speed
                    seconds_left = round(
                        (start_time_export - end_time_export) *
                        (frame - end_frame_export) /
                        (frame - start_frame_export))
                    fps_encode = ((frame - start_frame_export) /
                                  (end_time_export - start_time_export))
                    title_message = _(
                        "%(hours)d:%(minutes)02d:%(seconds)02d Remaining (%(fps)5.2f FPS)"
                    ) % {
                        'hours': seconds_left / 3600,
                        'minutes': (seconds_left / 60) % 60,
                        'seconds': seconds_left % 60,
                        'fps': fps_encode
                    }

            # Emit frame exported
            # get_app().window.ExportFrame.emit(title_message, video_settings.get("start_frame"), video_settings.get("end_frame"), frame)

            # Process events (to show the progress bar moving)
            # QCoreApplication.processEvents()

            # Write the frame object to the video
            w.WriteFrame(self.timeline.GetFrame(frame))

            # Check if we need to bail out
            # if not self.exporting:
            #     break

        # Close writer
        w.Close()

        # The block below only supported the progress display; removed
        '''
        # Emit final exported frame (with elapsed time)
        seconds_run = round((end_time_export - start_time_export))
        title_message = _("%(hours)d:%(minutes)02d:%(seconds)02d Elapsed (%(fps)5.2f FPS)") % {
            'hours': seconds_run / 3600,
            'minutes': (seconds_run / 60) % 60,
            'seconds': seconds_run % 60,
            'fps': fps_encode}
        get_app().window.ExportFrame.emit(title_message, video_settings.get("start_frame"), video_settings.get("end_frame"), frame)
        '''

    except Exception as e:
        # TODO: Find a better way to catch the error. This is the only way
        # I have found that does not throw an error
        error_type_str = str(e)
        log.info("Error type string: %s" % error_type_str)

        # Classify the libopenshot error by substring and report a metric
        if "InvalidChannels" in error_type_str:
            log.info("Error setting invalid # of channels (%s)" %
                     (audio_settings.get("channels")))
            track_metric_error("invalid-channels-%s-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec"),
                                audio_settings.get("channels")))
        elif "InvalidSampleRate" in error_type_str:
            log.info("Error setting invalid sample rate (%s)" %
                     (audio_settings.get("sample_rate")))
            track_metric_error("invalid-sample-rate-%s-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec"),
                                audio_settings.get("sample_rate")))
        elif "InvalidFormat" in error_type_str:
            log.info("Error setting invalid format (%s)" %
                     (video_settings.get("vformat")))
            track_metric_error("invalid-format-%s" %
                               (video_settings.get("vformat")))
        elif "InvalidCodec" in error_type_str:
            log.info("Error setting invalid codec (%s/%s/%s)" %
                     (video_settings.get("vformat"),
                      video_settings.get("vcodec"),
                      audio_settings.get("acodec")))
            track_metric_error("invalid-codec-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec")))
        elif "ErrorEncodingVideo" in error_type_str:
            log.info("Error encoding video frame (%s/%s/%s)" %
                     (video_settings.get("vformat"),
                      video_settings.get("vcodec"),
                      audio_settings.get("acodec")))
            track_metric_error("video-encode-%s-%s-%s" %
                               (video_settings.get("vformat"),
                                video_settings.get("vcodec"),
                                audio_settings.get("acodec")))

        # Show friendly error (strip the "<class '...'> " wrapper)
        friendly_error = error_type_str.split("> ")[0].replace("<", "")

        # Prompt error message
        msg = QMessageBox()
        msg.setWindowTitle(_("Export Error"))
        msg.setText(
            _("Sorry, there was an error exporting your video: \n%s") %
            friendly_error)
        msg.exec_()

    # Notify window that the export ended
    get_app().window.ExportEnded.emit(export_file_path)

    # Close timeline object
    self.timeline.Close()

    # Clear all cache
    self.timeline.ClearAllCache()

    # Re-set OMP thread enabled flag
    if self.s.get("omp_threads_enabled"):
        openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = False
    else:
        openshot.Settings.Instance().WAIT_FOR_VIDEO_PROCESSING_TASK = True

    # Return scale mode to lower quality scaling (for faster previews)
    openshot.Settings.Instance().HIGH_QUALITY_SCALING = False

    # Handle end of export (for non-canceled exports)
    # if self.s.get("show_finished_window") and self.exporting:
    #     # Hide cancel and export buttons
    #     self.cancel_button.setVisible(False)
    #     self.export_button.setVisible(False)
    #
    #     # Reveal done button
    #     self.close_button.setVisible(True)
    #
    #     # Make progress bar green (to indicate we are done)
    #     # from PyQt5.QtGui import QPalette
    #     # p = QPalette()
    #     # p.setColor(QPalette.Highlight, Qt.green)
    #     # self.progressExportVideo.setPalette(p)
    #
    #     # Raise the window
    #     self.show()
    # else:
    #     # Accept dialog
    #     super(SuperResolution, self).accept()

    # Show a "success" dialog
    success_hint = QDialog()
    success_hint.setWindowTitle("成功")
    success_hint.exec_()
def __init__(self, parent, *args):
    """Initialize the video preview widget.

    Sets default aspect/pixel ratios, clears all clip-transform state,
    configures the dark Qt palette, and starts a delayed-resize timer used
    to throttle MaxSizeChanged notifications to libopenshot.
    """
    # Invoke parent init
    QWidget.__init__(self, parent, *args)
    # NOTE(review): this attribute shadows QWidget.parent(); any later call to
    # self.parent() on this widget would fail — confirm nothing relies on it
    self.parent = parent

    # Init aspect ratio settings (default values)
    self.aspect_ratio = openshot.Fraction()
    self.pixel_ratio = openshot.Fraction()
    self.aspect_ratio.num = 16
    self.aspect_ratio.den = 9
    self.pixel_ratio.num = 1
    self.pixel_ratio.den = 1

    # Clip-transform state (populated while the user drags transform handles)
    self.transforming_clip = None
    self.transforming_clip_object = None
    self.transform = None
    self.topLeftHandle = None
    self.topRightHandle = None
    self.bottomLeftHandle = None
    self.bottomRightHandle = None
    self.topHandle = None
    self.bottomHandle = None
    self.leftHandle = None
    self.rightHandle = None
    self.centerHandle = None
    self.mouse_pressed = False
    self.mouse_dragging = False
    self.mouse_position = None
    self.transform_mode = None
    self.corner_offset_x = None
    self.corner_offset_y = None
    self.clipRect = None
    self.gravity_point = None
    self.original_clip_data = None

    # Mutex lock (guards access shared with the preview/render thread)
    self.mutex = QMutex()

    # Init Qt style properties (dark background, etc...)
    p = QPalette()
    p.setColor(QPalette.Window, QColor("#191919"))
    super().setPalette(p)
    super().setAttribute(Qt.WA_OpaquePaintEvent)
    super().setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)

    # Set mouse tracking (so we receive move events without a button pressed)
    self.setMouseTracking(True)

    # Init current frame's QImage
    self.current_image = None

    # Get a reference to the window object
    # (this variant uses the supplied parent rather than the main app window)
    self.win = self.parent  #get_app().window
    #self.win = get_app().window

    # Show Property timer
    # Timer to use a delay before sending MaxSizeChanged signals (so we don't spam libopenshot)
    self.delayed_size = None
    self.delayed_resize_timer = QTimer()
    self.delayed_resize_timer.setInterval(200)
    self.delayed_resize_timer.timeout.connect(self.delayed_resize_callback)
    self.delayed_resize_timer.stop()
def __init__(self):
    """Create and populate the Export dialog.

    Builds a private "export" openshot.Timeline mirroring the project
    timeline, fills in default paths/filenames, loads export profiles and
    presets, and wires all dialog signals.

    Fixes vs. previous revision:
      * removed doubled assignment of ``recommended_path``
      * guard ``export_path`` before ``os.path.exists`` (project value may be
        missing/None)
      * image-sequence pattern was ``"%05.png"`` (no ``d`` conversion), which
        produces no frame number — corrected to ``"%05d.png"``
      * renamed local ``type`` (shadowed the builtin) to ``type_nodes``
    """
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # get translations
    app = get_app()
    _ = app._tr

    # Get settings
    self.s = settings.get_settings()

    # Track metrics
    track_metric_screen("export-screen")

    # Dynamically load tabs from settings data
    self.settings_data = settings.get_settings().get_all_settings()

    # Add buttons to interface
    self.export_button = QPushButton(_('Export Video'))
    self.buttonBox.addButton(self.export_button, QDialogButtonBox.AcceptRole)
    self.buttonBox.addButton(QPushButton(_('Cancel')), QDialogButtonBox.RejectRole)
    self.exporting = False

    # Clear timeline preview cache (to get more available memory)
    if get_app().window.cache_object:
        get_app().window.cache_object.Clear()

    # Hide audio channels
    self.lblChannels.setVisible(False)
    self.txtChannels.setVisible(False)

    # Get the original timeline settings
    width = get_app().window.timeline_sync.timeline.info.width
    height = get_app().window.timeline_sync.timeline.info.height
    fps = get_app().window.timeline_sync.timeline.info.fps
    sample_rate = get_app().window.timeline_sync.timeline.info.sample_rate
    channels = get_app().window.timeline_sync.timeline.info.channels
    channel_layout = get_app().window.timeline_sync.timeline.info.channel_layout

    # Create new "export" openshot.Timeline object
    self.timeline = openshot.Timeline(width, height,
                                      openshot.Fraction(fps.num, fps.den),
                                      sample_rate, channels, channel_layout)
    # Init various properties (mirror the real timeline's info)
    self.timeline.info.channel_layout = get_app().window.timeline_sync.timeline.info.channel_layout
    self.timeline.info.has_audio = get_app().window.timeline_sync.timeline.info.has_audio
    self.timeline.info.has_video = get_app().window.timeline_sync.timeline.info.has_video
    self.timeline.info.video_length = get_app().window.timeline_sync.timeline.info.video_length
    self.timeline.info.duration = get_app().window.timeline_sync.timeline.info.duration
    self.timeline.info.sample_rate = get_app().window.timeline_sync.timeline.info.sample_rate
    self.timeline.info.channels = get_app().window.timeline_sync.timeline.info.channels

    # Load the "export" Timeline reader with the JSON from the real timeline
    json_timeline = json.dumps(get_app().project._data)
    self.timeline.SetJson(json_timeline)

    # Open the "export" Timeline reader
    self.timeline.Open()

    # Default export path
    # BUGFIX: was a doubled assignment ("recommended_path = recommended_path = ...")
    # (os.path.join with a single argument is a no-op, so use the path directly)
    recommended_path = info.HOME_PATH
    if app.project.current_filepath:
        recommended_path = os.path.dirname(app.project.current_filepath)
    export_path = get_app().project.get(["export_path"])
    # BUGFIX: guard against a missing/None export_path before os.path.exists()
    if export_path and os.path.exists(export_path):
        # Use last selected export path
        self.txtExportFolder.setText(export_path)
    else:
        # Default to home dir
        self.txtExportFolder.setText(recommended_path)

    # Is this a saved project?
    if not get_app().project.current_filepath:
        # Not saved yet
        self.txtFileName.setText(_("Untitled Project"))
    else:
        # Yes, project is saved
        # Get just the filename
        parent_path, filename = os.path.split(get_app().project.current_filepath)
        filename, ext = os.path.splitext(filename)
        self.txtFileName.setText(filename.replace("_", " ").replace("-", " ").capitalize())

    # Default image type
    # BUGFIX: "%05.png" had no 'd' conversion, so image sequences got no frame number
    self.txtImageFormat.setText("%05d.png")

    # Loop through Export To options
    export_options = [_("Video & Audio"), _("Image Sequence")]
    for option in export_options:
        # append option to list
        self.cboExportTo.addItem(option)

    # Add channel layouts
    self.channel_layout_choices = []
    for layout in [(openshot.LAYOUT_MONO, _("Mono (1 Channel)")),
                   (openshot.LAYOUT_STEREO, _("Stereo (2 Channel)")),
                   (openshot.LAYOUT_SURROUND, _("Surround (3 Channel)")),
                   (openshot.LAYOUT_5POINT1, _("Surround (5.1 Channel)")),
                   (openshot.LAYOUT_7POINT1, _("Surround (7.1 Channel)"))]:
        log.info(layout)
        self.channel_layout_choices.append(layout[0])
        self.cboChannelLayout.addItem(layout[1], layout[0])

    # Connect signals
    self.btnBrowse.clicked.connect(functools.partial(self.btnBrowse_clicked))
    self.cboSimpleProjectType.currentIndexChanged.connect(
        functools.partial(self.cboSimpleProjectType_index_changed, self.cboSimpleProjectType))
    self.cboProfile.currentIndexChanged.connect(
        functools.partial(self.cboProfile_index_changed, self.cboProfile))
    self.cboSimpleTarget.currentIndexChanged.connect(
        functools.partial(self.cboSimpleTarget_index_changed, self.cboSimpleTarget))
    self.cboSimpleVideoProfile.currentIndexChanged.connect(
        functools.partial(self.cboSimpleVideoProfile_index_changed, self.cboSimpleVideoProfile))
    self.cboSimpleQuality.currentIndexChanged.connect(
        functools.partial(self.cboSimpleQuality_index_changed, self.cboSimpleQuality))
    self.cboChannelLayout.currentIndexChanged.connect(self.updateChannels)
    get_app().window.ExportFrame.connect(self.updateProgressBar)

    # ********* Advanced Profile List **********
    # Loop through profiles
    self.profile_names = []
    self.profile_paths = {}
    for profile_folder in [info.USER_PROFILES_PATH, info.PROFILES_PATH]:
        for file in os.listdir(profile_folder):
            # Load Profile
            profile_path = os.path.join(profile_folder, file)
            profile = openshot.Profile(profile_path)

            # Add description of Profile to list
            self.profile_names.append(profile.info.description)
            self.profile_paths[profile.info.description] = profile_path

    # Sort list
    self.profile_names.sort()

    # Loop through sorted profiles
    box_index = 0
    self.selected_profile_index = 0
    for profile_name in self.profile_names:
        # Add to dropdown
        self.cboProfile.addItem(profile_name, self.profile_paths[profile_name])

        # Set default (if it matches the project)
        if app.project.get(['profile']) == profile_name:
            self.selected_profile_index = box_index

        # increment item counter
        box_index += 1

    # ********* Simple Project Type **********
    # load the simple project type dropdown
    presets = []
    for file in os.listdir(info.EXPORT_PRESETS_DIR):
        xmldoc = xml.parse(os.path.join(info.EXPORT_PRESETS_DIR, file))
        # BUGFIX: renamed 'type' (shadowed the builtin) to 'type_nodes'
        type_nodes = xmldoc.getElementsByTagName("type")
        presets.append(_(type_nodes[0].childNodes[0].data))

    # Exclude duplicates
    type_index = 0
    selected_type = 0
    presets = list(set(presets))
    for item in sorted(presets):
        self.cboSimpleProjectType.addItem(item, item)
        if item == _("All Formats"):
            selected_type = type_index
        type_index += 1

    # Always select 'All Formats' option
    self.cboSimpleProjectType.setCurrentIndex(selected_type)

    # Populate all profiles
    self.populateAllProfiles(app.project.get(['profile']))

    # Connect framerate signals
    self.txtFrameRateNum.valueChanged.connect(self.updateFrameRate)
    self.txtFrameRateDen.valueChanged.connect(self.updateFrameRate)
    self.txtWidth.valueChanged.connect(self.updateFrameRate)
    self.txtHeight.valueChanged.connect(self.updateFrameRate)
    self.txtSampleRate.valueChanged.connect(self.updateFrameRate)
    self.txtChannels.valueChanged.connect(self.updateFrameRate)
    self.cboChannelLayout.currentIndexChanged.connect(self.updateFrameRate)

    # Determine the length of the timeline (in frames)
    self.updateFrameRate()
def accept(self):
    """ Start exporting video.

    Validates the output path, builds the export settings dicts from the
    dialog widgets, writes every frame in the selected range through an
    openshot.FFmpegWriter, reports progress via window signals, and finally
    restores the timeline/OMP settings and accepts the dialog.

    Fixes vs. previous revision:
      * the output path (``export_file_path``) was overwritten with ""
        before ``ExportStarted.emit`` and later with the progress status
        string, so ``ExportStarted``/``ExportEnded`` received the wrong
        value — a separate ``title_message`` now carries the status text
      * boolean ``&`` replaced with ``and``
      * integer ``//`` used for the h/m components fed to ``%d`` placeholders
      * local ``file`` (shadowed the builtin) renamed to ``existing_file``
    """
    # get translations
    app = get_app()
    _ = app._tr

    # Disable controls while exporting
    self.txtFileName.setEnabled(False)
    self.txtExportFolder.setEnabled(False)
    self.tabWidget.setEnabled(False)
    self.export_button.setEnabled(False)
    self.exporting = True

    # Determine type of export (video+audio, video, audio, image sequences)
    # _("Video & Audio"), _("Video Only"), _("Audio Only"), _("Image Sequence")
    export_type = self.cboExportTo.currentText()

    # Determine final exported file path
    if export_type != _("Image Sequence"):
        file_name_with_ext = "%s.%s" % (self.txtFileName.text().strip(),
                                        self.txtVideoFormat.text().strip())
    else:
        file_name_with_ext = "%s%s" % (self.txtFileName.text().strip(),
                                       self.txtImageFormat.text().strip())
    export_file_path = os.path.join(self.txtExportFolder.text().strip(), file_name_with_ext)
    log.info(export_file_path)

    # Translate object
    _ = get_app()._tr

    # Refuse to overwrite a file that is used as a project input
    existing_file = File.get(path=export_file_path)
    if existing_file:
        QMessageBox.question(
            self, _("Export Video"),
            _("%s is an input file.\nPlease choose a different name.") % file_name_with_ext,
            QMessageBox.Ok)
        # Re-enable controls and bail out
        self.txtFileName.setEnabled(True)
        self.txtExportFolder.setEnabled(True)
        self.tabWidget.setEnabled(True)
        self.export_button.setEnabled(True)
        self.exporting = False
        return

    # Confirm overwrite of an existing output file
    if os.path.exists(export_file_path) and export_type in [
            _("Video & Audio"), _("Video Only"), _("Audio Only")]:
        # File already exists! Prompt user
        ret = QMessageBox.question(
            self, _("Export Video"),
            _("%s already exists.\nDo you want to replace it?") % file_name_with_ext,
            QMessageBox.No | QMessageBox.Yes)
        if ret == QMessageBox.No:
            # Stop and don't do anything
            # Re-enable controls
            self.txtFileName.setEnabled(True)
            self.txtExportFolder.setEnabled(True)
            self.tabWidget.setEnabled(True)
            self.export_button.setEnabled(True)
            self.exporting = False
            return

    # Init export settings
    video_settings = {
        "vformat": self.txtVideoFormat.text(),
        "vcodec": self.txtVideoCodec.text(),
        "fps": {"num": self.txtFrameRateNum.value(),
                "den": self.txtFrameRateDen.value()},
        "width": self.txtWidth.value(),
        "height": self.txtHeight.value(),
        "pixel_ratio": {"num": self.txtPixelRatioNum.value(),
                        "den": self.txtPixelRatioDen.value()},
        "video_bitrate": int(self.convert_to_bytes(self.txtVideoBitRate.text())),
        "start_frame": self.txtStartFrame.value(),
        # end frame is inclusive in the UI; range() below needs one past it
        "end_frame": self.txtEndFrame.value() + 1
    }

    audio_settings = {
        "acodec": self.txtAudioCodec.text(),
        "sample_rate": self.txtSampleRate.value(),
        "channels": self.txtChannels.value(),
        "channel_layout": self.cboChannelLayout.currentData(),
        "audio_bitrate": int(self.convert_to_bytes(self.txtAudioBitrate.text()))
    }

    # Override vcodec and format for Image Sequences
    if export_type == _("Image Sequence"):
        image_ext = os.path.splitext(self.txtImageFormat.text().strip())[1].replace(".", "")
        video_settings["vformat"] = image_ext
        if image_ext in ["jpg", "jpeg"]:
            video_settings["vcodec"] = "mjpeg"
        else:
            video_settings["vcodec"] = image_ext

    # Set MaxSize (so we don't have any downsampling)
    self.timeline.SetMaxSize(video_settings.get("width"), video_settings.get("height"))

    # Set lossless cache settings (temporarily)
    export_cache_object = openshot.CacheMemory(250)
    self.timeline.SetCache(export_cache_object)

    # Create FFmpegWriter
    try:
        w = openshot.FFmpegWriter(export_file_path)

        # Set video options
        if export_type in [_("Video & Audio"), _("Video Only"), _("Image Sequence")]:
            w.SetVideoOptions(
                True,
                video_settings.get("vcodec"),
                openshot.Fraction(video_settings.get("fps").get("num"),
                                  video_settings.get("fps").get("den")),
                video_settings.get("width"),
                video_settings.get("height"),
                openshot.Fraction(video_settings.get("pixel_ratio").get("num"),
                                  video_settings.get("pixel_ratio").get("den")),
                False, False,
                video_settings.get("video_bitrate"))

        # Set audio options
        if export_type in [_("Video & Audio"), _("Audio Only")]:
            w.SetAudioOptions(True,
                              audio_settings.get("acodec"),
                              audio_settings.get("sample_rate"),
                              audio_settings.get("channels"),
                              audio_settings.get("channel_layout"),
                              audio_settings.get("audio_bitrate"))

        # Open the writer
        w.Open()

        # Notify window of export started
        # BUGFIX: keep export_file_path intact; use title_message for status text
        title_message = ""
        get_app().window.ExportStarted.emit(
            export_file_path,
            video_settings.get("start_frame"),
            video_settings.get("end_frame"))

        # Emit progress roughly 1000 times over the whole export
        progressstep = max(1, round((video_settings.get("end_frame") -
                                     video_settings.get("start_frame")) / 1000))
        start_time_export = time.time()
        start_frame_export = video_settings.get("start_frame")
        end_frame_export = video_settings.get("end_frame")

        # Write each frame in the selected range
        for frame in range(video_settings.get("start_frame"),
                           video_settings.get("end_frame")):
            # Update progress bar (emit signal to main window)
            if (frame % progressstep) == 0:
                end_time_export = time.time()
                if (frame - start_frame_export) != 0 and (end_time_export - start_time_export) != 0:
                    # elapsed/frames extrapolated over the remaining frames
                    seconds_left = round((start_time_export - end_time_export) *
                                         (frame - end_frame_export) /
                                         (frame - start_frame_export))
                    fps_encode = ((frame - start_frame_export) /
                                  (end_time_export - start_time_export))
                    title_message = _(
                        "%(hours)d:%(minutes)02d:%(seconds)02d Remaining (%(fps)5.2f FPS)") % {
                        'hours': seconds_left // 3600,
                        'minutes': (seconds_left // 60) % 60,
                        'seconds': seconds_left % 60,
                        'fps': fps_encode}

                get_app().window.ExportFrame.emit(
                    title_message,
                    video_settings.get("start_frame"),
                    video_settings.get("end_frame"), frame)

                # Process events (to show the progress bar moving)
                QCoreApplication.processEvents()

            # Write the frame object to the video
            w.WriteFrame(self.timeline.GetFrame(frame))

            # Check if we need to bail out (user canceled)
            if not self.exporting:
                break

        # Close writer
        w.Close()

    except Exception as e:
        # TODO: Find a better way to catch the error. This is the only way I have
        # found that does not throw an error
        error_type_str = str(e)
        log.info("Error type string: %s" % error_type_str)

        if "InvalidChannels" in error_type_str:
            log.info("Error setting invalid # of channels (%s)" % (audio_settings.get("channels")))
            track_metric_error("invalid-channels-%s-%s-%s-%s" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec"), audio_settings.get("channels")))
        elif "InvalidSampleRate" in error_type_str:
            log.info("Error setting invalid sample rate (%s)" % (audio_settings.get("sample_rate")))
            track_metric_error("invalid-sample-rate-%s-%s-%s-%s" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec"), audio_settings.get("sample_rate")))
        elif "InvalidFormat" in error_type_str:
            log.info("Error setting invalid format (%s)" % (video_settings.get("vformat")))
            track_metric_error("invalid-format-%s" % (video_settings.get("vformat")))
        elif "InvalidCodec" in error_type_str:
            log.info("Error setting invalid codec (%s/%s/%s)" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec")))
            track_metric_error("invalid-codec-%s-%s-%s" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec")))
        elif "ErrorEncodingVideo" in error_type_str:
            log.info("Error encoding video frame (%s/%s/%s)" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec")))
            track_metric_error("video-encode-%s-%s-%s" % (
                video_settings.get("vformat"), video_settings.get("vcodec"),
                audio_settings.get("acodec")))

        # Show friendly error
        friendly_error = error_type_str.split("> ")[0].replace("<", "")

        # Prompt error message
        msg = QMessageBox()
        _ = get_app()._tr
        msg.setWindowTitle(_("Export Error"))
        msg.setText(_("Sorry, there was an error exporting your video: \n%s") % friendly_error)
        msg.exec_()

    # Notify window that the export has ended (with the real output path)
    get_app().window.ExportEnded.emit(export_file_path)

    # Close timeline object
    self.timeline.Close()

    # Clear all cache
    self.timeline.ClearAllCache()

    # Re-set OMP thread enabled flag
    if self.s.get("omp_threads_enabled"):
        os.environ['OS2_OMP_THREADS'] = "1"
    else:
        os.environ['OS2_OMP_THREADS'] = "0"

    # Accept dialog
    super(Export, self).accept()
def accept(self):
    """ Start exporting video.

    Simpler export variant: always exports video+audio to a single file,
    writes every frame in the selected range through an
    openshot.FFmpegWriter, and emits progress signals for every frame.
    """
    # Disable controls while exporting
    self.txtFileName.setEnabled(False)
    self.txtExportFolder.setEnabled(False)
    self.tabWidget.setEnabled(False)
    self.export_button.setEnabled(False)
    self.exporting = True

    # Determine final exported file path
    file_name_with_ext = "%s.%s" % (self.txtFileName.text().strip(),
                                    self.txtVideoFormat.text().strip())
    export_file_path = os.path.join(self.txtExportFolder.text().strip(), file_name_with_ext)
    log.info(export_file_path)

    # Translate object
    _ = get_app()._tr

    # Handle exception
    if os.path.exists(export_file_path):
        # File already exists! Prompt user
        ret = QMessageBox.question(
            self, _("Export Video"),
            _("%s already exists.\nDo you want to replace it?") % file_name_with_ext,
            QMessageBox.No | QMessageBox.Yes)
        if ret == QMessageBox.No:
            # Stop and don't do anything
            # Re-enable controls
            self.txtFileName.setEnabled(True)
            self.txtExportFolder.setEnabled(True)
            self.tabWidget.setEnabled(True)
            self.export_button.setEnabled(True)
            self.exporting = False
            return

    # Init export settings (read from the dialog widgets)
    video_settings = {
        "vformat": self.txtVideoFormat.text(),
        "vcodec": self.txtVideoCodec.text(),
        "fps": {
            "num": self.txtFrameRateNum.value(),
            "den": self.txtFrameRateDen.value()
        },
        "width": self.txtWidth.value(),
        "height": self.txtHeight.value(),
        "pixel_ratio": {
            "num": self.txtPixelRatioNum.value(),
            "den": self.txtPixelRatioDen.value()
        },
        "video_bitrate": int(self.convert_to_bytes(self.txtVideoBitRate.text())),
        "start_frame": self.txtStartFrame.value(),
        # end frame is inclusive in the UI; range() below needs one past it
        "end_frame": self.txtEndFrame.value() + 1
    }

    audio_settings = {
        "acodec": self.txtAudioCodec.text(),
        "sample_rate": self.txtSampleRate.value(),
        "channels": self.txtChannels.value(),
        "channel_layout": self.cboChannelLayout.currentData(),
        "audio_bitrate": int(self.convert_to_bytes(self.txtAudioBitrate.text()))
    }

    # Set lossless cache settings (temporarily)
    export_cache_object = openshot.CacheMemory(250)
    self.timeline.SetCache(export_cache_object)

    # Create FFmpegWriter
    try:
        w = openshot.FFmpegWriter(export_file_path)

        # Set video options
        w.SetVideoOptions(
            True, video_settings.get("vcodec"),
            openshot.Fraction(
                video_settings.get("fps").get("num"),
                video_settings.get("fps").get("den")),
            video_settings.get("width"), video_settings.get("height"),
            openshot.Fraction(
                video_settings.get("pixel_ratio").get("num"),
                video_settings.get("pixel_ratio").get("den")), False, False,
            video_settings.get("video_bitrate"))

        # Set audio options
        w.SetAudioOptions(True, audio_settings.get("acodec"),
                          audio_settings.get("sample_rate"),
                          audio_settings.get("channels"),
                          audio_settings.get("channel_layout"),
                          audio_settings.get("audio_bitrate"))

        # Open the writer
        w.Open()

        # Notify window of export started
        get_app().window.ExportStarted.emit(
            export_file_path, video_settings.get("start_frame"),
            video_settings.get("end_frame"))

        # Write each frame in the selected range
        for frame in range(video_settings.get("start_frame"),
                           video_settings.get("end_frame")):
            # Update progress bar (emit signal to main window)
            # NOTE(review): emitted for every frame (no throttling, unlike the
            # other accept() variant) — confirm this is intentional
            get_app().window.ExportFrame.emit(
                export_file_path, video_settings.get("start_frame"),
                video_settings.get("end_frame"), frame)

            # Process events (to show the progress bar moving)
            QCoreApplication.processEvents()

            # Write the frame object to the video
            w.WriteFrame(self.timeline.GetFrame(frame))

            # Check if we need to bail out (user canceled)
            if not self.exporting:
                break

        # Close writer
        w.Close()

    except Exception as e:
        # TODO: Find a better way to catch the error. This is the only way I have
        # found that does not throw an error
        error_type_str = str(e)
        log.info("Error type string: %s" % error_type_str)

        if "InvalidChannels" in error_type_str:
            log.info("Error setting invalid # of channels (%s)" % (audio_settings.get("channels")))
            track_metric_error("invalid-channels-%s-%s-%s-%s" % (video_settings.get("vformat"),
                                                                 video_settings.get("vcodec"),
                                                                 audio_settings.get("acodec"),
                                                                 audio_settings.get("channels")))
        elif "InvalidSampleRate" in error_type_str:
            log.info("Error setting invalid sample rate (%s)" % (audio_settings.get("sample_rate")))
            track_metric_error("invalid-sample-rate-%s-%s-%s-%s" % (video_settings.get("vformat"),
                                                                    video_settings.get("vcodec"),
                                                                    audio_settings.get("acodec"),
                                                                    audio_settings.get("sample_rate")))
        elif "InvalidFormat" in error_type_str:
            log.info("Error setting invalid format (%s)" % (video_settings.get("vformat")))
            track_metric_error("invalid-format-%s" % (video_settings.get("vformat")))
        elif "InvalidCodec" in error_type_str:
            log.info("Error setting invalid codec (%s/%s/%s)" % (video_settings.get("vformat"),
                                                                 video_settings.get("vcodec"),
                                                                 audio_settings.get("acodec")))
            track_metric_error("invalid-codec-%s-%s-%s" % (video_settings.get("vformat"),
                                                           video_settings.get("vcodec"),
                                                           audio_settings.get("acodec")))
        elif "ErrorEncodingVideo" in error_type_str:
            log.info("Error encoding video frame (%s/%s/%s)" % (video_settings.get("vformat"),
                                                                video_settings.get("vcodec"),
                                                                audio_settings.get("acodec")))
            track_metric_error("video-encode-%s-%s-%s" % (video_settings.get("vformat"),
                                                          video_settings.get("vcodec"),
                                                          audio_settings.get("acodec")))

        # Show friendly error
        friendly_error = error_type_str.split("> ")[0].replace("<", "")

        # Prompt error message
        msg = QMessageBox()
        _ = get_app()._tr
        msg.setWindowTitle(_("Export Error"))
        msg.setText(
            _("Sorry, there was an error exporting your video: \n%s") % friendly_error)
        msg.exec_()

    # Notify window that the export has ended
    get_app().window.ExportEnded.emit(export_file_path)

    # Close timeline object
    self.timeline.Close()

    # Clear cache
    export_cache_object.Clear()

    # Accept dialog
    super(Export, self).accept()
def initTimeline(self, cuts, clips_json):
    """Build and return a libopenshot Timeline for previewing.

    Creates a Timeline using the current project's dimensions/audio settings,
    adds a single media clip, opens the timeline, and returns it; returns
    ``None`` if the clip cannot be loaded.

    NOTE(review): a large block of unreachable experimental code (a second
    timeline build driven by *cuts*/*clips_json*, debug prints, and a dangling
    triple-quoted fragment) followed the ``return timeline`` below and has
    been removed — it could never execute. As a consequence *cuts* and
    *clips_json* are currently unused; the parameters are kept so callers
    are unaffected.
    """
    project = get_app().project

    # Get some settings from the project
    fps = project.get(["fps"])
    width = project.get(["width"])
    height = project.get(["height"])
    sample_rate = project.get(["sample_rate"])
    channels = project.get(["channels"])
    channel_layout = project.get(["channel_layout"])

    # Create an instance of a libopenshot Timeline object
    timeline = openshot.Timeline(width, height,
                                 openshot.Fraction(fps["num"], fps["den"]),
                                 sample_rate, channels, channel_layout)

    try:
        # Add clip for current preview file
        # TODO(review): hard-coded developer path — should come from the
        # project/selected file instead
        clip = openshot.Clip("/Users/admin/Downloads/BLACKPINK_Kill_This_Love.mp4")

        # Show waveform for audio files
        if not clip.Reader().info.has_video and clip.Reader().info.has_audio:
            clip.Waveform(True)

        # Set has_audio property
        #timeline.info.has_audio = clip.Reader().info.has_audio

        timeline.AddClip(clip)
    except Exception:
        # NOTE(review): self.file_path is not assigned in this method — verify
        # it is set elsewhere before relying on this log line
        log.error('Failed to load media file into preview player: %s' % self.file_path)
        return None

    timeline.Open()
    return timeline
def __init__(self, cuts_json, clips_json, preview=False):
    """Create the cutting/preview dialog.

    Builds a single-clip libopenshot Timeline for the media being cut,
    embeds a VideoWidget preview, starts the preview thread, and wires the
    slider/buttons.

    NOTE(review): the name ``file`` used throughout this method is NOT a
    parameter and is not defined anywhere in view (``self.file = file`` is
    commented out) — every ``file.data`` / ``self.file`` access below will
    raise at runtime. Verify where ``file`` was meant to come from (it looks
    like a removed parameter).
    """
    _ = get_app()._tr

    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # Track metrics
    track_metric_screen("cutting-screen")

    # If preview, hide cutting controls
    if preview:
        self.lblInstructions.setVisible(False)
        self.widgetControls.setVisible(False)
        self.setWindowTitle(_("Preview"))

    # Selected cut range (frame numbers and thumbnail images)
    self.start_frame = 1
    self.start_image = None
    self.end_frame = 1
    self.end_image = None

    project = get_app().project

    # Keep track of file object
    #self.file = file
    self.file_path = file.absolute_path()
    self.video_length = int(file.data['video_length'])
    self.fps_num = int(file.data['fps']['num'])
    self.fps_den = int(file.data['fps']['den'])
    self.fps = float(self.fps_num) / float(self.fps_den)
    self.width = int(file.data['width'])
    self.height = int(file.data['height'])
    self.sample_rate = int(file.data['sample_rate'])
    self.channels = int(file.data['channels'])
    self.channel_layout = int(file.data['channel_layout'])

    # Open video file with Reader
    log.info(self.file_path)

    # Create an instance of a libopenshot Timeline object
    self.r = openshot.Timeline(
        self.width, self.height,
        openshot.Fraction(self.fps_num, self.fps_den), self.sample_rate,
        self.channels, self.channel_layout)
    self.r.info.channel_layout = self.channel_layout

    try:
        # Add clip for current preview file
        self.clip = openshot.Clip(self.file_path)

        # Show waveform for audio files
        if not self.clip.Reader().info.has_video and self.clip.Reader(
        ).info.has_audio:
            self.clip.Waveform(True)

        # Set has_audio property
        self.r.info.has_audio = self.clip.Reader().info.has_audio

        if preview:
            # Display frame #'s during preview
            self.clip.display = openshot.FRAME_DISPLAY_CLIP

        self.r.AddClip(self.clip)
    except:
        log.error('Failed to load media file into preview player: %s' % self.file_path)
        return

    # Add Video Widget
    self.videoPreview = VideoWidget()
    self.videoPreview.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
    self.verticalLayout.insertWidget(0, self.videoPreview)

    # Set max size of video preview (for speed)
    viewport_rect = self.videoPreview.centeredViewport(
        self.videoPreview.width(), self.videoPreview.height())
    self.r.SetMaxSize(viewport_rect.width(), viewport_rect.height())

    # Open reader
    self.r.Open()

    # Start the preview thread
    self.initialized = False
    self.transforming_clip = False
    self.preview_parent = PreviewParent()
    self.preview_parent.Init(self, self.r, self.videoPreview)
    self.preview_thread = self.preview_parent.worker

    # Set slider constraints
    self.sliderIgnoreSignal = False
    self.sliderVideo.setMinimum(1)
    self.sliderVideo.setMaximum(self.video_length)
    self.sliderVideo.setSingleStep(1)
    # NOTE(review): duplicated call — the line above already sets the single step
    self.sliderVideo.setSingleStep(1)
    self.sliderVideo.setPageStep(24)

    # Determine if a start or end attribute is in this file
    start_frame = 1
    if 'start' in self.file.data.keys():
        start_frame = (float(self.file.data['start']) * self.fps) + 1

    # Display start frame (and then the previous frame)
    QTimer.singleShot(
        500, functools.partial(self.sliderVideo.setValue, start_frame + 1))
    QTimer.singleShot(
        600, functools.partial(self.sliderVideo.setValue, start_frame))

    # Connect signals
    self.actionPlay.triggered.connect(self.actionPlay_Triggered)
    self.btnPlay.clicked.connect(self.btnPlay_clicked)
    self.sliderVideo.valueChanged.connect(self.sliderVideo_valueChanged)
    self.btnStart.clicked.connect(self.btnStart_clicked)
    self.btnEnd.clicked.connect(self.btnEnd_clicked)
    self.btnClear.clicked.connect(self.btnClear_clicked)
    self.btnAddClip.clicked.connect(self.btnAddClip_clicked)
    self.initialized = True
def __init__(self):
    """Create the upload dialog.

    Wires the cancel/upload buttons, snapshots the currently available
    timeline clips, and builds a private "export" openshot.Timeline that
    mirrors the real project timeline.
    """
    # Create dialog class
    QDialog.__init__(self)

    # Load UI from designer
    ui_util.load_ui(self, self.ui_path)

    # Init UI
    ui_util.init_ui(self)

    # get translations
    _ = get_app()._tr

    # Get settings
    self.s = settings.get_settings()

    # Bind the cancel button to the cancel handler
    self.cancel_btn.clicked.connect(self.cancel)

    # Bind the upload button to the uploadSequence handler
    self.upload_btn.clicked.connect(self.uploadSequence)

    # Keep the currently available timeline clips on the instance
    self.avail_clips = get_app().window.timeline_sync.timeline.Clips()

    # Get the original timeline settings
    width = get_app().window.timeline_sync.timeline.info.width
    height = get_app().window.timeline_sync.timeline.info.height
    fps = get_app().window.timeline_sync.timeline.info.fps
    sample_rate = get_app().window.timeline_sync.timeline.info.sample_rate
    channels = get_app().window.timeline_sync.timeline.info.channels
    channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout

    # Create new "export" openshot.Timeline object
    self.timeline = openshot.Timeline(width, height,
                                      openshot.Fraction(fps.num, fps.den),
                                      sample_rate, channels, channel_layout)
    # Init various properties (mirror the real timeline's info)
    self.timeline.info.channel_layout = get_app(
    ).window.timeline_sync.timeline.info.channel_layout
    self.timeline.info.has_audio = get_app(
    ).window.timeline_sync.timeline.info.has_audio
    self.timeline.info.has_video = get_app(
    ).window.timeline_sync.timeline.info.has_video
    self.timeline.info.video_length = get_app(
    ).window.timeline_sync.timeline.info.video_length
    self.timeline.info.duration = get_app(
    ).window.timeline_sync.timeline.info.duration
    self.timeline.info.sample_rate = get_app(
    ).window.timeline_sync.timeline.info.sample_rate
    self.timeline.info.channels = get_app(
    ).window.timeline_sync.timeline.info.channels

    # Load the "export" Timeline reader with the JSON from the real timeline
    json_timeline = json.dumps(get_app().project._data)
    self.timeline.SetJson(json_timeline)

    # Open the "export" Timeline reader
    self.timeline.Open()

    # Determine the length of the timeline (in frames)
    self.updateFrameRate()