def _discover_one(self): """ Callback to start media discovery process, used to retrieve video parameters. """ discoverer = Discoverer(self.source) discoverer.connect('discovered', self._discovered) discoverer.discover() return False
def analyse(self, filePath, timeout=None):
    deferred = defer.Deferred()
    discoverer = Discoverer(filePath, max_interleave=compconsts.MAX_INTERLEAVE)
    discoverer.connect('discovered', self._discoverer_callback)
    to = utils.createTimeout(timeout, self.__analyseTimeout, discoverer)
    self._pending[discoverer] = (filePath, deferred, to)
    discoverer.discover()
    return deferred
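# Hypothetical usage sketch for the Twisted-flavoured analyse() above: it
# returns a Deferred that is expected to fire once discovery completes or the
# timeout expires. "analyser", the file path and the callback names are
# illustrative, not part of the original code.
def on_analysed(result):
    print "analysis finished:", result

def on_failed(failure):
    print "analysis failed:", failure

d = analyser.analyse('/path/to/clip.avi', timeout=30)
d.addCallbacks(on_analysed, on_failed)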
class GstFile:
    """ Analyses one or more files and prints out the multimedia information of each file. """
    def __init__(self, files, attrs):
        self.files = files
        self.attrs = attrs
        self.mainloop = gobject.MainLoop()
        self.current = None

    def run(self):
        gobject.idle_add(self._discover_one)
        self.mainloop.run()

    def _discovered(self, d, ismedia):
        self.attrs['media type'] = str(d.mimetype)
        if d.is_video:
            #self.attrs['video.caps'] = str(d.videocaps)
            self.attrs['video.width'] = d.videowidth
            self.attrs['video.height'] = d.videoheight
            self.attrs['video.length'] = d.videolength
            self.attrs['video.framerate'] = d.videorate.num * 1.0 / d.videorate.denom
            self.attrs['video.framerate.num'] = d.videorate.num
            self.attrs['video.framerate.denom'] = d.videorate.denom
        if d.is_audio:
            #self.attrs['audio.caps'] = str(d.audiocaps)
            self.attrs['audio.format'] = d.audiofloat and 'floating-point' or 'integer'
            self.attrs['audio.samplerate'] = d.audiorate
            self.attrs['audio.samplewidth'] = d.audiowidth
            self.attrs['audio.sampledepth'] = d.audiodepth
            self.attrs['audio.length'] = d.audiolength
            self.attrs['audio.channels'] = d.audiochannels
        self.current = None
        if len(self.files):
            print "\n"
            gobject.idle_add(self._discover_one)

    def _discover_one(self):
        if not len(self.files):
            gobject.idle_add(self.mainloop.quit)
            return False
        filename = self.files.pop(0)
        if not os.path.isfile(filename):
            gobject.idle_add(self._discover_one)
            return False
        # create a discoverer for that file
        self.current = Discoverer(filename)
        # connect a callback on the 'discovered' signal
        self.current.connect('discovered', self._discovered)
        # start the discovery
        self.current.discover()
        return False
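# Hypothetical usage sketch for the GstFile variant above: it fills the
# supplied attrs dict with the discovered properties and quits its main loop
# once all files are processed; the file path is illustrative only.
attrs = {}
GstFile(['/path/to/clip.avi'], attrs).run()
print attrs.get('media type'), attrs.get('video.width'), attrs.get('video.height')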
def _discover_one(self):
    if not len(self.files):
        gobject.idle_add(self.mainloop.quit)
        return False
    filename = self.files.pop(0)
    if not os.path.isfile(filename):
        gobject.idle_add(self._discover_one)
        return False
    print "Running on", filename
    # create a discoverer for that file
    self.current = Discoverer(filename)
    # connect a callback on the 'discovered' signal
    self.current.connect('discovered', self._discovered)
    # start the discovery
    self.current.discover()
    return False
def _run(self, filename, cutlist, app):
    self.app = app
    self.config = app.config
    self.filename = filename
    self.cutlist = self.load_cutlist(cutlist)
    self.keyframes, error = self.get_keyframes_from_file(filename)
    if self.keyframes == None:
        print "Error: Keyframes konnten nicht ausgelesen werden."

    self.movie_window.set_size_request(
        self.config.get('general', 'cutinterface_resolution_x'),
        self.config.get('general', 'cutinterface_resolution_y'))
    self.hide_cuts = self.config.get('general', 'cutinterface_hide_cuts')

    def discovered(d, is_media):
        if is_media:
            self.videolength = d.videolength
            self.framerate_num = d.videorate.num
            self.framerate_denom = d.videorate.denom
            self.frames = self.videolength * self.framerate_num / self.framerate_denom / gst.SECOND
            self.key_seek.stepnanoseconds = 25 * gst.SECOND * self.framerate_denom / self.framerate_num
            self.timelines = [self.get_cuts_in_frames(self.initial_cutlist, self.initial_cutlist_in_frames)]
            self.videowidth = d.videowidth
            self.videoheight = d.videoheight
            self.ready_callback()
        else:
            print "error: %r does not appear to be a media file" % filename

    self.d = Discoverer(filename)
    self.d.connect("discovered", discovered)
    self.d.discover()

    if gtk.RESPONSE_OK == self.run():
        self.set_cuts(self.cutlist, self.timelines[-1])
    else:
        self.set_cuts(self.cutlist, [])

    if self.timer != None:
        gobject.source_remove(self.timer)

    return self.cutlist
def queue_file(self, filename, name=None, pos=0, userdata=None):
    try:
        os.stat(filename)
    except:
        print "file not found", filename
        return
    if name is None:
        name = os.path.split(filename)[1]
    if RUNNING_HILDON:
        ext = os.path.splitext(filename)[1][1:]
        known_good = ['mp3', 'wav', 'm4a', 'wma', 'mpg', 'avi', '3gp', 'rm', 'asf', 'mp4']
        try:
            gst.element_factory_make("oggdemux", "test")
            known_good += ['ogg']
        except:
            pass
        self._on_type_discovered(None, ext in known_good, filename, name, pos, userdata)
    else:
        # thanks gstfile.py
        d = Discoverer(filename)
        d.connect('discovered', self._on_type_discovered, filename, name, pos, userdata)
        d.discover()
class GstFile:
    """ Analyses one or more files and prints out the multimedia information of each file. """
    def __init__(self, files):
        self.files = files
        self.mainloop = gobject.MainLoop()
        self.current = None

    def run(self):
        gobject.idle_add(self._discover_one)
        self.mainloop.run()

    def _discovered(self, discoverer, ismedia):
        discoverer.print_info()
        self.current = None
        if len(self.files):
            print "\n"
            gobject.idle_add(self._discover_one)

    def _discover_one(self):
        if not len(self.files):
            gobject.idle_add(self.mainloop.quit)
            return False
        filename = self.files.pop(0)
        if not os.path.isfile(filename):
            gobject.idle_add(self._discover_one)
            return False
        print "Running on", filename
        # create a discoverer for that file
        self.current = Discoverer(filename)
        # connect a callback on the 'discovered' signal
        self.current.connect('discovered', self._discovered)
        # start the discovery
        self.current.discover()
        return False
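# Minimal command-line driver sketch for the GstFile class above, assuming
# the legacy gst-python 0.10 bindings (gst.extend.discoverer.Discoverer),
# gobject and os are importable in this module.
import sys

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print 'usage: %s <file> [<file> ...]' % sys.argv[0]
        sys.exit(1)
    GstFile(sys.argv[1:]).run()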
def play(self, url):
    # Try loading as a module first.
    module = None
    try:
        module = imp.load_source('playlist', url)
    except:
        pass
    if module:
        self.playlist = Playlist(module.playlist())
        url = self.playlist.next()
    if url:
        d = Discoverer(url)
        d.connect('discovered', self._discovered)
        d.discover()
        url = self._fix_url(url)
        self.stop()
        self.current_song = url
        self.player.set_state(gst.STATE_READY)
        self.player.set_property('uri', url)
        self.player.set_state(gst.STATE_PLAYING)
def discover(self):
    infile = self.infiles[self.i]
    discoverer = Discoverer(infile)
    discoverer.connect('discovered', self.on_discovered, infile)
    discoverer.discover()
    return False  # Don't repeat idle call
class CutinterfaceDialog(gtk.Dialog, gtk.Buildable, Cut):
    __gtype_name__ = "CutinterfaceDialog"

    def __init__(self):
        self.marker_a, self.marker_b = 0, -1
        self.timelines = [[]]
        self.cut_selected = -1
        self.timer = None
        self.hide_cuts = False
        self.frames = 0
        self.slider = None
        self.keyframes = None

    def do_parser_finished(self, builder):
        self.builder = builder
        self.builder.connect_signals(self)
        self.slider = self.builder.get_object('slider')
        self.movie_window = self.builder.get_object('movie_window')
        self.movie_window.connect('realize', self.on_realize)
        self.movie_window.connect('unrealize', self.on_unrealize)
        self.hide_cuts = self.builder.get_object('checkbutton_hide_cuts').get_active()
        self.audio_caps = gst.Caps("audio/x-raw-int;audio/x-raw-float")
        cutslistmodel = self.builder.get_object('cutslist')
        cutslistmodel.set_default_sort_func(None)
        cutslistselection = self.builder.get_object('cutsview').get_selection()
        cutslistselection.connect('changed', self.on_cuts_selection_changed)
        button_delete_cut = self.builder.get_object('button_delete_cut')
        button_delete_cut.set_sensitive(False)
        button_deselect = self.builder.get_object('button_deselect')
        button_deselect.set_sensitive(False)
        # player state
        self.is_playing = False

    def on_realize(self, widget, data=None):
        # the xid must be retrieved in the GUI thread and before creating the pipeline to prevent race conditions
        self.movie_xid = self.movie_window.window.xid
        # The pipeline
        self.player = gst.Pipeline()
        # Create bus and connect several handlers
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        self.audio_composition = gst.element_factory_make("gnlcomposition", "audio-composition")
        self.video_composition = gst.element_factory_make("gnlcomposition", "video-composition")
        # Create sinks
        self.audiosink = gst.element_factory_make('autoaudiosink')
        self.videosink = gst.element_factory_make('autovideosink')
        # pipeline elements
        self.audioconvert = gst.element_factory_make('audioconvert')
        self.audioresample = gst.element_factory_make('audioresample')
        self.videoscale = gst.element_factory_make('videoscale')
        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace')

        # Connect handler for 'pad-added' signal
        def on_pad_added(element, pad, sink_pad):
            caps = pad.get_caps()
            name = caps[0].get_name()
            if name == 'video/x-raw-rgb':
                if not sink_pad.is_linked():  # Only link once
                    pad.link(sink_pad)
            elif name == 'audio/x-raw-float' or name == 'audio/x-raw-int':
                if not sink_pad.is_linked():  # Only link once
                    pad.link(sink_pad)

        self.key_seek = KeySeekElement.KeySeekElement()
        self.audio_composition.connect('pad-added', on_pad_added, self.key_seek.get_pad('keyseek-sink'))
        self.video_composition.connect('pad-added', on_pad_added, self.key_seek.get_pad('secondary-sink'))
        # Add elements to pipeline
        self.player.add(self.audio_composition, self.audioconvert, self.audioresample, self.audiosink,
                        self.video_composition, self.key_seek, self.ffmpegcolorspace, self.videoscale, self.videosink)
        self.key_seek.get_pad('secondary-src').link(self.ffmpegcolorspace.get_pad('sink'))
        gst.element_link_many(self.ffmpegcolorspace, self.videoscale, self.videosink)
        self.key_seek.get_pad('keyseek-src').link(self.audioconvert.get_pad('sink'))
        gst.element_link_many(self.audioconvert, self.audioresample, self.audiosink)

    def on_unrealize(self, widget, data=None):
        # to prevent race conditions when closing the window while playing
        self.player.set_state(gst.STATE_NULL)
    def get_cuts_in_frames(self, cuts, in_frames):
        if cuts == []:
            res = [(0, self.frames)]
        elif in_frames:
            res = cuts
        else:
            res = []
            for start, duration in cuts:
                start_frame = int(start * self.framerate_num / self.framerate_denom)
                duration_frames = int(duration * self.framerate_num / self.framerate_denom)
                res.append((start_frame, duration_frames))
        return res

    def load_cutlist(self, filename):
        cutlist = cutlists.Cutlist()
        cutlist.intended_app = 'VirtualDub.exe'
        if filename != None and os.path.exists(filename):
            cutlist.local_filename = filename
            cutlist.read_from_file()
            cutlist.read_cuts()
            if cutlist.author != self.app.config.get('general', 'cutlist_username'):
                cutlist.usercomment = 'Mit OTR-Verwaltung++ geschnitten; Vorlage von ' + cutlist.author + '; ' + cutlist.usercomment
            if cutlist.cuts_frames:
                self.initial_cutlist = cutlist.cuts_frames
                self.initial_cutlist_in_frames = True
            else:
                self.initial_cutlist = cutlist.cuts_seconds
                self.initial_cutlist_in_frames = False
        else:
            cutlist.usercomment = 'Mit OTR-Verwaltung++ geschnitten'
            self.initial_cutlist = []
            self.initial_cutlist_in_frames = True
        if self.timer != None:  # Running
            self.timelines.append(self.get_cuts_in_frames(self.initial_cutlist, self.initial_cutlist_in_frames))
        if self.slider:
            self.slider.queue_draw()
        return cutlist

    def set_cuts(self, cutlist, cuts):
        cutlist.fps = float(self.framerate_num) / float(self.framerate_denom)
        cutlist.cuts_frames = cuts
        cutlist.cuts_seconds = []
        cutlist.app = 'OTR-Verwaltung++;Cutinterface'
        for start, duration in cuts:
            s = start * self.framerate_denom / float(self.framerate_num)
            d = duration * self.framerate_denom / float(self.framerate_num)
            cutlist.cuts_seconds.append((s, d))

    def _run(self, filename, cutlist, app):
        self.app = app
        self.config = app.config
        self.filename = filename
        self.cutlist = self.load_cutlist(cutlist)
        self.keyframes, error = self.get_keyframes_from_file(filename)
        if self.keyframes == None:
            print "Error: Keyframes konnten nicht ausgelesen werden."
        self.movie_window.set_size_request(
            self.config.get('general', 'cutinterface_resolution_x'),
            self.config.get('general', 'cutinterface_resolution_y'))
        self.hide_cuts = self.config.get('general', 'cutinterface_hide_cuts')

        def discovered(d, is_media):
            if is_media:
                self.videolength = d.videolength
                self.framerate_num = d.videorate.num
                self.framerate_denom = d.videorate.denom
                self.frames = self.videolength * self.framerate_num / self.framerate_denom / gst.SECOND
                self.key_seek.stepnanoseconds = 25 * gst.SECOND * self.framerate_denom / self.framerate_num
                self.timelines = [self.get_cuts_in_frames(self.initial_cutlist, self.initial_cutlist_in_frames)]
                self.videowidth = d.videowidth
                self.videoheight = d.videoheight
                self.ready_callback()
            else:
                print "error: %r does not appear to be a media file" % filename

        self.d = Discoverer(filename)
        self.d.connect("discovered", discovered)
        self.d.discover()

        if gtk.RESPONSE_OK == self.run():
            self.set_cuts(self.cutlist, self.timelines[-1])
        else:
            self.set_cuts(self.cutlist, [])

        if self.timer != None:
            gobject.source_remove(self.timer)

        return self.cutlist

    def ready_callback(self):
        self.builder.get_object('label_filename').set_markup("Aktuelle Datei: <b>%s</b>" % os.path.basename(self.filename))
        self.update_timeline()
        self.update_listview()
        self.timer = gobject.timeout_add(200, self.tick)

    def tick(self):
        self.update_frames_and_time()
        self.update_slider()
        self.builder.get_object('checkbutton_hide_cuts').set_active(self.hide_cuts)
        return True

    def jump_to(self, frames=None, seconds=None, nanoseconds=0, flags=gst.SEEK_FLAG_ACCURATE):
        if frames:
            if frames >= self.get_frames():
                frames = self.get_frames() - 1
            nanoseconds = frames * gst.SECOND * self.framerate_denom / self.framerate_num
        elif seconds:
            nanoseconds = seconds * gst.SECOND
        self.player.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH | flags, int(nanoseconds))

    def set_marker(self, a=None, b=None):
        """ Set markers a and/or b to a specific frame position and update the buttons """
        if a is not None:
            self.marker_a = a
            if a != -1 and self.marker_b < 0:
                self.marker_b = self.get_frames() - 1
        if b is not None:
            self.marker_b = b
            if b != -1 and self.marker_a < 0:
                self.marker_a = 0
        if self.marker_a != -1 and self.marker_b != -1 and self.marker_a > self.marker_b:
            print "Switch a and b"
            c = self.marker_b
            self.marker_b = self.marker_a
            self.marker_a = c
        if self.marker_a == -1:
            self.builder.get_object('button_jump_to_marker_a').set_label('-')
        else:
            self.builder.get_object('button_jump_to_marker_a').set_label(str(self.marker_a))
        if self.marker_b == -1:
            self.builder.get_object('button_jump_to_marker_b').set_label('-')
        else:
            self.builder.get_object('button_jump_to_marker_b').set_label(str(self.marker_b))
        self.slider.queue_draw()

    def update_timeline(self):
        self.player.set_state(gst.STATE_NULL)
        # remove all gnlfilesources
        for media in ['audio', 'video']:
            elements = []
            for element in getattr(self, '%s_composition' % media).elements():
                elements.append(element)
            for element in elements:
                getattr(self, '%s_composition' % media).remove(element)
        # add new filesources
        timeline = []
        if self.hide_cuts:
            timeline = enumerate(self.timelines[-1])
        else:
            timeline = [(0, (0, self.frames))]
        timeline_position = 0
        for count, (start, duration) in timeline:
            for media in ['audio', 'video']:
                part = gst.element_factory_make("gnlfilesource", "%s-part-%i" % (media, count))
                if media == 'audio':
                    part.set_property("caps", self.audio_caps)
                part.set_property("location", self.filename)
                part.set_property("start", timeline_position * gst.SECOND * self.framerate_denom / self.framerate_num)
                part.set_property("duration", duration * gst.SECOND * self.framerate_denom / self.framerate_num)
                part.set_property("media-start", start * gst.SECOND * self.framerate_denom / self.framerate_num)
                part.set_property("media-duration", duration * gst.SECOND * self.framerate_denom / self.framerate_num)
                getattr(self, '%s_composition' % media).add(part)
            timeline_position += duration
        self.player.set_state(gst.STATE_PLAYING)
        self.player.set_state(gst.STATE_PAUSED)
        # update slider range
        self.builder.get_object('slider').set_range(0, self.get_frames())
        self.slider.queue_draw()

    # get the absolute frame, assuming that the given frame corresponds to the current display mode
    def get_absolute_position(self, rel_pos):
        if not self.hide_cuts:
            return rel_pos
        elif rel_pos == -1:
            return -1
        durations = 0
        for start, duration in self.timelines[-1]:
            if rel_pos - durations < duration:
                return start + rel_pos - durations
            else:
                durations = durations + duration
        return self.frames - 1

    # convert the absolute position into the corresponding relative position
    def get_relative_position(self, abs_pos):
        if abs_pos == -1:
            return -1
        durations = 0
        for start, duration in self.timelines[-1]:
            if abs_pos - start < 0:
                return durations
            elif abs_pos - start < duration:
                return durations + abs_pos - start
            else:
                durations = durations + duration
        return durations - 1

    # inverts the cuts (between timeline and cut-out list) assuming the list is flawless;
    # should be faster than the full version below
    def invert_simple(self, cuts):
        inverted = []
        if cuts[0][0] > 0:
            inverted.append((0, cuts[0][0]))
        next_start = cuts[0][0] + cuts[0][1]
        for start, duration in cuts[1:]:
            inverted.append((next_start, start - next_start))
            next_start = start + duration
        if next_start < self.frames:
            inverted.append((next_start, self.frames - next_start))
        return inverted

    # inverts the cuts (between timeline and cut-out list), removing all kinds of overlaps etc.
    def invert_full(self, cuts):
        inverted = []
        sorted_cuts = sorted(cuts, key=lambda c: c[0])  # sort cuts by start frame
        if sorted_cuts[0][0] > 0:
            inverted.append((0, sorted_cuts[0][0]))
        next_start = sorted_cuts[0][0] + sorted_cuts[0][1]
        for start, duration in sorted_cuts[1:]:
            if duration < 0:  # correct invalid values
                duration = -duration
                start = start - duration + 1
            if start < 0:
                start = 0
            if start + duration > self.frames:
                duration = self.frames - start
            if start < next_start:  # handle overlapping cuts
                next_start = max(next_start, start + duration)
            else:
                if start - next_start > 0:  # don't add cuts with zero length
                    inverted.append((next_start, start - next_start))
                next_start = start + duration
        if next_start < self.frames:
            inverted.append((next_start, self.frames - next_start))
        return inverted

    def remove_segment(self, rel_s, rel_d):
        print "\n\033[1;31m-- Entering remove_segment\033[1;m"
        print "Current timeline is: ", self.timelines[-1]
        abs_start = self.get_absolute_position(rel_s)
        abs_end = self.get_absolute_position(rel_s + rel_d - 1)
        inverted_timeline = self.invert_simple(self.timelines[-1])
        inverted_timeline.append((abs_start, abs_end - abs_start + 1))
        self.timelines.append(self.invert_full(inverted_timeline))
        print self.timelines
        print "Current timeline is: ", self.timelines[-1]
        print "\033[1;31m-- Leaving remove_segment\033[1;m\n"
        self.update_timeline()
        self.update_listview()
        time.sleep(0.2)
        if self.hide_cuts:
            print "Seek To: ", rel_s
            self.jump_to(frames=rel_s)
        else:
            print "Seek To: ", abs_end + 1
            self.jump_to(frames=abs_end + 1)

    def get_frames(self):
        """ Returns the current number of frames to be shown. """
        if self.hide_cuts:
            frames = sum([duration for start, duration in self.timelines[-1]])
        else:
            frames = self.frames
        return frames

    def update_frames_and_time(self):
        # Try to obtain and update the current position information
        try:
            current_position = self.player.query_position(gst.FORMAT_TIME, None)[0]
            duration = self.player.query_duration(gst.FORMAT_TIME)[0]
        except Exception, e:
            # sometimes this fails, e.g. right after a seek
            print "except!", e
            return
        self.current_frame_position = current_position * self.framerate_num / self.framerate_denom / gst.SECOND
        if self.keyframes != None and self.current_frame_position in self.keyframes:
            self.builder.get_object('label_time').set_text('Frame(K): %i/%i, Zeit %s/%s' % (self.current_frame_position, self.get_frames() - 1, self.convert_sec(current_position), self.convert_sec(duration)))
        else:
            self.builder.get_object('label_time').set_text('Frame: %i/%i, Zeit %s/%s' % (self.current_frame_position, self.get_frames() - 1, self.convert_sec(current_position), self.convert_sec(duration)))
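# Worked sketch of the frame <-> nanosecond conversion used in jump_to() and
# update_frames_and_time() above, assuming a 25/1 frame rate (gst.SECOND is
# one second expressed in nanoseconds).
framerate_num, framerate_denom = 25, 1
frame = 250
nanoseconds = frame * gst.SECOND * framerate_denom / framerate_num
# -> 10 * gst.SECOND, i.e. frame 250 sits at the 10-second mark
frame_back = nanoseconds * framerate_num / framerate_denom / gst.SECOND
# -> 250 again, so seek targets and position queries round-trip cleanly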
def on_idle(self, filename):
    d = Discoverer(filename)
    d.connect('discovered', self.on_data)
    d.discover()
    return False
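# Hypothetical driver sketch for the on_idle() callback above: schedule the
# discovery from the GLib main loop. "handler" stands in for whatever object
# defines on_idle() and on_data(); it is not part of the original snippet, and
# on_data() is assumed to quit the loop when it is done.
import gobject

loop = gobject.MainLoop()
gobject.idle_add(handler.on_idle, '/path/to/clip.avi')
loop.run()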