def prepareGST(self, url):
    """Build and start a playbin2-based pipeline playing *url*.

    The video branch is a custom bin: textoverlay feeding a
    "videobalance ! autovideosink" sub-bin.  The bin's sink is exposed
    through a ghost pad and installed as playbin2's "video-sink"
    property, so the overlay/balance sit between the decoder and the
    display.

    Side effects: sets self.player, self.textoverlay, self.mprunning,
    self.checked_video, self.playing, and schedules self.callback on
    the GLib main loop.
    """
    self.checked_video = False
    self.playing = False
    # Renamed from `bin` to avoid shadowing the builtin of that name.
    video_bin = gst.Bin('my-bin')
    self.textoverlay = gst.element_factory_make('textoverlay')
    video_bin.add(self.textoverlay)
    # Expose the textoverlay's video sink pad as the bin's "sink" pad so
    # playbin2 can link into the bin from outside.
    pad = self.textoverlay.get_pad("video_sink")
    ghostpad = gst.GhostPad("sink", pad)
    video_bin.add_pad(ghostpad)
    videosink = gst.gst_parse_bin_from_description(
        "videobalance name=balance ! autovideosink", True)
    video_bin.add(videosink)
    gst.element_link_many(self.textoverlay, videosink)
    self.player = gst.element_factory_make("playbin2", "player")
    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.enable_sync_message_emission()
    bus.connect("sync-message::element", self.on_sync_message)
    bus.connect("message", self.on_message)
    self.player.set_property("video-sink", video_bin)
    self.player.set_property("uri", url)
    self.player.set_state(gst.STATE_PLAYING)
    self.mprunning = True
    # Run self.callback whenever the main loop is idle (e.g. position
    # polling); it keeps being called for as long as it returns True.
    gobject.idle_add(self.callback)
def __init__(self):
    """Build the GTK window, hook a DBus signal to the overlay text,
    and construct a tee'd GStreamer pipeline: a videotestsrc split into
    a local preview branch (bin1) and a tcpserversink branch (bin2),
    plus an appsrc-based overlay source bin (initially locked).
    """
    self.overlay_buffer = None
    self.overlay_text = "Foo Bar Baz 123"
    self.bus = dbus.SystemBus()
    #self.bus = dbus.SessionBus()
    # Update the overlay text whenever the TestService emits HelloSignal.
    #textsignal = self.bus.add_signal_receiver(self.overlay_text_changed, 'textchanged', 'com.example')
    textsignal = self.bus.add_signal_receiver(
        self.overlay_text_changed,
        dbus_interface="com.example.TestService",
        signal_name="HelloSignal")

    window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    window.set_title("Mpeg2-Player")
    window.set_default_size(500, 400)
    window.connect("destroy", gtk.main_quit, "WM destroy")
    vbox = gtk.VBox()
    window.add(vbox)
    hbox = gtk.HBox()
    vbox.pack_start(hbox, False)
    #self.entry = gtk.Entry()
    #hbox.add(self.entry)
    self.button = gtk.Button("Start")
    hbox.pack_start(self.button, False)
    self.button.connect("clicked", self.start_stop)
    self.movie_window = gtk.DrawingArea()
    vbox.add(self.movie_window)
    window.show_all()

    # Earlier pipeline experiments, kept for reference:
    # gst-launch v4l2src ! video/x-raw-yuv,format=\(fourcc\)YUY2,width=640,height=480 ! ffmpegcolorspace ! autovideosink
    #self.player = gst.parse_launch('videotestsrc name=source ! video/x-raw-yuv,format=(fourcc)AYUV ! videomixer name=mix ! ffmpegcolorspace ! autovideosink name=videosink')
    # PONDER: How to get the resolution from the videosink ?
    #self.player = gst.parse_launch('videotestsrc name=source ! video/x-raw-yuv,format=(fourcc)AYUV ! videomixer name=mix ! ffmpegcolorspace ! videoscale ! autovideosink name=videosink')
    #self.player = gst.parse_launch('videotestsrc name=source ! videoscale ! video/x-raw-yuv,format=(fourcc)AYUV ! videomixer name=mix ! ffmpegcolorspace ! autovideosink name=videosink')
    # This hardcoded resolution works.
    #self.player = gst.parse_launch('videotestsrc name=source ! video/x-raw-yuv,format=(fourcc)AYUV,width=500,height=400 ! videomixer name=mix ! ffmpegcolorspace ! autovideosink name=videosink')

    self.player = gst.parse_launch('videotestsrc name=source ! tee name=splitter')
    # BUGFIX: the ffmpegcolorspace element must be named "conv" so that
    # the get_by_name("conv") lookup below finds it; previously no
    # element carried that name and the lookup returned None.
    self.bin1 = gst.gst_parse_bin_from_description(
        'queue ! video/x-raw-yuv,format=(fourcc)AYUV,width=500,height=400 ! '
        'videomixer name=mix ! ffmpegcolorspace name=conv ! '
        'autovideosink name=videosink', True)
    self.player.add(self.bin1)
    self.bin2 = gst.gst_parse_bin_from_description(
        'queue ! video/x-raw-yuv,format=(fourcc)AYUV,width=500,height=400 ! '
        'videomixer name=mix ! ffmpegcolorspace ! '
        'tcpserversink name=videosink protocol=none port=3000', True)
    self.player.add(self.bin2)
    self.tee = self.player.get_by_name("splitter")
    self.tee.link(self.bin1)
    self.tee.link(self.bin2)

    # adapted from https://code.fluendo.com/flumotion/trac/browser/flumotion/trunk/flumotion/component/converters/overlay/overlay.py
    self.videomixer = self.bin1.get_by_name("mix")
    self.converter = self.bin1.get_by_name("conv")
    # Re-negotiate the overlay when the mixer's main input caps change.
    self.videomixer.get_pad('sink_0').connect('notify::caps',
                                              self._notify_caps_cb)
    self.sourceBin = gst.Bin()
    self.overlay = gst.element_factory_make('appsrc', 'overlay')
    self.overlay.set_property('do-timestamp', True)
    self.overlay.connect('need-data', self.push_buffer)
    self.sourceBin.add(self.overlay)
    self.alphacolor = gst.element_factory_make('alphacolor')
    self.sourceBin.add(self.alphacolor)
    self.overlay.link(self.alphacolor)
    self.sourceBin.add_pad(gst.GhostPad('src',
                                        self.alphacolor.get_pad('src')))
    # Keep the overlay source stopped until it is explicitly unlocked.
    self.sourceBin.set_locked_state(True)
    self.player.add(self.sourceBin)

    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.enable_sync_message_emission()
    bus.connect("message", self.on_message)
    bus.connect("sync-message::element", self.on_sync_message)
    self.start_stop(None)
def play(self):
    """Assemble the full capture/mix/encode pipeline and start it.

    Walks the configured sources (audio goes through an input-selector,
    video through per-source tees for thumbnail + picture-in-picture),
    chains PiP -> watermark -> colorspace -> videobalance -> queue ->
    video effect -> textoverlay -> microblog overlay -> preview tee,
    then builds one converter/encoder/tee chain per distinct encoder
    for the configured outputs.  Emits "preplay", then "error"/"stopped"
    on misconfiguration, "pipeline-ready" once built, and "playing" when
    the state change succeeds synchronously.
    """
    self.emit("preplay")
    self.player = gst.Pipeline("player")
    self.queue_video = gst.element_factory_make("queue", "queue_video")
    self.player.add(self.queue_video)
    self.input_type = 0
    # Source selection
    self.source_pads = {}
    self.audio_pads = {}
    self.pip_pads = {}
    self.output_bins = {}
    # Renamed from `type` to avoid shadowing the builtin; accumulates the
    # media-type flags of the selected audio/video sources for the encoder
    # factory below.
    media_type = 0
    source_number = 0
    pip_number = 0
    self.pip = PictureInPicture()
    self.player.add(self.pip)

    for row in self.sources.get_store():
        (name, source) = row
        element = source.create()
        self.player.add(element)
        if element.does_audio():
            if not self.input_type & MEDIA_AUDIO:
                # The pipeline has audio sources, and this is the first
                # audio source we add
                if self.audio_source is None:
                    self.emit("error", "You need to select an audio source")
                    self.emit("stopped")
                    return
                self.input_type |= MEDIA_AUDIO
                self.input_selector = gst.element_factory_make(
                    "input-selector", "audio-selector"
                )
                self.player.add(self.input_selector)
            audiobin = audioinputbin.AudioInputBin(source)
            self.player.add(audiobin)
            element.audio_pad.link(audiobin.get_static_pad("sink"))
            self.audio_pads[name] = \
                self.input_selector.get_request_pad("sink%d")
            audiobin.src_pad.link(self.audio_pads[name])
        if element.does_video():
            self.input_type |= MEDIA_VIDEO
            self.source_pads[name] = source_number
            source_number += 1
            # Thumbnail preview
            tee = gst.element_factory_make("tee", None)
            self.player.add(tee)
            element.video_pad.link(tee.sink_pads().next())
            thumbnail_queue = gst.element_factory_make("queue", None)
            self.player.add(thumbnail_queue)
            self.thumbnails[name] = Preview(self)
            self.player.add(self.thumbnails[name])
            thumbnail_err = gst.element_link_many(
                tee, thumbnail_queue, self.thumbnails[name]
            )
            # element_link_many returns False on failure.
            if not thumbnail_err:
                self.emit("error", "Error connecting thumbnail preview.")
            # Picture in Picture
            self.pip_pads[name] = pip_number
            pip_number += 1
            main_queue = gst.element_factory_make("queue", None)
            self.player.add(main_queue)
            pip_queue = gst.element_factory_make("queue", None)
            self.player.add(pip_queue)
            tee.link(main_queue)
            tee.link(pip_queue)
            main_queue.src_pads().next().link(self.pip.get_request_pad_A())
            pip_queue.src_pads().next().link(self.pip.get_request_pad_B())
        if name == self.video_source:
            media_type |= element.get_type()
        if name == self.audio_source:
            media_type |= element.get_type()

    self.watermark = gst.element_factory_make(
        "cairoimageoverlay", "cairoimageoverlay"
    )
    self.player.add(self.watermark)
    self.colorspace = gst.element_factory_make(
        "ffmpegcolorspace", "colorspace-imageoverlay-videobalance"
    )
    self.player.add(self.colorspace)
    self.videobalance = gst.element_factory_make(
        "videobalance", "videobalance"
    )
    self.player.add(self.videobalance)
    # Only override videobalance defaults that were explicitly configured.
    if self.videobalance_contrast:
        self.videobalance.set_property(
            "contrast", self.videobalance_contrast
        )
    if self.videobalance_brightness:
        self.videobalance.set_property(
            "brightness", self.videobalance_brightness
        )
    if self.videobalance_hue:
        self.videobalance.set_property(
            "hue", self.videobalance_hue
        )
    if self.videobalance_saturation:
        self.videobalance.set_property(
            "saturation", self.videobalance_saturation
        )
    gst.element_link_many(
        self.pip, self.watermark, self.colorspace,
        self.videobalance, self.queue_video
    )
    self._switch_source()
    self._switch_pip()
    if self.pip_position:
        self.pip.set_property("position", self.pip_position)

    self.effect[MEDIA_VIDEO] = effect.video_effect.VideoEffect(
        self.effect_name[MEDIA_VIDEO]
    )
    self.player.add(self.effect[MEDIA_VIDEO])
    self.overlay = gst.element_factory_make("textoverlay", "overlay")
    self.overlay.set_property("font-desc", self.overlay_font)
    self.overlay.set_property("halign", self.halign)
    self.overlay.set_property("valign", self.valign)
    self.player.add(self.overlay)

    # microblog overlay: colorspace in, rsvgoverlay, queue out.
    self.microblog_overlay = gst.gst_parse_bin_from_description(
        "ffmpegcolorspace name=_i ! rsvgoverlay name=rsvg ! queue name=_o",
        True
    )
    i = self.microblog_overlay.get_by_name("_i")
    self.rsvg = self.microblog_overlay.get_by_name("rsvg")
    self.player.add(self.microblog_overlay)
    gst.element_link_many(
        self.queue_video,
        self.effect[MEDIA_VIDEO],
        self.overlay,
        i,
    )
    self.preview_tee = multeequeue.MulTeeQueue()
    self.player.add(self.preview_tee)
    self.microblog_overlay.link(self.preview_tee)

    if self.input_type & MEDIA_AUDIO:
        self.convert = gst.element_factory_make("audioconvert", "convert")
        self.player.add(self.convert)
        self.effect[MEDIA_AUDIO] = effect.audio_effect.AudioEffect(
            self.effect_name[MEDIA_AUDIO]
        )
        self.player.add(self.effect[MEDIA_AUDIO])
        self.audio_tee = gst.element_factory_make("tee", "audio_tee")
        self.player.add(self.audio_tee)
        self.volume = volume.Volume()
        self.player.add(self.volume)
        self.level = gst.element_factory_make("level", "audio_level")
        # Post level messages on the bus so the UI can draw a VU meter.
        self.level.set_property("message", True)
        self.player.add(self.level)
        gst.element_link_many(
            self.input_selector, self.volume, self.level,
            self.effect[MEDIA_AUDIO], self.convert, self.audio_tee
        )
        self.input_selector.set_property(
            "active-pad", self.audio_pads[self.audio_source]
        )

    added_encoders = {}
    pip_width = 0
    pip_height = 0
    for row in self.outputs.get_store():
        (name, output) = row
        output_bin = outputbin.OutputBin(output)
        self.output_bins[name] = output_bin
        self.player.add(output_bin)
        encoder_name = output.get_config()["parent"]
        encoder_item = self.encoders.get_item(encoder_name)
        if encoder_item is None:
            self.emit("error", "Please, add an encoder.")
            break
        if encoder_name in added_encoders:
            # This encoder chain already exists; just fan out to the new
            # output from its tee.
            tee = added_encoders[encoder_name]
            tee.link(output_bin)
        else:
            tee = gst.element_factory_make("tee", None)
            self.player.add(tee)
            converter_item = encoder_item.parent
            converter = converter_item.create()
            # Track the largest configured output size to drive the PiP
            # (and preview) resolution below.
            if converter_item.config["width"] > pip_width:
                pip_width = converter_item.config["width"]
            if converter_item.config["height"] > pip_height:
                pip_height = converter_item.config["height"]
            self.player.add(converter)
            encoder = encoder_item.factory.create(media_type)
            if encoder.vorbisenc:
                self.metadata = metadata.Metadata(encoder.vorbisenc)
                self.metadata.set_tags(self.taglist)
            encoder.config(encoder_item.config)
            self.player.add(encoder)
            added_encoders[encoder_name] = tee
            self.preview_tee.get_src_pad().link(
                converter.sink_pads().next()
            )
            gst.element_link_many(
                converter, encoder, tee, output_bin
            )
            if self.input_type & MEDIA_AUDIO:
                audio_queue = gst.element_factory_make("queue", None)
                self.player.add(audio_queue)
                gst.element_link_many(self.audio_tee, audio_queue, encoder)

    if self.preview_enabled:
        self.preview = Preview(self)
        self.player.add(self.preview)
        self.preview_tee.get_src_pad().link(
            self.preview.sink_pads().next()
        )

    # No output configured a size: fall back to a sane default.
    if pip_width == 0:
        pip_width = 320
        pip_height = 240
    self.pip.set_property("width", int(pip_width))
    self.pip.set_property("height", int(pip_height))
    self.video_width = int(pip_width)
    self.video_height = int(pip_height)
    self._set_watermark(self.video_width, self.video_height)
    self.overlay.set_property("text", self.overlay_text)
    if self.volume_value is not None:
        self.volume.set_property("volume", self.volume_value)
    self.emit("pipeline-ready")

    bus = self.player.get_bus()
    bus.add_signal_watch()
    bus.enable_sync_message_emission()
    bus.connect("message", self.on_message)
    bus.connect("sync-message::element", self.on_sync_message)
    cr = self.player.set_state(gst.STATE_PLAYING)
    if cr == gst.STATE_CHANGE_SUCCESS:
        self.emit("playing")
    elif cr == gst.STATE_CHANGE_ASYNC:
        # State change completes later; on_message watches for it.
        self.pending_state = gst.STATE_PLAYING