def __init_pipeline(self):
        """Creates a gstreamer pipeline. Returns True on success."""

        if self.bin:
            return True

        pipeline = config.get("player", "gst_pipeline")
        pipeline, self._pipeline_desc = GStreamerSink(pipeline)
        if not pipeline:
            return False

        if self._use_eq and Gst.ElementFactory.find('equalizer-10bands'):
            # The equalizer only operates on 16-bit ints or floats, and
            # will only pass these types through even when inactive.
            # We push floats through to this point, then let the second
            # audioconvert handle pushing to whatever the rest of the
            # pipeline supports. As a bonus, this seems to automatically
            # select the highest-precision format supported by the
            # rest of the chain.
            filt = Gst.ElementFactory.make('capsfilter', None)
            filt.set_property('caps',
                              Gst.Caps.from_string('audio/x-raw,format=F32LE'))
            eq = Gst.ElementFactory.make('equalizer-10bands', None)
            self._eq_element = eq
            self.update_eq_values()
            conv = Gst.ElementFactory.make('audioconvert', None)
            resample = Gst.ElementFactory.make('audioresample', None)
            pipeline = [filt, eq, conv, resample] + pipeline

        # playbin2 has started to control the volume through pulseaudio,
        # which means the volume property can change without us noticing.
        # Use our own volume element for now until this works with PA.
        self._vol_element = Gst.ElementFactory.make('volume', None)
        pipeline.insert(0, self._vol_element)

        # Get all plugin elements and append audio converters.
        # playbin already includes one at the end
        plugin_pipeline = []
        for plugin in self._get_plugin_elements():
            plugin_pipeline.append(plugin)
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioconvert', None))
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioresample', None))
        pipeline = plugin_pipeline + pipeline

        bufbin = Gst.Bin()
        for element in pipeline:
            assert element is not None, pipeline
            bufbin.add(element)

        if len(pipeline) > 1:
            if not link_many(pipeline):
                print_w("Could not link GStreamer pipeline")
                self.__destroy_pipeline()
                return False

        # Test to ensure output pipeline can preroll
        bufbin.set_state(Gst.State.READY)
        result, state, pending = bufbin.get_state(timeout=STATE_CHANGE_TIMEOUT)
        if result == Gst.StateChangeReturn.FAILURE:
            bufbin.set_state(Gst.State.NULL)
            self.__destroy_pipeline()
            return False

        # Make the sink of the first element the sink of the bin
        gpad = Gst.GhostPad.new('sink', pipeline[0].get_static_pad('sink'))
        bufbin.add_pad(gpad)

        self.bin = Gst.ElementFactory.make('playbin', None)
        assert self.bin
        self.bin = BufferingWrapper(self.bin, self)
        self.__atf_id = self.bin.connect('about-to-finish',
            self.__about_to_finish)

        # set buffer duration
        duration = config.getfloat("player", "gst_buffer")
        self._set_buffer_duration(int(duration * 1000))

        # connect playbin to our plugin/volume/eq pipeline
        self.bin.set_property('audio-sink', bufbin)

        # by default playbin will render video -> suppress using fakesink
        fakesink = Gst.ElementFactory.make('fakesink', None)
        self.bin.set_property('video-sink', fakesink)

        # disable all video/text decoding in playbin
        GST_PLAY_FLAG_VIDEO = 1 << 0
        GST_PLAY_FLAG_TEXT = 1 << 2
        flags = self.bin.get_property("flags")
        flags &= ~(GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_TEXT)
        self.bin.set_property("flags", flags)

        # find the (uri)decodebin after setup and use autoplug-sort
        # to sort elements like decoders
        def source_setup(*args):
            def autoplug_sort(decode, pad, caps, factories):
                def set_prio(x):
                    i, f = x
                    i = {
                        "mad": -1,
                        "mpg123audiodec": -2
                    }.get(f.get_name(), i)
                    return (i, f)
                return list(zip(*sorted(map(set_prio, enumerate(factories)))))[1]

            for e in iter_to_list(self.bin.iterate_recurse):
                try:
                    e.connect("autoplug-sort", autoplug_sort)
                except TypeError:
                    pass
                else:
                    break
        self.bin.connect("source-setup", source_setup)

        # ReplayGain information gets lost when destroying
        self.volume = self.volume

        bus = self.bin.get_bus()
        bus.add_signal_watch()
        self.__bus_id = bus.connect('message', self.__message, self._librarian)

        if self.song:
            self.bin.set_property('uri', self.song("~uri"))

        return True
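
link_many() and print_w() above come from the surrounding project (Quod Libet helpers) and are not shown here. A minimal sketch of what link_many() might look like, under the assumption that it links each consecutive pair of elements and reports failure with a boolean, which is what the call site above expects:

def link_many(elements):
    # Link element N to element N+1; Gst.Element.link() returns False on failure.
    last = None
    for element in elements:
        if last is not None and not last.link(element):
            return False
        last = element
    return True
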
Example 2
    def __init_pipeline(self):
        """Creates a gstreamer pipeline. Returns True on success."""

        if self.bin:
            return True

        # reset error state
        self.error = False

        pipeline = config.get("player", "gst_pipeline")
        print_d(f"User pipeline (from player.gst_pipeline): {pipeline!r}")
        try:
            pipeline, self._pipeline_desc = GStreamerSink(pipeline)
        except PlayerError as e:
            self._error(e)
            return False

        if self._use_eq and Gst.ElementFactory.find('equalizer-10bands'):
            # The equalizer only operates on 16-bit ints or floats, and
            # will only pass these types through even when inactive.
            # We push floats through to this point, then let the second
            # audioconvert handle pushing to whatever the rest of the
            # pipeline supports. As a bonus, this seems to automatically
            # select the highest-precision format supported by the
            # rest of the chain.
            print_d("Setting up Gstreamer equalizer")
            filt = self._make('capsfilter', None)
            filt.set_property('caps',
                              Gst.Caps.from_string('audio/x-raw,format=F32LE'))
            eq = self._make('equalizer-10bands', None)
            self._eq_element = eq
            self.update_eq_values()
            conv = self._make('audioconvert', None)
            resample = self._make('audioresample', None)
            pipeline = [filt, eq, conv, resample] + pipeline

        # playbin2 has started to control the volume through pulseaudio,
        # which means the volume property can change without us noticing.
        # Use our own volume element for now until this works with PA.
        self._int_vol_element = self._make('volume', None)
        pipeline.insert(0, self._int_vol_element)

        # Get all plugin elements and append audio converters.
        # playbin already includes one at the end
        plugin_pipeline = []
        for plugin in self._get_plugin_elements():
            plugin_pipeline.append(plugin)
            plugin_pipeline.append(self._make('audioconvert', None))
            plugin_pipeline.append(self._make('audioresample', None))
        print_d(f"GStreamer plugin pipeline: {plugin_pipeline}")
        pipeline = plugin_pipeline + pipeline

        bufbin = Gst.Bin()
        for element in pipeline:
            assert element is not None, pipeline
            bufbin.add(element)

        if len(pipeline) > 1:
            try:
                link_many(pipeline)
            except OSError as e:
                print_w("Linking the GStreamer pipeline failed")
                self._error(
                    PlayerError(
                        _("Could not create GStreamer pipeline (%s)" % e)))
                return False

        # see if the sink provides a volume property, if yes, use it
        sink_element = pipeline[-1]
        if isinstance(sink_element, Gst.Bin):
            sink_element = iter_to_list(sink_element.iterate_recurse)[-1]

        self._ext_vol_element = None
        if hasattr(sink_element.props, "volume"):
            self._ext_vol_element = sink_element

            # In case we use the sink volume directly we can increase buffering
            # without affecting the volume change delay too much and save some
            # CPU time... (2x default for now).
            if hasattr(sink_element.props, "buffer_time"):
                sink_element.set_property("buffer-time", 400000)

            def ext_volume_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "volume")

            self._ext_vol_element.connect("notify::volume", ext_volume_notify)

        self._ext_mute_element = None
        if hasattr(sink_element.props, "mute") and \
                sink_element.get_factory().get_name() != "directsoundsink":
            # directsoundsink has a mute property but it doesn't work
            # https://bugzilla.gnome.org/show_bug.cgi?id=755106
            self._ext_mute_element = sink_element

            def mute_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "mute")

            self._ext_mute_element.connect("notify::mute", mute_notify)

        # Make the sink of the first element the sink of the bin
        gpad = Gst.GhostPad.new('sink', pipeline[0].get_static_pad('sink'))
        bufbin.add_pad(gpad)

        bin_ = self._make('playbin', None)
        assert bin_

        self.bin = BufferingWrapper(bin_, self)
        self._seeker = Seeker(self.bin, self)

        bus = bin_.get_bus()
        bus.add_signal_watch()
        self.__bus_id = bus.connect('message', self.__message, self._librarian)

        self.__atf_id = self.bin.connect('about-to-finish',
                                         self.__about_to_finish)

        # set buffer duration
        duration = config.getfloat("player", "gst_buffer")
        self._set_buffer_duration(int(duration * 1000))

        # connect playbin to our plugin / volume / EQ pipeline
        self.bin.set_property('audio-sink', bufbin)

        # by default playbin will render video -> suppress using fakesink
        vsink = self._make(AudioSinks.FAKE.value, None)
        self.bin.set_property('video-sink', vsink)

        # disable all video/text decoding in playbin
        GST_PLAY_FLAG_VIDEO = 1 << 0
        GST_PLAY_FLAG_TEXT = 1 << 2
        flags = self.bin.get_property("flags")
        flags &= ~(GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_TEXT)
        self.bin.set_property("flags", flags)

        if not self.has_external_volume:
            # Restore volume/ReplayGain and mute state
            self.props.volume = self._volume
            self.mute = self._mute

        # ReplayGain information gets lost when destroying
        self._reset_replaygain()

        if self.song:
            self._set_uri(self.song("~uri"))

        return True
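
iter_to_list() is another project helper used above (and in later examples) without being shown. A sketch under the assumption that it drains a Gst.Iterator factory such as Gst.Bin.iterate_recurse into a plain Python list:

def iter_to_list(func):
    # func is a bound method like bin.iterate_recurse; calling it yields a Gst.Iterator
    objects = []
    iterator = func()
    while True:
        status, value = iterator.next()
        if status == Gst.IteratorResult.OK:
            objects.append(value)
        else:
            # DONE, RESYNC or ERROR: stop collecting
            break
    return objects
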
Example 3
    def __init__(self):
        window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
        window.set_title("Videocorum")
        window.set_default_size(500, 400)
        window.connect("destroy", self.main_quit, "WM destroy")
        

        vbox = Gtk.VBox(False, 0)
        window.add(vbox)
        
        #Menu Bar:
        
        menu_bar = Gtk.MenuBar()
        hbox = Gtk.HBox()
        hbox.add(menu_bar)
        vbox.pack_start(hbox, False, False, 0)
        menu_bar.show()
        
        menu_bar.append(self.file_submenu())
        menu_bar.append(self.subtitles_submenu())
        menu_bar.append(self.generate_dummy_list_items("Settings"))
        menu_bar.append(self.generate_dummy_list_items("Help"))

        hbox = Gtk.HBox()
        vbox.pack_start(hbox, False, False, 0)
        self.movie_window = Gtk.DrawingArea()
        vbox.add(self.movie_window)
       
        hbox = Gtk.HBox()
        self.subtitle_box = Gtk.Label(" ")
        self.subtitle_box.show()
        hbox.add(self.subtitle_box)
        vbox.pack_start(hbox, False, False, 0)

        # SEEK BAR
        
        #creating a slider and calculating its range      
        self.slider = Gtk.Scale.new_with_range(Gtk.Orientation.HORIZONTAL, 0, 1000, 1)
        self.slider.set_draw_value(False)
        self.slider_handler_id = self.slider.connect("value-changed", self.on_slider_seek)
        # self.box.pack_start(self.slider, True, True, 0)
        hbox_slider = Gtk.HBox()
        hbox_slider.add(self.slider)
        vbox.pack_start(hbox_slider, False, False, 0)

        hbox = Gtk.HBox()
        toolbar = Gtk.Toolbar()
        hbox.add(toolbar)
        self.time_label = Gtk.Label()
        self.time_label.set_text("00:00 / 00:00")
        
        #TASK BAR: PLAY, PAUSE, STOP, FORWARD, REWIND, VOLUME:

        self.play_toolbutton = Gtk.ToolButton()
        self.play_toolbutton.set_label("gtk-media-pause")
        self.play_toolbutton.set_icon_name("gtk-media-pause")
        self.play_toolbutton.connect("clicked", self.action_pause)
        toolbar.add(self.play_toolbutton)

        self.rewind_toolbutton = Gtk.ToolButton()
        self.rewind_toolbutton.set_icon_name("gtk-media-rewind")
        self.rewind_toolbutton.connect("clicked", self.rewind_callback)        
        toolbar.add(self.rewind_toolbutton)

        self.stop_toolbutton = Gtk.ToolButton()
        self.stop_toolbutton.set_label("gtk-media-stop")
        self.stop_toolbutton.set_icon_name("gtk-media-stop")
        self.stop_toolbutton.connect("clicked", self.action_stop)
        toolbar.add(self.stop_toolbutton)

        self.forward_toolbutton = Gtk.ToolButton()
        self.forward_toolbutton.set_icon_name("gtk-media-forward")
        self.forward_toolbutton.connect("clicked", self.forward_callback)
        toolbar.add(self.forward_toolbutton)

        self.playback = Gtk.Label()
        self.playback.set_text("Playback Speed (1x):")
        hbox.pack_start(self.playback, False, False, 4)

        self.fast_button = Gtk.Button()
        self.fast_button.set_label("+")
        self.fast_button.connect("clicked", self.fast_callback)
        hbox.pack_start(self.fast_button, False, False, 4)

        self.slow_button = Gtk.Button()
        self.slow_button.set_label("-")
        self.slow_button.connect("clicked", self.slow_callback)
        hbox.pack_start(self.slow_button, False, False, 4)


        sink = "autoaudiosink"
        bin = Gst.Bin()
        self.speedchanger = Gst.ElementFactory.make("pitch")
        if self.speedchanger is None:
            print ("You need to install the Gstreamer soundtouch elements for "
                    "play it slowly to. They are part of Gstreamer-plugins-bad. Consult the "
                    "README if you need more information.")
        bin.add(self.speedchanger)
        self.audiosink = Gst.parse_launch(sink)

        bin.add(self.audiosink)
        convert = Gst.ElementFactory.make("audioconvert")
        bin.add(convert)
        self.speedchanger.link(convert)
        convert.link(self.audiosink)
        sink_pad = Gst.GhostPad.new("sink", self.speedchanger.get_static_pad("sink"))
        bin.add_pad(sink_pad)


        self.player = Gst.ElementFactory.make("playbin", "player")
        if len(sys.argv) == 2:
            self.player.set_state(Gst.State.NULL)
            self.player.set_property("uri", "file:///"+sys.argv[1])
            self.filename = "file:///" + sys.argv[1]
            self.player.set_property("audio-sink", bin)
            self.speedchanger.set_property("pitch", 0)
            self.player.set_state(Gst.State.PLAYING)
            widget = 1
            GLib.timeout_add(1000, self.update_slider, widget)
        else:
            self.player.set_property("audio-sink", bin)
            self.speedchanger.set_property("pitch", 0)
        
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)


        self.volume_button = Gtk.VolumeButton()
        self.volume_button.connect("value-changed", self.change_volume)
        self.volume_button.set_value(1)
        hbox.pack_end(self.volume_button, False, False, 10)
        hbox.pack_end(self.time_label, False, False, 4)
        vbox.pack_start(hbox, False, False, 0)

        hbox = Gtk.HBox()
        self.filename_display = Gtk.Label()
        hbox.pack_start(self.filename_display, False, False, 0)
        vbox.pack_start(hbox, False, False, 0)
        try:
            self.filename_display.set_text(self.filename.split('/')[-1])
        except AttributeError:
            # no file given on the command line, so self.filename is unset
            pass


        self.pbRate = 1
        self.gen = None
        
        window.show_all()
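
The speed callbacks wired to the "+" and "-" buttons (fast_callback / slow_callback) are not part of the snippet. A hedged sketch of what they could look like: the soundtouch pitch element exposes a "tempo" property that changes speed while keeping the pitch constant, but the exact property and step size used by the original code are assumptions here.

    def fast_callback(self, button):
        self.pbRate = min(self.pbRate + 0.1, 4.0)
        self.speedchanger.set_property("tempo", self.pbRate)
        self.playback.set_text("Playback Speed (%.1fx):" % self.pbRate)

    def slow_callback(self, button):
        self.pbRate = max(self.pbRate - 0.1, 0.3)
        self.speedchanger.set_property("tempo", self.pbRate)
        self.playback.set_text("Playback Speed (%.1fx):" % self.pbRate)
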
Example 4
    def set_pipeline(self):
        """
        Creates the pipeline that records the screen (ximagesrc) and audio (autoaudiosrc).
        """

        if self.pipeline:
            del (self.pipeline)

        self.pipeline = Gst.Pipeline()

        # Gdk.Screen (from gi.repository import Gdk) replaces the broken
        # direct GdkX11.X11Screen() construction
        screen = Gdk.Screen.get_default()
        width = int(screen.get_width())
        height = int(screen.get_height())

        # >>> Video
        print("Recording a desktop of: %sx%s" % (width, height))

        ximagesrc = Gst.ElementFactory.make('ximagesrc', "ximagesrc")
        #self.ximagesrc.set_property("screen-num", self.screen.get_screen_number())
        #self.ximagesrc.set_property('use-damage', False)
        ximagesrc.set_property('startx', 0)
        ximagesrc.set_property('endx',
                               200)  #ximagesrc.set_property('endx', width)
        ximagesrc.set_property('starty', 0)
        ximagesrc.set_property('endy',
                               100)  #ximagesrc.set_property('endy', height)

        que_encode_video = Gst.ElementFactory.make("queue", "que_encode_video")
        '''
        que_encode_video.set_property('max-size-buffers', 1000)
        que_encode_video.set_property('max-size-bytes', 0)
        que_encode_video.set_property('max-size-time', 0)'''

        videoscale = Gst.ElementFactory.make('videoscale', 'videoscale')
        video_capsfilter = Gst.ElementFactory.make("capsfilter", "scalecaps")
        # Gst.Caps.from_string() is a constructor; the 0.10-style
        # "video/x-raw-yuv" media type is "video/x-raw" in GStreamer 1.0
        scalecaps = Gst.Caps.from_string("video/x-raw,width=640,height=480")
        video_capsfilter.set_property("caps", scalecaps)

        videoconvert = Gst.ElementFactory.make('videoconvert', 'videoconvert')

        theoraenc = Gst.ElementFactory.make('theoraenc', 'theoraenc')
        '''
        theoraenc.set_property("bitrate", 1024) # kbps compresion + resolucion = calidad
        theoraenc.set_property('keyframe-freq', 15)
        theoraenc.set_property('cap-overflow', False)
        theoraenc.set_property('speed-level', 0)
        theoraenc.set_property('cap-underflow', True)
        theoraenc.set_property('vp3-compatible', True)'''

        que_video_mux = Gst.ElementFactory.make('queue', "que_video_mux")
        '''
        que_video_mux.set_property('max-size-buffers', 12000)
        que_video_mux.set_property('max-size-bytes', 0)
        que_video_mux.set_property('max-size-time', 0)'''

        videobin = Gst.Bin()

        videobin.add(ximagesrc)
        videobin.add(videoconvert)
        videobin.add(videoscale)
        videobin.add(video_capsfilter)
        #videobin.add(que_encode_video)
        videobin.add(theoraenc)
        #videobin.add(que_video_mux)

        ximagesrc.link(videoconvert)
        videoconvert.link(videoscale)
        videoscale.link(video_capsfilter)
        #video_capsfilter.link(que_encode_video)
        video_capsfilter.link(theoraenc)
        #theoraenc.link(que_video_mux)

        #pad = que_encode_video.get_static_pad("sink")
        #videobin.add_pad(Gst.GhostPad.new("sink", pad))
        pad = theoraenc.get_static_pad("src")
        videobin.add_pad(Gst.GhostPad.new("src", pad))
        print "\tvideobin:"
        print videobin.children
        # <<< Video

        # >>> Audio
        autoaudiosrc = Gst.ElementFactory.make('autoaudiosrc', "autoaudiosrc")
        audioconvert = Gst.ElementFactory.make('audioconvert', "audioconvert")
        vorbisenc = Gst.ElementFactory.make('vorbisenc', "vorbisenc")

        que_audio_mux = Gst.ElementFactory.make('queue', "que_audio_mux")
        '''
        que_audio_mux.set_property('max-size-buffers', 5000)
        que_audio_mux.set_property('max-size-bytes', 0)
        que_audio_mux.set_property('max-size-time', 0)'''

        audiobin = Gst.Bin()

        audiobin.add(autoaudiosrc)
        audiobin.add(audioconvert)
        audiobin.add(vorbisenc)
        audiobin.add(que_audio_mux)

        autoaudiosrc.link(audioconvert)
        audioconvert.link(vorbisenc)
        vorbisenc.link(que_audio_mux)

        pad = que_audio_mux.get_static_pad("src")
        audiobin.add_pad(Gst.GhostPad.new("src", pad))
        print "\taudiobin:"
        print audiobin.children
        # <<< Audio

        oggmux = Gst.ElementFactory.make('oggmux', "oggmux")
        oggmux.set_property("skeleton", True)

        sink = Gst.ElementFactory.make('filesink', "archivo")
        sink.set_property("location", self.file_path)

        self.pipeline.add(videobin)
        self.pipeline.add(audiobin)
        self.pipeline.add(oggmux)
        self.pipeline.add(sink)

        audiobin.link(oggmux)
        videobin.link(oggmux)
        oggmux.link(sink)

        print "\tself.pipeline:"
        print self.pipeline.children
        '''
        fakesink = Gst.ElementFactory.make('fakesink', "fakesink")
        self.pipeline.add(videobin)
        self.pipeline.add(fakesink)
        videobin.link(fakesink)'''

        self.bus = self.pipeline.get_bus()
        self.bus.enable_sync_message_emission()
        self.bus.add_signal_watch()

        self.bus.connect("sync-message::element", self.on_sync_message)
        self.bus.connect("message", self.on_message)
Example 5
    def build_pipeline(self):
        sink = 'xvimagesink'
        if config.data.player['vout'] == 'x11':
            sink = 'ximagesink'
        elif config.data.player['vout'] == 'gl':
            sink = 'glimagesink'
        if config.data.os == 'win32':
            sink = 'd3dvideosink'

        self.player = Gst.ElementFactory.make("playbin", "player")

        self.video_sink = Gst.Bin()

        # TextOverlay does not seem to be present in win32 installer. Do without it.
        try:
            self.captioner = Gst.ElementFactory.make('textoverlay',
                                                     'captioner')
            # FIXME: move to config.data
            self.captioner.props.font_desc = 'Sans 24'
            #self.caption.props.text="Foobar"
        except Exception:
            self.captioner = None

        self.imageoverlay = None
        if config.data.player['svg'] and svgelement:
            try:
                self.imageoverlay = Gst.ElementFactory.make(
                    svgelement, 'overlay')
                self.imageoverlay.props.fit_to_frame = True
            except Exception:
                logger.error("Gstreamer SVG overlay element is not available",
                             exc_info=True)

        self.imagesink = Gst.ElementFactory.make(sink, 'sink')
        try:
            self.imagesink.set_property('force-aspect-ratio', True)
        except TypeError:
            logger.warning("Cannot set force-aspect-ratio on video sink")

        elements = []
        elements.append(Gst.ElementFactory.make('videoconvert', None))
        elements.append(Gst.ElementFactory.make('videoscale', None))
        if self.imageoverlay is not None:
            # FIXME: Issue: rsvgoverlay.fit_to_frame expects that the
            # dimensions of the input buffers match the aspect ratio
            # of the original video, which is currently not the case.
            elements.append(Gst.ElementFactory.make('queue', None))
            elements.append(self.imageoverlay)
        if self.captioner is not None:
            elements.append(self.captioner)

        csp = Gst.ElementFactory.make('videoconvert', None)
        elements.extend((csp, self.imagesink))

        for el in elements:
            self.video_sink.add(el)
        if len(elements) >= 2:
            for src, dst in zip(elements, elements[1:]):
                src.link(dst)

        self.log("using " + sink)

        # Note: it is crucial to make ghostpad an attribute, so that
        # it is not garbage-collected at the end of the build_pipeline
        # method.
        self._video_ghostpad = Gst.GhostPad.new(
            'sink', elements[0].get_static_pad('video_sink')
            or elements[0].get_static_pad('sink'))
        # Idem for elements
        self._video_elements = elements

        logger.debug("Using video sink pipeline %s", self._video_elements)
        self.video_sink.add_pad(self._video_ghostpad)

        self.player.props.video_sink = self.video_sink
        self.player.props.force_aspect_ratio = True

        self.audio_sink = Gst.parse_launch(
            'scaletempo name=scaletempo ! audioconvert ! audioresample ! autoaudiosink'
        )
        self.audio_sink.add_pad(
            Gst.GhostPad.new(
                'sink',
                self.audio_sink.get_child_by_name('scaletempo').get_static_pad(
                    'sink')))
        self.player.props.audio_sink = self.audio_sink

        bus = self.player.get_bus()
        bus.enable_sync_message_emission()
        bus.connect('sync-message::element', self.on_sync_message)
        bus.add_signal_watch()
        bus.connect('message::error', self.on_bus_message_error)
        bus.connect('message::warning', self.on_bus_message_warning)
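
The on_sync_message handler connected above is not shown. The usual pattern for embedding the video sink in a GTK widget looks roughly like this; the self.xid attribute (the X window id of the drawing area) and the GstVideo import are assumptions, not code from the project:

    def on_sync_message(self, bus, message):
        # requires `from gi.repository import GstVideo` so that the
        # VideoOverlay methods are bound on the sink element
        structure = message.get_structure()
        if structure is None:
            return
        if structure.get_name() == 'prepare-window-handle':
            message.src.set_window_handle(self.xid)
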
Example 6
  def __init__(self,myplaylist=None,loop=None,starttoplay=False,myaudiosink=None):
    self.playlist=myplaylist
    #self.player = gst.element_factory_make("playbin2", "playbin2")
    Gst.init(None)
    self.player = Gst.ElementFactory.make("playbin", None)
    self.playmode = "Stopped"
    self.recoverplaymode = "Stopped"
    self.statuschanged = False
    self.starttoplay=starttoplay
    self.loop=loop

    if self.player is None:
        logging.error( "creating player")
        raise Exception("cannot create player!")

    #fakesink = gst.element_factory_make("fakesink", "fakesink")
    fakesink = Gst.ElementFactory.make("fakesink", None)
    self.player.set_property("video-sink", fakesink)

    ##icecast
    #print "Icecast selected"
    #bin = gst.Bin("my-bin")

    #audioconvert = gst.element_factory_make("audioconvert")
    #bin.add(audioconvert)
    #pad = audioconvert.get_pad("sink")
    #ghostpad = gst.GhostPad("sink", pad)
    #bin.add_pad(ghostpad)

    #audioresample = gst.element_factory_make("audioresample")
    #audioresample.set_property("quality", 0)
    #bin.add(audioresample)
    #capsfilter = gst.element_factory_make('capsfilter')
    #capsfilter.set_property('caps', gst.caps_from_string('audio/x-raw,rate=44100,channels=2'))
    ##bin.add(capsfilter)
    #vorbisenc = gst.element_factory_make("vorbisenc")
    #vorbisenc.set_property("quality", 0)
    #bin.add(vorbisenc)
    #oggmux = gst.element_factory_make("oggmux")
    #bin.add(oggmux)

    #streamsink = gst.element_factory_make("shout2send", "streamsink")
    #streamsink.set_property("ip", "localhost")
    ##streamsink.set_property("username", "source")
    #streamsink.set_property("password", "ackme")
    #streamsink.set_property("port", 8000)
    #streamsink.set_property("mount", "/myradio.ogg")
    #bin.add(streamsink)

    ### Link the elements
    #queue = gst.element_factory_make("queue", "queue")
    ##queue.link(audioresample, capsfilter)
    #bin.add(queue)

    #gst.element_link_many(audioconvert,audioresample,queue,vorbisenc,oggmux,streamsink)
    #self.player.set_property("audio-sink", bin)


    #audiosink = gst.element_factory_make("autoaudiosink")
    #audiosink = gst.element_factory_make("jackaudiosink")


    # ReplayGain
    if (Gst.ElementFactory.find('rgvolume') and
        Gst.ElementFactory.find('rglimiter')):
      self.audioconvert = Gst.ElementFactory.make('audioconvert',None)

      self.rgvolume = Gst.ElementFactory.make('rgvolume', None)
      self.rgvolume.set_property('album-mode', False)
      self.rgvolume.set_property('pre-amp', 0)
      self.rgvolume.set_property('fallback-gain', 0)
      self.rgvolume.set_property('headroom', 0)

      self.rglimiter = Gst.ElementFactory.make('rglimiter',None)
      self.rglimiter.set_property('enabled', True)

      self.rgfilter = Gst.Bin()
      self.rgfilter.add(self.rgvolume)
      self.rgfilter.add(self.rglimiter)
      self.rgvolume.link(self.rglimiter)
      self.rgfilter.add_pad(Gst.GhostPad.new('sink',
                self.rgvolume.get_static_pad('sink')))
      self.rgfilter.add_pad(Gst.GhostPad.new('src',
                self.rglimiter.get_static_pad('src')))
      try:
        self.player.set_property('audio-filter', self.rgfilter)
      except Exception:
        logging.error("Error setting the ReplayGain filter on the player")
        #raise Exception("cannot manage replaygain!")
        

#    TODO replaygain
#+++++++
#
#Example 40
#
#From project rhythmbox-multiple-libraries, under directory plugins/replaygain/replaygain, in source file player.py.
#
#def setup_playbin2_mode(self):
#		print "using output filter for rgvolume and rglimiter"
#		self.rgvolume = gst.element_factory_make("rgvolume")
#		self.rgvolume.connect("notify::target-gain", self.playbin2_target_gain_cb)
#		self.rglimiter = gst.element_factory_make("rglimiter")
#
#		# on track changes, we need to reset the rgvolume state, otherwise it
#		# carries over the tags from the previous track
#		self.pec_id = self.shell_player.connect('playing-song-changed', self.playing_entry_changed)
#
#		# watch playbin2's uri property to see when a new track is opened
#		playbin = self.player.props.playbin
#		if playbin is None:
#			self.player.connect("notify::playbin", self.playbin2_notify_cb)
#		else:
#			playbin.connect("notify::uri", self.playbin2_uri_notify_cb)
#
#		self.rgfilter = gst.Bin()
#		self.rgfilter.add(self.rgvolume, self.rglimiter)
#		self.rgvolume.link(self.rglimiter)
#		self.rgfilter.add_pad(gst.GhostPad("sink", self.rgvolume.get_static_pad("sink")))
#		self.rgfilter.add_pad(gst.GhostPad("src", self.rglimiter.get_static_pad("src")))
#		self.player.add_filter(self.rgfilter)
#
#+++++++++

    if myaudiosink is None: myaudiosink = "autoaudiosink"
    audiosink = Gst.ElementFactory.make(myaudiosink,None)
    self.player.set_property("audio-sink", audiosink)

#
#    self.player.set_property("audio-sink", streamsink)

    bus = self.player.get_bus()
    bus.add_signal_watch()
#    bus.connect("message",                self.on_message)
    bus.connect('message::eos',           self.on_message_eos)
    bus.connect('message::error',         self.on_message_error)
    bus.connect("message::state-changed", self.on_message_state_changed)
Example 7
    def __init_pipeline(self):
        """Creates a gstreamer pipeline. Returns True on success."""

        if self.bin:
            return True

        # reset error state
        self.error = False

        pipeline = config.get("player", "gst_pipeline")
        try:
            pipeline, self._pipeline_desc = GStreamerSink(pipeline)
        except PlayerError as e:
            self._error(e)
            return False

        if self._use_eq and Gst.ElementFactory.find('equalizer-10bands'):
            # The equalizer only operates on 16-bit ints or floats, and
            # will only pass these types through even when inactive.
            # We push floats through to this point, then let the second
            # audioconvert handle pushing to whatever the rest of the
            # pipeline supports. As a bonus, this seems to automatically
            # select the highest-precision format supported by the
            # rest of the chain.
            filt = Gst.ElementFactory.make('capsfilter', None)
            filt.set_property('caps',
                              Gst.Caps.from_string('audio/x-raw,format=F32LE'))
            eq = Gst.ElementFactory.make('equalizer-10bands', None)
            self._eq_element = eq
            self.update_eq_values()
            conv = Gst.ElementFactory.make('audioconvert', None)
            resample = Gst.ElementFactory.make('audioresample', None)
            pipeline = [filt, eq, conv, resample] + pipeline

        # playbin2 has started to control the volume through pulseaudio,
        # which means the volume property can change without us noticing.
        # Use our own volume element for now until this works with PA.
        self._int_vol_element = Gst.ElementFactory.make('volume', None)
        pipeline.insert(0, self._int_vol_element)

        # Get all plugin elements and append audio converters.
        # playbin already includes one at the end
        plugin_pipeline = []
        for plugin in self._get_plugin_elements():
            plugin_pipeline.append(plugin)
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioconvert', None))
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioresample', None))
        pipeline = plugin_pipeline + pipeline

        bufbin = Gst.Bin()
        for element in pipeline:
            assert element is not None, pipeline
            bufbin.add(element)

        PIPELINE_ERROR = PlayerError(_("Could not create GStreamer pipeline"))

        if len(pipeline) > 1:
            if not link_many(pipeline):
                print_w("Linking the GStreamer pipeline failed")
                self._error(PIPELINE_ERROR)
                return False

        # Test to ensure output pipeline can preroll
        bufbin.set_state(Gst.State.READY)
        result, state, pending = bufbin.get_state(timeout=STATE_CHANGE_TIMEOUT)
        if result == Gst.StateChangeReturn.FAILURE:
            bufbin.set_state(Gst.State.NULL)
            print_w("Prerolling the GStreamer pipeline failed")
            self._error(PIPELINE_ERROR)
            return False

        # see if the sink provides a volume property, if yes, use it
        sink_element = pipeline[-1]
        if isinstance(sink_element, Gst.Bin):
            sink_element = iter_to_list(sink_element.iterate_recurse)[-1]

        self._ext_vol_element = None
        if hasattr(sink_element.props, "volume"):
            self._ext_vol_element = sink_element

            # In case we use the sink volume directly we can increase buffering
            # without affecting the volume change delay too much and save some
            # CPU time... (2x default for now).
            if hasattr(sink_element.props, "buffer_time"):
                sink_element.set_property("buffer-time", 400000)

            def ext_volume_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "volume")

            self._ext_vol_element.connect("notify::volume", ext_volume_notify)

        self._ext_mute_element = None
        if hasattr(sink_element.props, "mute") and \
                sink_element.get_factory().get_name() != "directsoundsink":
            # directsoundsink has a mute property but it doesn't work
            # https://bugzilla.gnome.org/show_bug.cgi?id=755106
            self._ext_mute_element = sink_element

            def mute_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "mute")

            self._ext_mute_element.connect("notify::mute", mute_notify)

        # Make the sink of the first element the sink of the bin
        gpad = Gst.GhostPad.new('sink', pipeline[0].get_static_pad('sink'))
        bufbin.add_pad(gpad)

        self.bin = Gst.ElementFactory.make('playbin', None)
        assert self.bin

        bus = self.bin.get_bus()
        bus.add_signal_watch()
        self.__bus_id = bus.connect('message', self.__message, self._librarian)

        self.bin = BufferingWrapper(self.bin, self)
        self.__atf_id = self.bin.connect('about-to-finish',
                                         self.__about_to_finish)

        # set buffer duration
        duration = config.getfloat("player", "gst_buffer")
        self._set_buffer_duration(int(duration * 1000))

        # connect playbin to our plugin/volume/eq pipeline
        self.bin.set_property('audio-sink', bufbin)

        # by default playbin will render video -> suppress using fakesink
        fakesink = Gst.ElementFactory.make('fakesink', None)
        self.bin.set_property('video-sink', fakesink)

        # disable all video/text decoding in playbin
        GST_PLAY_FLAG_VIDEO = 1 << 0
        GST_PLAY_FLAG_TEXT = 1 << 2
        flags = self.bin.get_property("flags")
        flags &= ~(GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_TEXT)
        self.bin.set_property("flags", flags)

        # find the (uri)decodebin after setup and use autoplug-sort
        # to sort elements like decoders
        def source_setup(*args):
            def autoplug_sort(decode, pad, caps, factories):
                def set_prio(x):
                    i, f = x
                    i = {"mad": -1, "mpg123audiodec": -2}.get(f.get_name(), i)
                    return (i, f)

                return list(zip(*sorted(map(set_prio, enumerate(factories)))))[1]

            for e in iter_to_list(self.bin.iterate_recurse):
                try:
                    e.connect("autoplug-sort", autoplug_sort)
                except TypeError:
                    pass
                else:
                    break

        self.bin.connect("source-setup", source_setup)

        if self.has_external_volume:
            # ReplayGain information gets lost when destroying
            self._reset_replaygain()
        else:
            # Restore volume/ReplayGain and mute state
            self.volume = self._volume
            self.mute = self._mute

        if self.song:
            self.bin.set_property('uri', self.song("~uri"))

        return True
Example 8
vconvert1 = Gst.ElementFactory.make("videoconvert", 'vconvert1')

filter2 = Gst.ElementFactory.make("capsfilter", 'filter2')
filter2.set_property(
    'caps',
    Gst.Caps.from_string("video/x-raw,format=I420,width=640,height=480"))

vconvert2 = Gst.ElementFactory.make("videoconvert", 'vconvert2')

vsink = Gst.ElementFactory.make("xvimagesink")

#vsink.set_property('fullscreen', True)
# create the pipeline

p = Gst.Bin('happybin')
p.add(newElement)
p.add(vconvert1)
p.add(filter2)
p.add(vconvert2)
p.add(vsink)

newElement.link(vconvert1)
vconvert1.link(filter2)
filter2.link(vconvert2)
vconvert2.link(vsink)

p.add_pad(Gst.GhostPad.new('sink', newElement.get_static_pad('sink')))

playbin = Gst.ElementFactory.make("playbin")
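
The snippet builds the happybin filter chain and a playbin but never connects them (newElement is whatever upstream video element the original code created). A short usage sketch; the URI and the note about the main loop are illustrative assumptions:

playbin.set_property("video-sink", p)  # happybin becomes playbin's video sink
playbin.set_property("uri", "file:///tmp/example.webm")
playbin.set_state(Gst.State.PLAYING)
# a GLib.MainLoop (or Gtk main loop) must be running for playback to proceed
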
Example 9
    def _create_video_encode_bin(self, video_config):
        videoencodebin = Gst.Bin()
        outcaps = None
        if video_config.codec == 'H264':
            encoder = Gst.ElementFactory.make("x264enc", None)
            encoder.set_property('bitrate', video_config.bitrate)
            encoder.set_property('pass', 0)
            caps_str = "video/x-h264,stream-format=avc"
            if video_config.profile:
                caps_str += ',profile=%s' % video_config.profile
            outcaps = Gst.Caps.from_string(caps_str)
            outcaps = None
        else:
            raise Exception("Unknown encoder %s", video_config.codec)
        scale = Gst.ElementFactory.make("videoscale", None)
        colorspace = Gst.ElementFactory.make("videoconvert", None)
        framerate = Gst.ElementFactory.make("videorate", None)
        timeoverlay = Gst.ElementFactory.make("timeoverlay", None)
        textoverlay = Gst.ElementFactory.make("textoverlay", None)
        incapsfilter = Gst.ElementFactory.make("capsfilter", None)
        outcapsfilter = Gst.ElementFactory.make("capsfilter", None)
        muxer = Gst.ElementFactory.make("mp4dashmux", None)

        caps_str = 'video/x-raw'
        for f in ['width', 'height']:
            v = getattr(video_config, f)
            if v is not None:
                caps_str += ',%s=%s' % (f, v)
        incaps = Gst.Caps.from_string(caps_str)
        incapsfilter.set_property('caps', incaps)
        if outcaps:
            outcapsfilter.set_property('caps', outcaps)
        elements = [
            scale, colorspace, framerate, incapsfilter, timeoverlay,
            textoverlay, encoder, outcapsfilter, muxer
        ]
        for e in elements:
            videoencodebin.add(e)
        for i in range(len(elements) - 1):
            elements[i].link(elements[i + 1])

        if self.config.overlay_stream_desc:
            textoverlay.set_property("font-desc", "DejaVu Sans Mono, 15")
            textoverlay.set_property("valignment", "center")
            textoverlay.set_property("halignment", "left")
            if video_config.height:
                textoverlay.set_property("deltay", -(video_config.height / 10))
            else:
                textoverlay.set_property("deltay", -50)
            textoverlay.set_property(
                "text", "%s %sx%s" %
                (self.config.title, video_config.width, video_config.height))
        else:
            textoverlay.set_property('silent', True)
        if self.config.overlay_timestamps:
            timeoverlay.set_property("font-desc", "DejaVu Sans Mono, 15")
            timeoverlay.set_property("valignment", "center")
            timeoverlay.set_property("halignment", "left")
        else:
            timeoverlay.set_property('silent', True)

        inpad = Gst.GhostPad.new('sink', scale.get_static_pad('sink'))
        outpad = Gst.GhostPad.new('src', muxer.get_static_pad('src'))
        videoencodebin.add_pad(inpad)
        videoencodebin.add_pad(outpad)
        return videoencodebin
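
A usage sketch for _create_video_encode_bin(). The configuration object is project-specific and not shown above, so VideoConfig below only models the attributes the method reads, and "streamer" stands for an instance of the surrounding class; mp4dashmux is likewise a project-specific muxer that must be installed for the bin to build:

from collections import namedtuple

VideoConfig = namedtuple("VideoConfig", "codec bitrate profile width height")
video_config = VideoConfig(codec="H264", bitrate=1500, profile="main",
                           width=1280, height=720)
encode_bin = streamer._create_video_encode_bin(video_config)
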
Example 10
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        log.error(err)
        log.debug(debug)
        emit_event("error", err)
    elif t == Gst.MessageType.STATE_CHANGED:
        state = get_gst_player_state()
        if state == Gst.State.PLAYING or state == Gst.State.PAUSED:
            auto_jump()


__player = Gst.ElementFactory.make("playbin", "player")
__scaletempo = Gst.ElementFactory.make("scaletempo", "scaletempo")
__scaletempo.sync_state_with_parent()

__audiobin = Gst.Bin("audioline")
__audiobin.add(__scaletempo)

__audiosink = Gst.ElementFactory.make("autoaudiosink", "audiosink")
__audiobin.add(__audiosink)

__scaletempo.link(__audiosink)
__pad = __scaletempo.get_static_pad("sink")
__audiobin.add_pad(Gst.GhostPad.new("sink", __pad))

__player.set_property("audio-sink", __audiobin)

__bus = __player.get_bus()
__bus.add_signal_watch()
__bus.connect("message", __on_gst_message)
__current_track = None
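
scaletempo only takes effect when the playback rate changes, which is done with a rate-carrying flushing seek rather than a property. A sketch of a rate helper for the module-level player above; the function name is illustrative:

def set_playback_rate(rate):
    # seek to the current position with a new rate; scaletempo keeps the pitch
    ok, position = __player.query_position(Gst.Format.TIME)
    if not ok:
        position = 0
    __player.seek(rate, Gst.Format.TIME,
                  Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE,
                  Gst.SeekType.SET, position,
                  Gst.SeekType.NONE, 0)
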
Example 11
    def _make_audio_pipeline(self):
        # Make two - one for URL playing, and one for content we already have
        
        self.audio_player = Gst.ElementFactory.make("playbin", "player")

        # now fit an equalizer into that playbin
        
        equalizer = Gst.ElementFactory.make("equalizer-3bands", "equalizer")
        convert = Gst.ElementFactory.make("audioconvert", "convert")
        
        asink = Gst.ElementFactory.make("autoaudiosink", "audio_sink")

        audiobin = Gst.Bin("audio_sink_bin")
        audiobin.add(equalizer)
        audiobin.add(convert)
        audiobin.add(asink)

        equalizer.link(convert)
        convert.link(asink)

        ghost_pad = Gst.GhostPad.new("sink",
                                     Gst.Element.get_static_pad(equalizer, "sink"))
        ghost_pad.set_active(True)
        audiobin.add_pad(ghost_pad)
        
        self.audio_player.set_property('audio-sink', audiobin)

        bus = self.audio_player.get_bus()
        bus.enable_sync_message_emission()
        bus.add_signal_watch()
        bus.connect('message::tag', self.on_tag)
        bus.connect('message::error', self.on_error)
        bus.connect('message::eos', self.on_eos, self.audio_player)
        bus.connect('message::buffering', self.on_buffering)
        bus.connect('message::state-changed', self.on_state_changed)

        pipeline = Gst.Pipeline("audio_pipeline")
        src = Gst.ElementFactory.make("appsrc")
        mad = Gst.ElementFactory.make("mad")
        convert = Gst.ElementFactory.make("audioconvert")
        volume = Gst.ElementFactory.make("volume")
        sink = Gst.ElementFactory.make("alsasink")

        pipeline.add(src)
        pipeline.add(mad)
        pipeline.add(convert)
        pipeline.add(volume)
        pipeline.add(sink)

        src.link(mad)
        mad.link(convert)
        convert.link(volume)
        volume.link(sink)

        bus = pipeline.get_bus()
        bus.enable_sync_message_emission()
        bus.add_signal_watch()
        bus.connect('message::tag', self.on_tag)
        bus.connect('message::error', self.on_error)
        bus.connect('message::eos', self.on_eos, pipeline)
        bus.connect('message::buffering', self.on_buffering)
        bus.connect('message::state-changed', self.on_state_changed)

        pipeline.token = ''
        
        self.audio_source = src
        self.audio_pipeline = pipeline
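
Nothing above actually feeds the appsrc-based pipeline. A sketch of pushing an MP3 byte string into it; note that the mad decoder has been dropped from recent GStreamer releases (mpg123audiodec is the usual replacement), so this assumes an installation that still ships it, and the caps string and method name are illustrative:

    def play_mp3_bytes(self, data):
        self.audio_source.set_property(
            "caps", Gst.Caps.from_string("audio/mpeg,mpegversion=1,layer=3"))
        self.audio_pipeline.set_state(Gst.State.PLAYING)
        self.audio_source.emit("push-buffer", Gst.Buffer.new_wrapped(data))
        self.audio_source.emit("end-of-stream")
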
Example 12
    def init_core(self):
        #                                Song object            display text  icon  album art
        self.songs_model = Gtk.ListStore(GObject.TYPE_PYOBJECT, str,          str,  GdkPixbuf.Pixbuf)
        #                                   Station object         station name  index
        self.stations_model = Gtk.ListStore(GObject.TYPE_PYOBJECT, str,          int)

        Gst.init(None)
        self._query_duration = Gst.Query.new_duration(Gst.Format.TIME)
        self._query_position = Gst.Query.new_position(Gst.Format.TIME)
        self.player = Gst.ElementFactory.make("playbin", "player")

        # split the stream out for saving
        # https://wiki.ubuntu.com/Novacut/GStreamer1.0#Examples
        split = Gst.ElementFactory.make("tee")

        sink_bin = Gst.Bin()
        sink_bin.add(split)

        sink_bin.add_pad(Gst.GhostPad.new("sink", split.get_static_pad("sink")))

        self.player.set_property("audio-sink", sink_bin) # comment out this line to switch back to not saving the stream

        qrep = Gst.ElementFactory.make("queue")
        rep = Gst.ElementFactory.make("autoaudiosink")
        sink_bin.add(qrep)
        sink_bin.add(rep)
        split.link(qrep)
        qrep.link(rep)

        # http://gstreamer.freedesktop.org/documentation/plugins.html
        qfs = Gst.ElementFactory.make("queue")
        enc = Gst.ElementFactory.make("lamemp3enc") # vorbisenc
        # constant bit rate
        enc.set_property("cbr", True) # constant bitrate
        enc.set_property("target",1) # target bitrate, 0 to target quality and then bitrate does not mater
        enc.set_property("bitrate",128) # incoming stream is 64 bit AAC+, so we upconvert to 128 to get every bit out of ut. Could user 192 or even 224 for pandora one subsribers
        # for VBR use the following:
        #enc.set_property("cbr",False)
        #enc.set_property("target",0) # 0 to target quality and then bitrate does not mater
        #enc.set_property("quality",1) # 0 to 10. 0 is the best
        enc.set_property("encoding-engine-quality",2) # 2 high quality. 0 fast, 1 standard

        # need the tagger to start the stream with an ID3 tag frame for later mutagen consumption
        self.tag = Gst.ElementFactory.make("id3v2mux") #vorbistag

        # for ogg vorbis do
        #enc = Gst.ElementFactory.make("lamemp3enc") # vorbisenc
        #tag = Gst.ElementFactory.make("id3v2mux") #vorbistag
        #mux = Gst.ElementFactory.make("oggmux")
        self.fs = Gst.ElementFactory.make("filesink")
        self.fs.set_property("location", "test.file") #dummy location
        self.fs.set_property("async",False)

        sink_bin.add(qfs)
        sink_bin.add(enc)
        sink_bin.add(self.tag)
        #sink_bin.add(mux)
        sink_bin.add(self.fs)
        split.link(qfs)
        qfs.link(enc)
        enc.link(self.tag)
        # for ogg vorbis extend the chain
        #self.tag.link(mux)
        #mux.link(self.fs)
        self.tag.link(self.fs)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message::async-done", self.on_gst_async_done)
        bus.connect("message::duration-changed", self.on_gst_duration_changed)
        bus.connect("message::eos", self.on_gst_eos)
        bus.connect("message::buffering", self.on_gst_buffering)
        bus.connect("message::error", self.on_gst_error)
        bus.connect("message::element", self.on_gst_element)
        bus.connect("message::tag", self.on_gst_tag)
        self.player.connect("notify::volume", self.on_gst_volume)
        self.player.connect("notify::source", self.on_gst_source)

        self.player_status = PlayerStatus()

        self.stations_dlg = None

        self.playing = None # None is a special "Waiting to play" state
        self.current_song_index = None
        self.current_station = None
        self.current_station_id = self.preferences.get('last_station_id')

        self.auto_retrying_auth = False
        self.have_stations = False
        self.playcount = 0
        self.gstreamer_errorcount_1 = 0
        self.gstreamer_errorcount_2 = 0
        self.gstreamer_error = ''
        self.waiting_for_playlist = False
        self.start_new_playlist = False
        self.ui_loop_timer_id = 0
        self.worker = GObjectWorker()
        self.art_worker = GObjectWorker()

        aa = GdkPixbuf.Pixbuf.new_from_file(get_media_file('album'))

        self.default_album_art = aa.scale_simple(ALBUM_ART_FULL_SIZE, ALBUM_ART_FULL_SIZE, GdkPixbuf.InterpType.BILINEAR)
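
The filesink location above is only a dummy; before each song the recording branch has to be pointed at a real file, and the location property can only be changed while the filesink is not PLAYING. A sketch of how a per-song start could look; the method and argument names are illustrative:

    def start_song(self, song_url, filename):
        self.player.set_state(Gst.State.NULL)  # also resets the sink_bin branch
        self.fs.set_property("location", filename)
        self.player.set_property("uri", song_url)
        self.player.set_state(Gst.State.PLAYING)
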
Example 13
    def __init__(self):

        # Set default properties
        self.uri = ""
        self.state = playback.STATE_STOPPED
        self.recorder_state = playback.STATE_RECORDER_OFF
        self.rec_trigger = False
        self.output_str = "./%t"
        self.output_fmt = playback.FORMAT_OGG
        self.ext = ".ogg"
        self.recopt = playback.REC_IMMEDIATALY
        self.cb_cg_track = None
        self.ud_cg_track = None
        self.cb_cg_time = None
        self.ud_cg_time = None
        self.cb_cg_state = None
        self.ud_cg_state = None
        self.cb_cg_buffer = None
        self.ud_cg_buffer = None
        self.cb_cg_duration = None
        self.ud_cg_duration = None
        self.cb_eos_reached = None
        self.ud_eos_reached = None
        self.cb_error = None
        self.ud_error = None

        # Properties of stream
        self.organization = "unknown"
        self.bitrate = 0
        self.genre = "unknown"
        self.title = "untitled"
        self.duration = None
        self.rec_plugged = False

        ## Gstreamer

        self.lock1 = RLock()

        # Create the pipeline
        self.pipeline = Gst.Pipeline()  #Gst.Pipeline("playback")

        # uridecodebin
        self.uridec = Gst.ElementFactory.make("uridecodebin", "uridecoder")
        self.uridec.connect("pad-added", self.cb_pad_added)
        self.pipeline.add(self.uridec)

        # Create the player Bin
        self.player = Gst.Bin()

        queue = Gst.ElementFactory.make("queue", "queue")
        convert = Gst.ElementFactory.make("audioconvert", "converter")
        output = Gst.ElementFactory.make("autoaudiosink", "output")

        self.player.add(queue)
        self.player.add(convert)
        self.player.add(output)

        queue.link(convert)
        convert.link(output)

        pad = queue.get_static_pad("sink")
        self.player.add_pad(Gst.GhostPad.new("sink", pad))

        # Create recorder Bin
        self.recorder = None
        self.rplugins = self.get_rec_plugins()
        if playback.FORMAT_OGG in self.rplugins:
            self.set_format(playback.FORMAT_OGG)
        elif playback.FORMAT_MP3 in self.rplugins:
            self.set_format(playback.FORMAT_MP3)
        else:
            print("ERROR: Could not find a plugin to record sound.")
            print("       Please, install gstreamer-base-plugins")

        # Create 1-to-N pipe fitting
        tee = Gst.ElementFactory.make("tee", "tee")
        teepad_0 = tee.get_request_pad("src_0")
        teepad_1 = tee.get_request_pad("src_1")

        # Add to pipeline
        #self.pipeline.add(tee, self.player)
        self.pipeline.add(tee)
        self.pipeline.add(self.player)

        # Link tee with player and recorder
        pad_p = self.player.get_static_pad("sink")
        teepad_0.link(pad_p)

        # Here the pipeline is configured only to play the stream

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.cb_messages)