Example #1
    def __about_to_finish(self, playbin):
        print_d("About to finish (async)")

        try:
            uri = self._runner.call(self.__about_to_finish_sync,
                                    priority=GLib.PRIORITY_HIGH,
                                    timeout=0.5)
        except MainRunnerTimeoutError as e:
            # Due to some locks being held during this signal we can get
            # into a deadlock when a seek or state change event happens
            # in the mainloop before our function gets scheduled.
            # In this case abort and do nothing, which results
            # in a non-gapless transition.
            print_d("About to finish (async): %s" % e)
            return
        except MainRunnerAbortedError as e:
            print_d("About to finish (async): %s" % e)
            return
        except MainRunnerError:
            util.print_exc()
            return

        if uri is not None:
            print_d("About to finish (async): setting uri")
            playbin.set_property('uri', uri2gsturi(uri))
        print_d("About to finish (async): done")
Example #2
    def _next_song(self, first=False):
        if self._current:
            self._current.progress = 1.0
            self._current.done = True
            self._emit_update()
            self._done.append(self._current)
            self._current = None

        if not self._songs:
            self.pipe.set_state(Gst.State.NULL)
            self.emit("done", self._album)
            return

        if first:
            self.analysis.set_property("num-tracks", len(self._songs))
        else:
            self.analysis.set_locked_state(True)
            self.pipe.set_state(Gst.State.NULL)

        self._current = self._songs.pop(0)
        self.decode.set_property("uri", uri2gsturi(self._current.uri))
        if not first:
            # flush, so the element takes new data after EOS
            pad = self.analysis.get_static_pad("src")
            pad.send_event(Gst.Event.new_flush_start())
            pad.send_event(Gst.Event.new_flush_stop(True))
            self.analysis.set_locked_state(False)
        self.pipe.set_state(Gst.State.PLAYING)
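
The interesting part here is reusing the analysis element for the next song after it has already seen EOS: its state is locked while the rest of the pipeline is reset, then a flush pair clears the EOS condition before it is unlocked again. Condensed into a sketch, assuming `pipeline` and `elem` already exist and `elem` has received EOS:

elem.set_locked_state(True)             # keep elem out of the state change
pipeline.set_state(Gst.State.NULL)      # reset everything else
pad = elem.get_static_pad("src")
pad.send_event(Gst.Event.new_flush_start())      # drop pending data / EOS
pad.send_event(Gst.Event.new_flush_stop(True))   # reset running time
elem.set_locked_state(False)
pipeline.set_state(Gst.State.PLAYING)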
Example #3
    def _create_waveform(self, song, points):
        # Close any existing pipeline to avoid leaks
        self._clean_pipeline()

        if not song.is_file:
            return

        command_template = """
        uridecodebin name=uridec
        ! audioconvert
        ! level name=audiolevel interval={} post-messages=true
        ! fakesink sync=false"""
        interval = int(song("~#length") * 1E9 / points)
        if not interval:
            return
        print_d("Computing data for each %.3f seconds" % (interval / 1E9))

        command = command_template.format(interval)
        pipeline = Gst.parse_launch(command)
        pipeline.get_by_name("uridec").set_property("uri",
                                                    uri2gsturi(song("~uri")))

        bus = pipeline.get_bus()
        self._bus_id = bus.connect("message", self._on_bus_message, points)
        bus.add_signal_watch()

        pipeline.set_state(Gst.State.PLAYING)

        self._pipeline = pipeline
        self._new_rms_vals = []
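
The level element configured above posts an ELEMENT message on the bus for every interval. The _on_bus_message handler it connects to is not part of this excerpt; a plausible sketch of one that collects per-interval RMS values follows (the "level"/"rms" fields are GStreamer's documented message format, the 0..1 normalisation is an assumption):

    def _on_bus_message(self, bus, message, points):
        # Hypothetical handler matching the connect() call above.
        if message.type == Gst.MessageType.ELEMENT:
            structure = message.get_structure()
            if structure is not None and structure.get_name() == "level":
                rms_db = structure.get_value("rms")  # one dB value per channel
                if rms_db:
                    avg_db = sum(rms_db) / len(rms_db)
                    # map dB to a 0..1 amplitude for drawing
                    value = min(1.0, max(0.0, 10 ** (avg_db / 20)))
                    self._new_rms_vals.append(value)
        elif message.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR):
            self._clean_pipeline()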
Example #4
def get_tags(uri):
    """Returns (uri, tags_dict)"""

    tags = {}
    player = Gst.ElementFactory.make("playbin", "player")
    fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
    fakesink2 = Gst.ElementFactory.make("fakesink", "fakesink2")
    player.set_property("audio-sink", fakesink)
    player.set_property("video-sink", fakesink2)
    bus = player.get_bus()
    bus.add_signal_watch()

    ml = GLib.MainLoop()

    def message(bus, message, player):
        if message.type == Gst.MessageType.TAG:
            t = TagListWrapper(message.parse_tag(), merge=True)
            for k in t.keys():
                v = str(t[k])
                if not k.endswith("bitrate") and k in tags and \
                        v not in tags[k]:
                    tags[k].append(v)
                else:
                    tags[k] = [v]

            if "nominal-bitrate" in tags and "bitrate" not in tags:
                tags["bitrate"] = tags["nominal-bitrate"]

            # not everyone sends the codec, so ask the typefind element
            typefind = player.get_by_name("typefind")
            if typefind and typefind.props.caps and "audio-codec" not in tags:
                caps = typefind.props.caps
                if "audio/aac" in caps.to_string():
                    tags["audio-codec"] = ["AAC (Advanced Audio Coding)"]
                elif "audio/mpeg" in caps.to_string():
                    tags["audio-codec"] = ["MPEG 1 Audio, Layer 3 (MP3)"]

            if not set(NEEDED) - set(tags.keys()):
                ml.quit()
        elif message.type == Gst.MessageType.ERROR or \
                message.type == Gst.MessageType.EOS:
            ml.quit()
        elif message.type == Gst.MessageType.BUFFERING:
            percent = message.parse_buffering()
            if percent == 100:
                player.set_state(Gst.State.PLAYING)
            else:
                player.set_state(Gst.State.PAUSED)

    sig = bus.connect("message", message, player)
    player.set_property("uri", uri2gsturi(uri))
    player.set_state(Gst.State.PLAYING)
    # block up to one second for the state change to take effect
    player.get_state(Gst.SECOND)

    GLib.timeout_add(TIMEOUT * 1000, ml.quit)

    try:
        ml.run()
    except:
        pass

    bus.remove_signal_watch()
    bus.disconnect(sig)
    player.set_state(Gst.State.NULL)

    return uri, tags
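
NEEDED and TIMEOUT are module-level constants that are not part of this excerpt. Every value in the returned dict is a list of strings, so a hypothetical caller might look like this (the stream URL is a placeholder):

uri, tags = get_tags("https://example.com/stream")
title = tags.get("title", ["unknown"])[0]
bitrate = tags.get("bitrate", ["?"])[0]
print("%s (%s bps)" % (title, bitrate))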
Example #5
    def __init_pipeline(self):
        """Creates a gstreamer pipeline. Returns True on success."""

        if self.bin:
            return True

        # reset error state
        self.error = False

        pipeline = config.get("player", "gst_pipeline")
        try:
            pipeline, self._pipeline_desc = GStreamerSink(pipeline)
        except PlayerError as e:
            self._error(e)
            return False

        if self._use_eq and Gst.ElementFactory.find('equalizer-10bands'):
            # The equalizer only operates on 16-bit ints or floats, and
            # will only pass these types through even when inactive.
            # We push floats through to this point, then let the second
            # audioconvert handle pushing to whatever the rest of the
            # pipeline supports. As a bonus, this seems to automatically
            # select the highest-precision format supported by the
            # rest of the chain.
            filt = Gst.ElementFactory.make('capsfilter', None)
            filt.set_property('caps',
                              Gst.Caps.from_string('audio/x-raw,format=F32LE'))
            eq = Gst.ElementFactory.make('equalizer-10bands', None)
            self._eq_element = eq
            self.update_eq_values()
            conv = Gst.ElementFactory.make('audioconvert', None)
            resample = Gst.ElementFactory.make('audioresample', None)
            pipeline = [filt, eq, conv, resample] + pipeline

        # playbin2 has started to control the volume through pulseaudio,
        # which means the volume property can change without us noticing.
        # Use our own volume element for now until this works with PA.
        self._int_vol_element = Gst.ElementFactory.make('volume', None)
        pipeline.insert(0, self._int_vol_element)

        # Get all plugin elements and append audio converters.
        # playbin already includes one at the end
        plugin_pipeline = []
        for plugin in self._get_plugin_elements():
            plugin_pipeline.append(plugin)
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioconvert', None))
            plugin_pipeline.append(
                Gst.ElementFactory.make('audioresample', None))
        pipeline = plugin_pipeline + pipeline

        bufbin = Gst.Bin()
        for element in pipeline:
            assert element is not None, pipeline
            bufbin.add(element)

        if len(pipeline) > 1:
            if not link_many(pipeline):
                print_w("Linking the GStreamer pipeline failed")
                self._error(
                    PlayerError(_("Could not create GStreamer pipeline")))
                return False

        # see if the sink provides a volume property, if yes, use it
        sink_element = pipeline[-1]
        if isinstance(sink_element, Gst.Bin):
            sink_element = iter_to_list(sink_element.iterate_recurse)[-1]

        self._ext_vol_element = None
        if hasattr(sink_element.props, "volume"):
            self._ext_vol_element = sink_element

            # If we use the sink volume directly we can increase buffering
            # without affecting the volume change delay too much and save some
            # CPU time (2x the default for now).
            if hasattr(sink_element.props, "buffer_time"):
                sink_element.set_property("buffer-time", 400000)

            def ext_volume_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "volume")

            self._ext_vol_element.connect("notify::volume", ext_volume_notify)

        self._ext_mute_element = None
        if hasattr(sink_element.props, "mute") and \
                sink_element.get_factory().get_name() != "directsoundsink":
            # directsoundsink has a mute property but it doesn't work
            # https://bugzilla.gnome.org/show_bug.cgi?id=755106
            self._ext_mute_element = sink_element

            def mute_notify(*args):
                # gets called from a thread
                GLib.idle_add(self.notify, "mute")

            self._ext_mute_element.connect("notify::mute", mute_notify)

        # Expose the first element's sink pad as the sink pad of the bin
        gpad = Gst.GhostPad.new('sink', pipeline[0].get_static_pad('sink'))
        bufbin.add_pad(gpad)

        bin_ = Gst.ElementFactory.make('playbin', None)
        assert bin_

        self.bin = BufferingWrapper(bin_, self)
        self._seeker = Seeker(self.bin, self)

        bus = bin_.get_bus()
        bus.add_signal_watch()
        self.__bus_id = bus.connect('message', self.__message, self._librarian)

        self.__atf_id = self.bin.connect('about-to-finish',
            self.__about_to_finish)

        # set buffer duration
        duration = config.getfloat("player", "gst_buffer")
        self._set_buffer_duration(int(duration * 1000))

        # connect playbin to our plugin/volume/eq pipeline
        self.bin.set_property('audio-sink', bufbin)

        # by default playbin will render video -> suppress using fakesink
        fakesink = Gst.ElementFactory.make('fakesink', None)
        self.bin.set_property('video-sink', fakesink)

        # disable all video/text decoding in playbin
        GST_PLAY_FLAG_VIDEO = 1 << 0
        GST_PLAY_FLAG_TEXT = 1 << 2
        flags = self.bin.get_property("flags")
        flags &= ~(GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_TEXT)
        self.bin.set_property("flags", flags)

        if not self.has_external_volume:
            # Restore volume/ReplayGain and mute state
            self.props.volume = self._volume
            self.mute = self._mute

        # ReplayGain information gets lost when the pipeline is destroyed
        self._reset_replaygain()

        if self.song:
            self.bin.set_property('uri', uri2gsturi(self.song("~uri")))

        return True
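
Most of the work above is wiring a chain of extra elements (volume, EQ, plugins) in front of the configured sink and handing the whole thing to playbin as its audio-sink. The core trick, reduced to a sketch with only a converter and an automatic sink, is the ghost pad that exposes the first element's sink pad on the bin:

conv = Gst.ElementFactory.make("audioconvert", None)
sink = Gst.ElementFactory.make("autoaudiosink", None)

bufbin = Gst.Bin()
bufbin.add(conv)
bufbin.add(sink)
conv.link(sink)

# Without a ghost pad the bin has no sink pad of its own.
ghost = Gst.GhostPad.new("sink", conv.get_static_pad("sink"))
bufbin.add_pad(ghost)

playbin = Gst.ElementFactory.make("playbin", None)
playbin.set_property("audio-sink", bufbin)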
Example #6
    def _set_uri(self, uri: str) -> None:
        self.bin.set_property("uri", uri2gsturi(uri))
Example #7
def test_uri2gsturi():
    assert uri2gsturi("file:///foo/bar") == "file:///foo/bar"
    if is_win:
        assert uri2gsturi("file://foo/bar") == "file:////foo/bar"
    assert uri2gsturi("https://foo.bar.org") == "https://foo.bar.org"