Exemplo n.º 1
0
    def testSetAudioProp(self):
        """Checks that changing the project's audio channel count updates
        the audio track's restriction caps accordingly."""
        timeline = common.create_timeline_container()
        project = timeline.app.project_manager.current_project
        project.addUris([common.get_sample_uri("mp3_sample.mp3")])

        audio_tracks = [
            track for track in project.ges_timeline.tracks
            if isinstance(track, GES.AudioTrack)
        ]
        audio_track = audio_tracks[0]
        mainloop = common.create_main_loop()

        def progress_cb(project, progress, estimated_time):
            # Stop waiting as soon as all assets finished loading.
            if progress == 100:
                mainloop.quit()

        project.connect_after("asset-loading-progress", progress_cb)
        mainloop.run()

        def check_restriction(caps_string):
            # The track's restriction caps must match the expected caps
            # exactly (fixed-caps comparison).
            expected = Gst.Caps(caps_string)
            ccaps = audio_track.props.restriction_caps
            self.assertTrue(ccaps.is_equal_fixed(expected),
                            "%s != %s" % (ccaps, expected))

        check_restriction("audio/x-raw,channels=(int)2,rate=(int)44100")
        project.audiochannels = 6
        check_restriction("audio/x-raw,channels=(int)6,rate=(int)44100")
Exemplo n.º 2
0
    def assert_caps_equal(self, caps1, caps2):
        """Asserts the two caps are equal; either argument may be given
        as a caps string, which is parsed first."""
        caps1 = Gst.Caps(caps1) if isinstance(caps1, str) else caps1
        caps2 = Gst.Caps(caps2) if isinstance(caps2, str) else caps2
        self.assertTrue(caps1.is_equal(caps2),
                        "%s != %s" % (caps1.to_string(), caps2.to_string()))
Exemplo n.º 3
0
    def __get_encoding_profile(self,
                               encoding_target_file,
                               asset=None,
                               width=None,
                               height=None):
        """Loads the "default" profile from an encoding target preset file.

        Args:
            encoding_target_file (str): File name of the encoding target,
                relative to the presets directory.
            asset: Optional asset whose audio channel count is forced on
                the audio profile (workaround, see the bugzilla link below).
            width (Optional[int]): Forced video width restriction.
            height (Optional[int]): Forced video height restriction.

        Returns:
            The encoding profile, or None when the target has no "default"
            profile or an encoder/decoder is missing for one of its streams.
        """
        encoding_target = GstPbutils.EncodingTarget.load_from_file(
            os.path.join(get_gstpresets_dir(), encoding_target_file))
        encoding_profile = encoding_target.get_profile("default")

        if not encoding_profile:
            return None

        for profile in encoding_profile.get_profiles():
            profile_format = profile.get_format()
            # Do not verify we have an encoder/decoder for raw audio/video,
            # as they are not required.
            # Fixed: the second check used the bogus media type
            # "audio/x-video"; raw video caps are "video/x-raw".
            if profile_format.intersect(Gst.Caps("audio/x-raw(ANY)")) or \
                    profile_format.intersect(Gst.Caps("video/x-raw(ANY)")):
                continue
            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_ENCODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SRC, False):
                # No encoder available for this stream format.
                return None
            if height and width and profile.get_type_nick() == "video":
                profile.set_restriction(
                    Gst.Caps.from_string("video/x-raw, width=%d, height=%d" %
                                         (width, height)))

            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_DECODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SINK, False):
                # No decoder available for this stream format.
                return None

        if asset:
            # If we have an asset, we force audioconvert to keep
            # the number of channels
            # TODO: remove once https://bugzilla.gnome.org/show_bug.cgi?id=767226
            # is fixed
            info = asset.get_info()
            try:
                # TODO Be smarter about multiple streams
                audio_stream = info.get_audio_streams()[0]
                channels = audio_stream.get_channels()
                audio_profile = [
                    profile for profile in encoding_profile.get_profiles()
                    if isinstance(profile, GstPbutils.EncodingAudioProfile)
                ][0]
                audio_profile.set_restriction(
                    Gst.Caps.from_string("audio/x-raw,channels=%d" % channels))
            except IndexError:
                # No audio stream or no audio profile: nothing to restrict.
                pass

        return encoding_profile
Exemplo n.º 4
0
def createEncodingProfileSimple(container_caps, audio_caps, video_caps):
    """Builds a container encoding profile holding one audio and one video
    stream profile, each created from the given caps strings."""
    container = GstPbutils.EncodingContainerProfile.new(
        None, None, Gst.Caps(container_caps), None)
    audio = GstPbutils.EncodingAudioProfile.new(
        Gst.Caps(audio_caps), None, None, 0)
    video = GstPbutils.EncodingVideoProfile.new(
        Gst.Caps(video_caps), None, None, 0)

    container.add_profile(audio)
    container.add_profile(video)

    return container
Exemplo n.º 5
0
def create_encoding_profile_simple(container_caps, audio_caps, video_caps):
    """Creates a container encoding profile with one audio and one video
    sub-profile built from the given caps strings."""
    container_profile = GstPbutils.EncodingContainerProfile.new(
        None, None, Gst.Caps(container_caps), None)

    # Add the audio profile first, then the video profile.
    for sub_profile in (
            GstPbutils.EncodingAudioProfile.new(
                Gst.Caps(audio_caps), None, None, 0),
            GstPbutils.EncodingVideoProfile.new(
                Gst.Caps(video_caps), None, None, 0)):
        container_profile.add_profile(sub_profile)

    return container_profile
Exemplo n.º 6
0
    def openfile(self, filename, aModel):
        """Opens a media file for paused playback and frame interception.

        Builds a playbin with a custom video sink bin
        (capsfilter -> fakesink) whose sink pad carries a buffer probe, so
        decoded RGB frames can be intercepted by self.__onBufferProbe.

        NOTE(review): this uses the GStreamer 0.10 API
        (gst.element_factory_make, "video/x-raw-rgb" caps); it will not run
        against GStreamer 1.x.

        Args:
            filename: Path of the media file to open.
            aModel: Model object stored on self.model for later use.
        """
        self.images = list()
        self.errors = list()
        self.fileName = filename
        self.codec = ""
        # Tear down any previous playback before replacing the player.
        if (self.player != None):
            self.player.set_state(gst.STATE_NULL)

        self.__isEndOfStream = False
        self.player = gst.element_factory_make("playbin", "player")
        videoBin = gst.Bin("video")
        videoFilter = gst.element_factory_make("capsfilter", "videofilter")
        videoBin.add(videoFilter)
        # Force 24-bit RGB so the probe sees a predictable pixel format.
        videoFilter.set_property("caps",
                                 gst.Caps("video/x-raw-rgb, depth=24, bpp=24"))
        # Expose the filter's sink pad as the bin's sink pad.
        ghostPad = gst.GhostPad("sink", videoFilter.get_pad("sink"))
        videoBin.add_pad(ghostPad)
        videoSink = gst.element_factory_make("fakesink", "videosink")
        videoBin.add(videoSink)
        pad = videoSink.get_pad("sink")
        # Intercept every buffer that reaches the sink.
        pad.add_buffer_probe(self.__onBufferProbe)
        gst.element_link_many(videoFilter, videoSink)
        self.player.set_property("video-sink", videoBin)

        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()
        self.watchID = self.bus.connect("message", self.__onMessage)
        self.player.set_property("uri", "file://" + filename)
        # Pause (not play): prerolls the first frame without running playback.
        self.player.set_state(gst.STATE_PAUSED)
        self.model = aModel
Exemplo n.º 7
0
    def run(self, xid, device_config=None):
        """Builds and starts the capture pipeline: source -> capsfilter -> sink.

        Args:
            xid: X window id stored on self for use by the sync-message
                handler (presumably to embed the video output -- confirm
                against on_sync_message).
            device_config: Optional dict; when given, 'device' selects the
                capture device and the dict is also passed to get_caps_str()
                to build the filter caps.
        """
        self.xid = xid
        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        # sync-message emission must be enabled before the pipeline starts.
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        # Create GStreamer elements
        if device_config is None:
            # No configuration: let GStreamer pick any available video source.
            self.src = Gst.ElementFactory.make('autovideosrc', 'source')
        else:
            self.src = get_video_source()
            # The property naming the device differs per source element.
            device_key = get_video_device_key()
            self.src.set_property(device_key, device_config['device'])
        self.filter_ = Gst.ElementFactory.make('capsfilter', 'filter')
        self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
        # Do not sync on the clock: display frames as soon as they arrive.
        self.sink.set_property('sync', False)
        caps = Gst.Caps(get_caps_str(device_config))
        self.filter_.set_property('caps', caps)

        # Add elements to the pipeline
        self.pipeline.add(self.src)
        self.pipeline.add(self.filter_)
        self.pipeline.add(self.sink)

        self.src.link(self.filter_)
        self.filter_.link(self.sink)
        self.pipeline.set_state(Gst.State.PLAYING)
Exemplo n.º 8
0
    def test_encoder_restrictions(self):
        """Checks the mechanism to respect encoder specific restrictions."""
        project = self.create_simple_project()
        dialog = self.create_rendering_dialog(project)

        # Explicitly set the encoder
        self.assertTrue(
            set_combo_value(dialog.muxer_combo,
                            Gst.ElementFactory.find("matroskamux")))
        self.assertTrue(
            set_combo_value(dialog.video_encoder_combo,
                            Gst.ElementFactory.find("x264enc")))
        self.assertEqual(project.video_profile.get_restriction()[0]["format"],
                         "Y444")

        # Set encoding profile
        # Fixed: getattr without a default raises AttributeError when "copy"
        # is missing (old GstPbutils), instead of skipping this part of the
        # test; hasattr performs the intended availability check.
        if hasattr(GstPbutils.EncodingProfile,
                   "copy"):  # Available only in > 1.11
            profile = project.container_profile.copy()
            vprofile, = [
                p for p in profile.get_profiles()
                if isinstance(p, GstPbutils.EncodingVideoProfile)
            ]
            vprofile.set_restriction(Gst.Caps("video/x-raw"))
            project.set_container_profile(profile)
            # The encoder-specific restriction must survive the new profile.
            self.assertEqual(
                project.video_profile.get_restriction()[0]["format"], "Y444")
Exemplo n.º 9
0
    def build_pipeline(self, video_src, video_sink, pipeline):
        """Creates the decode/convert elements, adds them to *pipeline* and
        links the static parts of the audio and video branches.

        decodebin2 source pads appear dynamically, so the queues are left
        unlinked on their sink side here.

        NOTE(review): this uses the GStreamer 0.10 API
        (gst.element_factory_make, ffmpegcolorspace, video/x-raw-yuv).
        """

        # Create the pipeline elements
        self._decodebin = gst.element_factory_make("decodebin2")
        self._autoconvert = gst.element_factory_make("autoconvert")

        # As a precaution add a video capability filter
        # in the video processing pipeline.
        videocap = gst.Caps("video/x-raw-yuv")

        self._filter = gst.element_factory_make("capsfilter")
        self._filter.set_property("caps", videocap)

        # Converts the video from one colorspace to another
        self._color_space = gst.element_factory_make("ffmpegcolorspace")

        self._audioconvert = gst.element_factory_make("audioconvert")
        self._audiosink = gst.element_factory_make("autoaudiosink")

        # Queues
        self._queue1 = gst.element_factory_make("queue")
        self._queue2 = gst.element_factory_make("queue")

        pipeline.add(video_src, self._decodebin, self._autoconvert,
                     self._audioconvert, self._queue1, self._queue2,
                     self._filter, self._color_space, self._audiosink,
                     video_sink)

        # Link everything we can link now
        gst.element_link_many(video_src, self._decodebin)
        # Video branch: queue -> autoconvert -> capsfilter -> colorspace -> sink
        gst.element_link_many(self._queue1, self._autoconvert, self._filter,
                              self._color_space, video_sink)
        # Audio branch: queue -> audioconvert -> audiosink
        gst.element_link_many(self._queue2, self._audioconvert,
                              self._audiosink)
Exemplo n.º 10
0
 def __init__(self, format, preset=None, restriction=None, presence=0):
     """Initializes the audio profile; a missing restriction defaults
     to ANY caps."""
     GstPbutils.EncodingAudioProfile.__init__(self)
     self.set_format(format)
     if preset is not None:
         self.set_preset(preset)
     self.set_restriction(
         Gst.Caps('ANY') if restriction is None else restriction)
     self.set_presence(presence)
Exemplo n.º 11
0
    def _GesMakeEncodingProfile(self):
        """Builds the container/video/audio encoding profile matching the
        muxer and encoder element names in self.gstElements."""
        muxer_name = self.gstElements[0]
        video_name = self.gstElements[1]
        audio_name = self.gstElements[2]

        # Container profile named after the application.
        profile = GstPbutils.EncodingContainerProfile.new(
            "photofilmstrip", None, self._GesGetSrcCaps(muxer_name), None)
        profile.set_preset_name(muxer_name)

        # Video stream restricted to raw video input.
        video_stream = GstPbutils.EncodingVideoProfile.new(
            self._GesGetSrcCaps(video_name), None,
            Gst.Caps("video/x-raw"), 0)
        video_stream.set_enabled(1)
        video_stream.set_preset_name(video_name)
        profile.add_profile(video_stream)

        # Audio stream restricted to 44.1 kHz stereo raw audio input.
        audio_stream = GstPbutils.EncodingAudioProfile.new(
            self._GesGetSrcCaps(audio_name), None,
            Gst.Caps("audio/x-raw,rate=44100,channels=2"), 0)
        audio_stream.set_enabled(1)
        audio_stream.set_preset_name(audio_name)
        profile.add_profile(audio_stream)

        return profile
Exemplo n.º 12
0
    def test_fixate_caps_with_defalt_values(self):
        """Checks fixate_caps_with_default_values on real encoder caps.

        Each dataset entry is the argument tuple for the function under
        test plus, as its last element, the exact fixed caps expected back.

        NOTE(review): "defalt" typo kept -- renaming the test method would
        change its public name.
        """
        voaacenc_caps = Gst.Caps.from_string(
            "audio/x-raw, format=(string)S16LE, layout=(string)interleaved, rate=(int){ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000 }, channels=(int)1;"
            "audio/x-raw, format=(string)S16LE, layout=(string)interleaved, rate=(int){ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000 }, channels=(int)2, channel-mask=(bitmask)0x0000000000000003"
        )
        yt_audiorest = Gst.Caps(
            "audio/x-raw,channels=6,channel-mask=0x3f,rate={48000,96000};"
            "audio/x-raw,channels=2,rate={48000,96000}")

        vorbis_caps = Gst.Caps(
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)1;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)2, channel-mask=(bitmask)0x0000000000000003;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)3, channel-mask=(bitmask)0x0000000000000007;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)4, channel-mask=(bitmask)0x0000000000000033;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)5, channel-mask=(bitmask)0x0000000000000037;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)6, channel-mask=(bitmask)0x000000000000003f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)7, channel-mask=(bitmask)0x0000000000000d0f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)8, channel-mask=(bitmask)0x0000000000000c3f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)[ 9, 255 ], channel-mask=(bitmask)0x0000000000000000"
        )

        audio_defaults = {
            'channels': Gst.IntRange(range(1, 2147483647)),
            "rate": Gst.IntRange(range(8000, GLib.MAXINT))
        }

        dataset = [(
            voaacenc_caps, yt_audiorest, audio_defaults, None,
            Gst.Caps(
                "audio/x-raw, channels=2,rate=48000,channel-mask=(bitmask)0x03"
            )),
                   (vorbis_caps, None, audio_defaults,
                    Gst.Caps('audio/x-raw,channels=1,rate=8000'))]

        for data in dataset:
            res = fixate_caps_with_default_values(*data[:-1])
            # Fixed: removed leftover debug print of the intermediate result.
            self.assertTrue(res.is_equal_fixed(data[-1]),
                            "%s != %s" % (res, data[-1]))
Exemplo n.º 13
0
class CairoSurfaceThumbnailSink(GstBase.BaseSink):
    """
    GStreamer thumbnailing sink element.

    Can be used in pipelines to generate gtk.gdk.Pixbuf automatically.

    Emits a "thumbnail" signal carrying the buffer timestamp each time a
    frame is rendered; the raw frame bytes are kept in self.data.

    NOTE(review): this class mixes GStreamer 0.10 APIs (gst.PadTemplate,
    "video/x-raw-rgb" caps, gobject signal constants) with the GstBase
    introspection namespace -- confirm which GStreamer version it targets.
    """

    __gsignals__ = {
        "thumbnail":
        (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ([gobject.TYPE_UINT64]))
    }

    # Accept only 32-bit RGB frames whose channel masks match cairo's
    # native pixel layout (masks computed elsewhere in the module).
    __gsttemplates__ = (gst.PadTemplate(
        "sink", gst.PadDirection.SINK, gst.PadPresence.ALWAYS,
        gst.Caps("video/x-raw-rgb,"
                 "bpp = (int) 32, depth = (int) 32,"
                 "endianness = (int) BIG_ENDIAN,"
                 "alpha_mask = (int) %i, "
                 "red_mask = (int)   %i, "
                 "green_mask = (int) %i, "
                 "blue_mask = (int)  %i, "
                 "width = (int) [ 1, max ], "
                 "height = (int) [ 1, max ], "
                 "framerate = (fraction) [ 0, 25 ]" %
                 (big_to_cairo_alpha_mask, big_to_cairo_red_mask,
                  big_to_cairo_green_mask, big_to_cairo_blue_mask))))

    def __init__(self):
        GstBase.BaseSink.__init__(self)
        # Frame dimensions; updated from the negotiated caps in do_set_caps.
        self.width = 1
        self.height = 1
        # Render in sync with the clock so thumbnails match playback time.
        self.set_sync(True)
        # Raw bytes of the most recently rendered frame.
        self.data = None

    def do_set_caps(self, caps):
        """Records the negotiated frame size; rejects non-RGB caps."""
        self.log("caps %s" % caps.to_string())
        self.log("padcaps %s" % self.get_pad("sink").get_caps().to_string())
        self.width = caps[0]["width"]
        self.height = caps[0]["height"]
        if not caps[0].get_name() == "video/x-raw-rgb":
            return False
        return True

    def do_render(self, buf):
        """Stores the frame bytes and announces them via "thumbnail"."""
        self.data = str(buf.data)
        self.emit('thumbnail', buf.timestamp)
        return gst.FlowReturn.OK

    def do_preroll(self, buf):
        # Prerolled frames are handled exactly like rendered ones.
        return self.do_render(buf)
Exemplo n.º 14
0
	def feed(self,capstring,frame):
		"""Pushes one raw frame into the appsrc as a Gst.Sample.

		Args:
			capstring: Caps description of the frame data.
			frame: Raw frame payload wrapped into a Gst.Buffer.

		Returns:
			bool: True if the appsrc accepted the sample, False otherwise.
		"""
		if not self.playing:
			self.startProducing()
		# Fixed: call the static constructor directly instead of building
		# a throwaway empty Gst.Caps() just to reach from_string on it;
		# also dropped the dead commented-out push-buffer variants.
		gstcaps = Gst.Caps.from_string(capstring)
		gstbuff = Gst.Buffer.new_wrapped(frame)
		gstsample = Gst.Sample.new(gstbuff,gstcaps,None,None)
		ret = self.appsource.emit("push-sample",gstsample)
		if (ret != Gst.FlowReturn.OK):
			return False
		return True
Exemplo n.º 15
0
    def build_pipeline(self, video_src, video_sink, pipeline):
        """Creates the decode/convert elements, adds them to *pipeline* and
        links the static parts of the audio and video branches.

        decodebin source pads appear dynamically, so the queues are left
        unlinked on their sink side here.

        Args:
            video_src: Source element feeding the decodebin.
            video_sink: Final video sink element.
            pipeline: Gst.Pipeline all elements are added to.
        """
        # Create the pipeline elements
        self._decodebin = Gst.ElementFactory.make("decodebin", None)
        self._autoconvert = Gst.ElementFactory.make("autoconvert", None)

        # As a precaution add a raw-video capability filter
        # in the video processing pipeline.
        videocap = Gst.Caps("video/x-raw")

        self._filter = Gst.ElementFactory.make("capsfilter", None)
        self._filter.set_property("caps", videocap)

        # Converts the video from one colorspace to another
        self._color_space = Gst.ElementFactory.make("videoconvert", None)

        self._audioconvert = Gst.ElementFactory.make("audioconvert", None)
        self._audiosink = Gst.ElementFactory.make("autoaudiosink", None)

        # Queues
        self._queue1 = Gst.ElementFactory.make("queue", None)
        self._queue2 = Gst.ElementFactory.make("queue", None)

        pipeline.add(video_src)
        pipeline.add(self._decodebin)
        pipeline.add(self._autoconvert)
        pipeline.add(self._audioconvert)
        pipeline.add(self._queue1)
        pipeline.add(self._queue2)
        pipeline.add(self._filter)
        pipeline.add(self._color_space)
        pipeline.add(self._audiosink)
        pipeline.add(video_sink)

        # Link everything we can link now
        video_src.link(self._decodebin)

        self._queue1.link(self._autoconvert)
        self._autoconvert.link(self._filter)
        self._filter.link(self._color_space)
        # Fixed: the element was *called* like a function
        # (self._color_space(video_sink)); it must be linked to the sink.
        self._color_space.link(video_sink)

        self._queue2.link(self._audioconvert)
        self._audioconvert.link(self._audiosink)
Exemplo n.º 16
0
def choose_video_caps(supported_caps, target_caps):
    """Picks concrete caps from *supported_caps* closest to *target_caps*.

    Builds a preference-ordered list of candidate structures derived from
    the target -- raw with exact format, raw without format, JPEG, then
    variants that progressively drop framerate and dimensions -- intersects
    it with the supported caps, and fixates the result.

    Args:
        supported_caps (Gst.Caps): Caps the producer can deliver.
        target_caps (Gst.Caps): Desired caps; the first structure must be
            "video/x-raw".

    Returns:
        Gst.Caps: Fixed caps chosen from the non-empty intersection.
    """
    target_struct = target_caps.get_structure(0)
    assert target_struct.get_name() == "video/x-raw"
    raw_struct = target_struct.copy()
    jpeg_struct = raw_struct.copy()
    jpeg_struct.set_name("image/jpeg")

    # Build up a set of caps we'd accept.
    # Structures are appended most-preferred first; the structs are
    # mutated in place between appends, so each append copies.
    caps = Gst.Caps()
    caps.append_structure(raw_struct.copy())
    raw_struct.remove_field("format")
    caps.append_structure(raw_struct.copy())
    caps.append_structure(jpeg_struct.copy())

    # Without framerate
    raw_struct.remove_field("framerate")
    jpeg_struct.remove_field("framerate")
    caps.append_structure(raw_struct.copy())
    caps.append_structure(jpeg_struct.copy())

    # Without dimensions, but with framerate
    raw_struct.remove_field("width")
    raw_struct.remove_field("height")
    raw_struct.remove_field("pixel-aspect-ratio")
    raw_struct.set_value("framerate", target_struct.get_value("framerate"))
    jpeg_struct.remove_field("width")
    jpeg_struct.remove_field("height")
    jpeg_struct.remove_field("pixel-aspect-ratio")
    jpeg_struct.set_value("framerate", target_struct.get_value("framerate"))
    caps.append_structure(raw_struct.copy())
    caps.append_structure(jpeg_struct.copy())

    # Without dimensions or framerate
    raw_struct.remove_field("framerate")
    jpeg_struct.remove_field("framerate")
    caps.append_structure(raw_struct.copy())
    caps.append_structure(jpeg_struct.copy())

    caps = caps.intersect(supported_caps)
    assert not caps.is_empty()

    return caps.fixate()
Exemplo n.º 17
0
def push_buffer(src, ml):
    """Pushes the next H264 frame file into appsrc *src* as a sample.

    Reads "H264/<fnum>" (zero-padded), wraps it into a Gst.Buffer and
    pushes it; quits the main loop *ml* once the next file is missing.

    Returns:
        True to keep the periodic GLib source scheduled, None (falsy,
        removing the source) when all files have been pushed.
    """
    global fnum
    global pipeline
    fname = "H264/%05d" % fnum
    try:
        with open(fname, "rb") as f:
            # Fixed: new_wrapped accepts the bytes directly; copying the
            # payload into a list of ints first was wasteful.
            buf = Gst.Buffer.new_wrapped(f.read())
    except FileNotFoundError:
        ml.quit()
        return

    fnum += 1
    print("Pushed %s: %s" %
          (fname,
           src.push_sample(
               Gst.Sample.new(buf, Gst.Caps("video/x-h264"), None, None))))

    # Dump the pipeline graph after each push for debugging.
    Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL,
                              os.path.basename(fname))

    return True
Exemplo n.º 18
0
    def start_pipeline(self, channels, samplerate):
        """Builds the pipeline from self.pipe, configures appsrc/appsink and
        starts playback in a background main loop.

        Args:
            channels: Number of interleaved F32LE audio channels fed to
                the appsrc.
            samplerate: Sample rate (Hz) of the incoming audio.
        """
        self.pipeline = Gst.parse_launch(self.pipe)
        # store a pointer to appsrc in our encoder object
        self.src = self.pipeline.get_by_name('src')

        if self.streaming:
            # Fixed: catch only ImportError -- a bare "except:" would also
            # swallow unrelated failures (KeyboardInterrupt, typos, ...).
            try:  # py3
                import queue
            except ImportError:  # py2
                import Queue as queue
            self._streaming_queue = queue.Queue(QUEUE_SIZE)
            # store a pointer to appsink in our encoder object
            self.app = self.pipeline.get_by_name('app')
            self.app.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
            self.app.set_property("drop", False)
            self.app.set_property('emit-signals', True)
            self.app.connect("new-sample", self._on_new_sample_streaming)
            self.app.connect('new-preroll', self._on_new_preroll_streaming)

        # Describe exactly what the appsrc will be fed.
        srccaps = Gst.Caps("""audio/x-raw,
            format=F32LE,
            layout=interleaved,
            channels=(int)%s,
            rate=(int)%d""" % (int(channels), int(samplerate)))
        self.src.set_property("caps", srccaps)
        self.src.set_property('emit-signals', True)
        self.src.set_property('num-buffers', -1)
        self.src.set_property('block', False)
        #self.src.set_property('do-timestamp', True)

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message", self._on_message_cb)

        # Run the GLib loop on its own thread so this call returns.
        self.mainloop = GLib.MainLoop()
        self.mainloopthread = MainloopThread(self.mainloop)
        self.mainloopthread.start()

        # start pipeline
        self.pipeline.set_state(Gst.State.PLAYING)
Exemplo n.º 19
0
        def _create_audiobin(self):
            ''' Assemble all the pieces we need. '''
            src = Gst.ElementFactory.make("alsasrc", "absrc")

            # attempt to use direct access to the 0,0 device, solving
            # some A/V sync issues
            src.set_property("device", "plughw:0,0")
            hwdev_available = src.set_state(Gst.State.PAUSED) != \
                Gst.StateChangeReturn.FAILURE
            src.set_state(Gst.State.NULL)
            if not hwdev_available:
                src.set_property("device", "default")

            # NOTE(review): "audio/x-raw-int" is GStreamer 0.10 caps syntax;
            # under Gst 1.x these caps would be
            # "audio/x-raw,format=S16LE,..." -- confirm the targeted version.
            srccaps = Gst.Caps(
                "audio/x-raw-int,rate=16000,channels=1,depth=16")

            # guarantee perfect stream, important for A/V sync
            rate = Gst.ElementFactory.make("audiorate")

            # without a buffer here, gstreamer struggles at the start
            # of the recording and then the A/V sync is bad for the
            # whole video (possibly a gstreamer/ALSA bug -- even if it
            # gets caught up, it should be able to resync without
            # problem)
            queue = Gst.ElementFactory.make("queue", "audioqueue")
            queue.set_property("leaky", True)  # prefer fresh data
            queue.set_property("max-size-time", 5000000000)  # 5 seconds
            queue.set_property("max-size-buffers", 500)

            enc = Gst.ElementFactory.make("wavenc", "abenc")

            sink = Gst.ElementFactory.make("filesink", "absink")
            sink.set_property("location", self.capture_file)

            self._audiobin = Gst.Bin("audiobin")
            self._audiobin.add(src, rate, queue, enc, sink)

            # Fixed: Element.link() takes only the peer element; linking
            # with caps requires link_filtered(). Gst.element_link_many()
            # does not exist in the GStreamer 1.x Python bindings, so the
            # remaining elements are linked pairwise.
            src.link_filtered(rate, srccaps)
            rate.link(queue)
            queue.link(enc)
            enc.link(sink)
Exemplo n.º 20
0
    def build_pipeline(self, video_src, video_sink, pipeline):
        """Creates the video decoding chain and links the static parts.

        decodebin source pads appear dynamically, so only the source link
        and the queue -> ... -> sink chain are made here.
        """
        # Create the pipeline elements
        self._decodebin = Gst.ElementFactory.make("decodebin", None)
        self._autoconvert = Gst.ElementFactory.make("autoconvert", None)

        # Guard the video branch with a raw-video capsfilter.
        self._filter = Gst.ElementFactory.make("capsfilter", None)
        self._filter.set_property("caps", Gst.Caps("video/x-raw"))

        # Converts the video from one colorspace to another
        self._color_space = Gst.ElementFactory.make("videoconvert", None)

        self._queue1 = Gst.ElementFactory.make("queue", None)

        pipeline.add(video_src, self._decodebin, self._autoconvert,
                     self._queue1, self._filter, self._color_space, video_sink)

        # Link everything we can link now.
        video_src.link(self._decodebin)
        for upstream, downstream in ((self._queue1, self._autoconvert),
                                     (self._autoconvert, self._filter),
                                     (self._filter, self._color_space),
                                     (self._color_space, video_sink)):
            upstream.link(downstream)
Exemplo n.º 21
0
    def __init__(self, width, height):
        """Builds a video output pipeline:
        imagefreeze -> videoconvert -> videoflip -> capsfilter -> ximagesink.

        Args:
            width: Output frame width in pixels.
            height: Output frame height in pixels.
        """
        Gst.Pipeline.__init__(self)

        self.set_name('video_out')

        imagefreeze = Gst.ElementFactory.make('imagefreeze', "imagefreeze")
        videoconvert = Gst.ElementFactory.make('videoconvert', 'videoconvert')

        videoflip = Gst.ElementFactory.make('videoflip', "videoflip")
        # Fixed RGB format and framerate at the requested frame size.
        caps = Gst.Caps(
            'video/x-raw,format=RGB,framerate=30/1,width=%s,height=%s' %
            (width, height))
        filtro = Gst.ElementFactory.make("capsfilter", "filtro")
        filtro.set_property("caps", caps)

        ximagesink = Gst.ElementFactory.make('ximagesink', "ximagesink")
        ximagesink.set_property("force-aspect-ratio", True)

        self.add(imagefreeze)
        self.add(videoconvert)
        self.add(videoflip)
        self.add(filtro)
        self.add(ximagesink)

        imagefreeze.link(videoconvert)
        videoconvert.link(videoflip)
        videoflip.link(filtro)
        filtro.link(ximagesink)

        # Fixed: GhostPad.new() already sets the target pad, so the extra
        # set_target() call that followed was redundant and was removed.
        self.ghost_pad = Gst.GhostPad.new("sink",
                                          imagefreeze.get_static_pad("sink"))

        self.add_pad(self.ghost_pad)
Exemplo n.º 22
0
class ProxyManager(GObject.Object, Loggable):
    """Transcodes assets and manages proxies."""

    __gsignals__ = {
        "progress": (GObject.SignalFlags.RUN_LAST, None, (object, int, int)),
        "proxy-ready": (GObject.SignalFlags.RUN_LAST, None, (object, object)),
        "asset-preparing-cancelled":
        (GObject.SignalFlags.RUN_LAST, None, (object, )),
        "error-preparing-asset":
        (GObject.SignalFlags.RUN_LAST, None, (object, object, object)),
    }

    # Caps names of containers/streams considered well supported, i.e. not
    # needing an HQ proxy under the AUTOMATIC proxying strategy.
    WHITELIST_CONTAINER_CAPS = [
        "video/quicktime", "application/ogg", "application/xges",
        "video/x-matroska", "video/webm", "image/jpeg"
    ]
    WHITELIST_AUDIO_CAPS = [
        "audio/mpeg", "audio/x-vorbis", "audio/x-raw", "audio/x-flac",
        "audio/x-wav"
    ]
    WHITELIST_VIDEO_CAPS = [
        "video/x-h264", "image/jpeg", "video/x-raw", "video/x-vp8",
        "video/x-theora"
    ]

    # Cartesian product of the whitelists above, as encoding profiles, plus
    # one audio-only profile per whitelisted audio caps.
    WHITELIST_FORMATS = []
    for container in WHITELIST_CONTAINER_CAPS:
        for audio in WHITELIST_AUDIO_CAPS:
            for video in WHITELIST_VIDEO_CAPS:
                WHITELIST_FORMATS.append(
                    create_encoding_profile_simple(container, audio, video))

    for audio in WHITELIST_AUDIO_CAPS:
        a = GstPbutils.EncodingAudioProfile.new(Gst.Caps(audio), None, None, 0)
        WHITELIST_FORMATS.append(a)

    # Filename extensions identifying the two kinds of proxies.
    hq_proxy_extension = "proxy.mov"
    scaled_proxy_extension = "scaledproxy.mov"
    # Suffix for filenames of proxies being created.
    part_suffix = ".part"

    def __init__(self, app):
        """Initializes the manager and picks a supported proxy format.

        Args:
            app: The application; used for settings, the project manager
                and the proxy manager itself.
        """
        GObject.Object.__init__(self)
        Loggable.__init__(self)

        self.app = app
        # Total time to transcode in seconds.
        self._total_time_to_transcode = 0
        # Transcoded time per asset in seconds.
        self._transcoded_durations = {}
        # Timestamp when the first transcoder of the batch was started.
        self._start_proxying_time = 0
        self.__running_transcoders = []
        self.__pending_transcoders = []
        # The scaled proxy transcoders waiting for their corresponding shadow
        # HQ proxy transcoder to finish.
        self.__waiting_transcoders = []

        self.__encoding_target_file = None
        self.proxying_unsupported = False
        # Try the known encoding targets in order of preference and keep the
        # first one for which encoders/decoders are available.
        for encoding_format in [ENCODING_FORMAT_JPEG, ENCODING_FORMAT_PRORES]:
            self.__encoding_profile = self.__get_encoding_profile(
                encoding_format)
            if self.__encoding_profile:
                self.__encoding_target_file = encoding_format
                self.info("Using %s as proxying format", encoding_format)
                break

        if not self.__encoding_profile:
            self.proxying_unsupported = True

            self.error("Not supporting any proxy formats!")
            return

    def _scale_asset_resolution(self, asset, max_width, max_height):
        """Returns the asset resolution scaled to fit max_width x max_height.

        The aspect ratio is preserved; the original size is returned when
        scaling is not possible or not needed.
        """
        stream = asset.get_info().get_video_streams()[0]
        width = stream.get_width()
        height = stream.get_height()
        aspect_ratio = Fraction(width, height)

        # Fraction reduces the ratio; if it could not be reduced (numerator
        # or denominator equals the original dimension) the resolution
        # cannot be scaled in integer multiples of the ratio.
        if aspect_ratio.numerator >= width or aspect_ratio.denominator >= height:
            self.log("Unscalable aspect ratio.")
            return width, height
        if aspect_ratio.numerator >= max_width or aspect_ratio.denominator >= max_height:
            self.log("Cannot scale to target resolution.")
            return width, height

        if width > max_width or height > max_height:
            # Largest integer multiple of the reduced ratio fitting the box.
            width_factor = max_width // aspect_ratio.numerator
            height_factor = max_height // aspect_ratio.denominator
            scaling_factor = min(height_factor, width_factor)

            width = aspect_ratio.numerator * scaling_factor
            height = aspect_ratio.denominator * scaling_factor

        return width, height

    def _asset_matches_encoding_format(self, asset, encoding_profile):
        """Returns whether the asset's streams all match the profile's caps."""
        def caps_match(info, profile):
            return not info.get_caps().intersect(
                profile.get_format()).is_empty()

        info = asset.get_info()
        if isinstance(encoding_profile, GstPbutils.EncodingAudioProfile):
            # An audio-only profile only matches a containerless asset with
            # exactly one matching audio stream and no video.
            if isinstance(info.get_stream_info(),
                          GstPbutils.DiscovererContainerInfo):
                return False
            audios = info.get_audio_streams()
            if len(audios) != 1 or not caps_match(audios[0], encoding_profile):
                return False
            if info.get_video_streams():
                return False
            return True

        container = info.get_stream_info()
        if container:
            if not caps_match(container, encoding_profile):
                return False

        # Every audio and video stream must match the corresponding
        # sub-profile of the container profile.
        for profile in encoding_profile.get_profiles():
            if isinstance(profile, GstPbutils.EncodingAudioProfile):
                audios = info.get_audio_streams()
                for audio_stream in audios:
                    if not caps_match(audio_stream, profile):
                        return False
            elif isinstance(profile, GstPbutils.EncodingVideoProfile):
                videos = info.get_video_streams()
                for video_stream in videos:
                    if not caps_match(video_stream, profile):
                        return False
        return True

    def __get_encoding_profile(self,
                               encoding_target_file,
                               asset=None,
                               width=None,
                               height=None):
        """Loads the "default" profile from the specified encoding target.

        Returns None if no encoder or decoder is available for one of the
        profile's formats. Optionally restricts the video profile to
        width x height, and the audio profile to the asset's channel count.
        """
        encoding_target = GstPbutils.EncodingTarget.load_from_file(
            os.path.join(get_gstpresets_dir(), encoding_target_file))
        encoding_profile = encoding_target.get_profile("default")

        if not encoding_profile:
            return None

        for profile in encoding_profile.get_profiles():
            profile_format = profile.get_format()
            # Do not verify we have an encoder/decoder for raw audio/video,
            # as they are not required.
            # NOTE(review): "audio/x-video(ANY)" looks like a typo for
            # "video/x-raw(ANY)" — confirm against the upstream encoding
            # targets; as written, raw video profiles are not skipped here.
            if profile_format.intersect(Gst.Caps("audio/x-raw(ANY)")) or \
                    profile_format.intersect(Gst.Caps("audio/x-video(ANY)")):
                continue
            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_ENCODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SRC, False):
                return None
            if height and width and profile.get_type_nick() == "video":
                # Scaled proxy: pin the output resolution.
                profile.set_restriction(
                    Gst.Caps.from_string("video/x-raw, width=%d, height=%d" %
                                         (width, height)))

            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_DECODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SINK, False):
                return None

        if asset:
            # If we have an asset, we force audioconvert to keep
            # the number of channels
            # TODO: remove once https://bugzilla.gnome.org/show_bug.cgi?id=767226
            # is fixed
            info = asset.get_info()
            try:
                # TODO Be smarter about multiple streams
                audio_stream = info.get_audio_streams()[0]
                channels = audio_stream.get_channels()
                audio_profile = [
                    profile for profile in encoding_profile.get_profiles()
                    if isinstance(profile, GstPbutils.EncodingAudioProfile)
                ][0]
                audio_profile.set_restriction(
                    Gst.Caps.from_string("audio/x-raw,channels=%d" % channels))
            except IndexError:
                # No audio stream: nothing to restrict.
                pass

        return encoding_profile

    @classmethod
    def is_proxy_asset(cls, obj):
        """Returns whether the asset or URI is any kind of proxy."""
        return cls.is_scaled_proxy(obj) or cls.is_hq_proxy(obj)

    @classmethod
    def is_scaled_proxy(cls, obj):
        """Returns whether the asset or URI is a scaled proxy."""
        if isinstance(obj, GES.Asset):
            uri = obj.props.id
        else:
            uri = obj

        return uri.endswith("." + cls.scaled_proxy_extension)

    @classmethod
    def is_hq_proxy(cls, obj):
        """Returns whether the asset or URI is a high-quality proxy."""
        if isinstance(obj, GES.Asset):
            uri = obj.props.id
        else:
            uri = obj

        return uri.endswith("." + cls.hq_proxy_extension)

    def check_proxy_loading_succeeded(self, proxy):
        """Emits "error-preparing-asset" if the proxy failed to load.

        Returns:
            bool: True if the specified asset is a proxy, False otherwise.
        """
        if self.is_proxy_asset(proxy):
            return True

        self.emit("error-preparing-asset", None, proxy, proxy.get_error())
        return False

    @classmethod
    def get_target_uri(cls, obj):
        """Returns the URI of the original asset for the given proxy.

        Strips the proxy-specific filename components appended by
        get_proxy_uri; returns the URI unchanged for non-proxies.
        """
        if isinstance(obj, GES.Asset):
            uri = obj.props.id
        else:
            uri = obj

        # Scaled proxy names carry one extra component (the resolution),
        # see get_proxy_uri below.
        if cls.is_scaled_proxy(uri):
            return ".".join(uri.split(".")[:-4])

        if cls.is_proxy_asset(uri):
            return ".".join(uri.split(".")[:-3])

        return uri

    def get_proxy_uri(self, asset, scaled=False):
        """Gets the URI of the corresponding proxy file for the specified asset.

        The name looks like:
            <filename>.<file_size>[.<proxy_resolution>].<proxy_extension>

        Returns:
            str: The URI or None if it can't be computed for any reason.
        """
        asset_file = Gio.File.new_for_uri(asset.get_id())
        try:
            file_size = asset_file.query_info(Gio.FILE_ATTRIBUTE_STANDARD_SIZE,
                                              Gio.FileQueryInfoFlags.NONE,
                                              None).get_size()
        except GLib.Error as err:
            if err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
                return None
            else:
                raise

        if scaled:
            # Scaled proxies only make sense for assets with video.
            if not asset.get_info().get_video_streams():
                return None

            max_w = self.app.project_manager.current_project.scaled_proxy_width
            max_h = self.app.project_manager.current_project.scaled_proxy_height
            t_width, t_height = self._scale_asset_resolution(
                asset, max_w, max_h)
            proxy_res = "%sx%s" % (t_width, t_height)
            return "%s.%s.%s.%s" % (asset.get_id(), file_size, proxy_res,
                                    self.scaled_proxy_extension)
        else:
            return "%s.%s.%s" % (asset.get_id(), file_size,
                                 self.hq_proxy_extension)

    def is_asset_format_well_supported(self, asset):
        """Returns whether the asset matches one of the whitelisted formats."""
        for encoding_format in self.WHITELIST_FORMATS:
            if self._asset_matches_encoding_format(asset, encoding_format):
                self.info("Automatically not proxying")
                return True

        return False

    def asset_matches_target_res(self, asset):
        """Returns whether the asset's size <= the scaled proxy size."""
        stream = asset.get_info().get_video_streams()[0]

        asset_res = (stream.get_width(), stream.get_height())
        target_res = self._scale_asset_resolution(
            asset, self.app.project_manager.current_project.scaled_proxy_width,
            self.app.project_manager.current_project.scaled_proxy_height)

        return asset_res == target_res

    def __asset_needs_transcoding(self, asset, scaled=False):
        """Returns whether a proxy should actually be created for the asset.

        Takes the proxying strategy setting and the currently selected
        proxy encoding profile into account.
        """
        if self.proxying_unsupported:
            self.info("No proxying supported")
            return False

        if asset.is_image():
            return False

        if self.app.settings.proxying_strategy == ProxyingStrategy.NOTHING:
            self.debug("Not proxying anything. %s",
                       self.app.settings.proxying_strategy)
            return False

        # AUTOMATIC: scale when the asset is larger than the target size.
        if self.app.settings.proxying_strategy == ProxyingStrategy.AUTOMATIC \
                and scaled and not self.asset_matches_target_res(asset):
            return True

        # AUTOMATIC: skip HQ proxies for well supported formats.
        if self.app.settings.proxying_strategy == ProxyingStrategy.AUTOMATIC \
                and not scaled and not self.is_hq_proxy(asset) and \
                self.is_asset_format_well_supported(asset):
            return False

        if not self._asset_matches_encoding_format(asset,
                                                   self.__encoding_profile):
            return True

        self.info("%s does not need proxy", asset.get_id())
        return False

    def asset_can_be_proxied(self, asset, scaled=False):
        """Returns whether the asset is not a proxy nor a proper proxy."""
        if asset.is_image():
            return False

        if scaled:
            if not asset.get_info().get_video_streams():
                return False

            return not self.is_scaled_proxy(asset) or \
                self.asset_matches_target_res(asset)
        else:
            return not self.is_hq_proxy(asset)

    def __start_transcoder(self, transcoder):
        """Starts the transcoder and tracks it as running."""
        self.debug("Starting %s", transcoder.props.src_uri)
        if self._start_proxying_time == 0:
            # First job of the batch: start the clock used for estimates.
            self._start_proxying_time = time.time()
        transcoder.run_async()
        self.__running_transcoders.append(transcoder)

    def __assets_match(self, asset, proxy):
        """Returns whether the existing proxy is usable for the asset."""
        if self.__asset_needs_transcoding(proxy):
            return False

        info = asset.get_info()
        if info.get_duration() != asset.get_duration():
            return False

        return True

    def __asset_loaded_cb(self, proxy, res, asset, transcoder):
        """Handles the loading of a proxy asset, new or pre-existing.

        Called when GES.Asset.request_async finishes. `transcoder` is None
        when an already-generated proxy file was loaded directly.
        """
        try:
            GES.Asset.request_finish(res)
        except GLib.Error as e:
            if transcoder:
                # A freshly transcoded proxy failed to load: report it.
                self.emit("error-preparing-asset", asset, proxy, e)
                del transcoder
            else:
                # The pre-existing proxy file is unusable: re-transcode.
                self.__create_transcoder(asset)

            return

        shadow = transcoder and self._is_shadow_transcoder(transcoder)

        if not transcoder:
            if not self.__assets_match(asset, proxy):
                self.__create_transcoder(asset)
                return
        else:
            # Let the thumbnail/waveform filters flush their data.
            if transcoder.props.pipeline.props.video_filter:
                transcoder.props.pipeline.props.video_filter.finalize()

            if transcoder.props.pipeline.props.audio_filter:
                transcoder.props.pipeline.props.audio_filter.finalize()

            del transcoder

        # A proxy shorter/longer than its source: clamp both durations and
        # fix up clips that would now extend past the end.
        asset_duration = asset_get_duration(asset)
        proxy_duration = asset_get_duration(proxy)
        if asset_duration != proxy_duration:
            duration = min(asset_duration, proxy_duration)
            self.info(
                "Resetting %s duration from %s to %s as"
                " new proxy has a different duration", asset.props.id,
                Gst.TIME_ARGS(asset_duration), Gst.TIME_ARGS(duration))
            asset.set_uint64(ASSET_DURATION_META, duration)
            proxy.set_uint64(ASSET_DURATION_META, duration)
            target_uri = self.get_target_uri(asset)

            for clip in self.app.project_manager.current_project.ges_timeline.iter_clips(
            ):
                if not isinstance(clip, GES.UriClip):
                    continue
                if self.get_target_uri(clip.props.uri) == target_uri:
                    if clip.props.in_point + clip.props.duration > duration:
                        new_duration = duration - clip.props.in_point
                        if new_duration > 0:
                            self.warning(
                                "%s resetting duration to %s as"
                                " new proxy has a shorter duration", clip,
                                Gst.TIME_ARGS(new_duration))
                            clip.set_duration(new_duration)
                        else:
                            new_inpoint = new_duration - clip.props.in_point
                            self.error(
                                "%s resetting duration to %s"
                                " and inpoint to %s as the proxy"
                                " is shorter", clip,
                                Gst.TIME_ARGS(new_duration),
                                Gst.TIME_ARGS(new_inpoint))
                            clip.set_inpoint(new_inpoint)
                            clip.set_duration(duration - new_inpoint)
                        clip.set_max_duration(duration)

        if shadow:
            # Shadow HQ proxies are not announced via "proxy-ready".
            self.app.project_manager.current_project.finalize_proxy(proxy)
        else:
            self.emit("proxy-ready", asset, proxy)
            self.__emit_progress(proxy, 100)

    def __transcoder_error_cb(self, _, error, unused_details, asset,
                              transcoder):
        """Reports a transcoding failure for the asset."""
        self.emit("error-preparing-asset", asset, None, error)

    def __transcoder_done_cb(self, emitter, asset, transcoder):
        """Finalizes a finished transcoder and schedules the next one."""
        emitter.disconnect_by_func(self.__proxying_position_changed_cb)
        emitter.disconnect_by_func(self.__transcoder_done_cb)
        emitter.disconnect_by_func(self.__transcoder_error_cb)

        self.debug("Transcoder done with %s", asset.get_id())

        self.__running_transcoders.remove(transcoder)

        # Rename the ".part" file to its final proxy name.
        # NOTE(review): str.rstrip removes a *character set*, not a suffix;
        # this only works because the proxy extensions end in "mov" whose
        # final characters are not in ".part". Consider
        # str.removesuffix(ProxyManager.part_suffix) — confirm and fix
        # upstream.
        proxy_uri = transcoder.props.dest_uri.rstrip(ProxyManager.part_suffix)
        os.rename(Gst.uri_get_location(transcoder.props.dest_uri),
                  Gst.uri_get_location(proxy_uri))

        shadow = self._is_shadow_transcoder(transcoder)
        second_transcoder = self._get_second_transcoder(transcoder)
        if second_transcoder and not shadow:
            # second_transcoder is the shadow for transcoder.
            # Defer loading until the shadow transcoder finishes.
            self.__waiting_transcoders.append([transcoder, asset])
        else:
            # Make sure that if it first failed loading, the proxy is forced to
            # be reloaded in the GES cache.
            GES.Asset.needs_reload(GES.UriClip, proxy_uri)
            GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                    self.__asset_loaded_cb, asset, transcoder)

        if shadow:
            # Finish deferred loading for waiting scaled proxy transcoder.
            for pair in self.__waiting_transcoders:
                waiting_transcoder, waiting_asset = pair
                if waiting_transcoder.props.src_uri == transcoder.props.src_uri:
                    proxy_uri = waiting_transcoder.props.dest_uri.rstrip(
                        ProxyManager.part_suffix)
                    GES.Asset.needs_reload(GES.UriClip, proxy_uri)
                    GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                            self.__asset_loaded_cb,
                                            waiting_asset, waiting_transcoder)

                    self.__waiting_transcoders.remove(pair)
                    break

        # Start the next pending job; reset the batch stats when idle.
        try:
            self.__start_transcoder(self.__pending_transcoders.pop())
        except IndexError:
            if not self.__running_transcoders:
                self._transcoded_durations = {}
                self._total_time_to_transcode = 0
                self._start_proxying_time = 0

    def __emit_progress(self, asset, creation_progress):
        """Handles the transcoding progress of the specified asset."""
        if self._transcoded_durations:
            # Extrapolate the remaining time from the throughput so far.
            time_spent = time.time() - self._start_proxying_time
            transcoded_seconds = sum(self._transcoded_durations.values())
            remaining_seconds = max(
                0, self._total_time_to_transcode - transcoded_seconds)
            estimated_time = remaining_seconds * time_spent / transcoded_seconds
        else:
            estimated_time = 0

        asset.creation_progress = creation_progress
        self.emit("progress", asset, asset.creation_progress, estimated_time)

    def __proxying_position_changed_cb(self, _, position, asset, transcoder):
        """Updates the asset's creation progress from the transcoder position."""
        if transcoder not in self.__running_transcoders:
            self.info("Position changed after job cancelled!")
            return

        # When a shadow transcoder also runs for this asset, report the
        # average position of the two jobs.
        second_transcoder = self._get_second_transcoder(transcoder)
        if second_transcoder is not None:
            position = (position + second_transcoder.props.position) // 2

        self._transcoded_durations[asset] = position / Gst.SECOND

        duration = transcoder.props.duration
        if duration <= 0 or duration == Gst.CLOCK_TIME_NONE:
            duration = asset.props.duration
        if duration > 0 and duration != Gst.CLOCK_TIME_NONE:
            creation_progress = 100 * position / duration
            # Do not set to >= 100 as we need to notify about the proxy first.

            asset.creation_progress = max(0, min(creation_progress, 99))

        self.__emit_progress(asset, asset.creation_progress)

    def _get_second_transcoder(self, transcoder):
        """Gets the shadow of a scaled proxy or the other way around."""
        all_transcoders = self.__running_transcoders + self.__pending_transcoders
        for transcoder2 in all_transcoders:
            if transcoder2.props.position_update_interval == transcoder.props.position_update_interval:
                # Both transcoders are of the same type.
                continue
            if transcoder2.props.src_uri == transcoder.props.src_uri:
                return transcoder2
        return None

    def _is_shadow_transcoder(self, transcoder):
        """Returns whether the transcoder creates a shadow HQ proxy.

        Shadow transcoders are marked with a position-update-interval of
        1001 in __create_transcoder.
        """
        if transcoder.props.position_update_interval == 1001:
            return True
        return False

    def is_asset_queued(self, asset, optimisation=True, scaling=True):
        """Returns whether the specified asset is queued for transcoding.

        Args:
            asset (GES.Asset): The asset to check.
            optimisation(bool): Whether to check optimisation queue
            scaling(bool): Whether to check scaling queue

        Returns:
            bool: True if the asset is being transcoded or pending.
        """
        all_transcoders = self.__running_transcoders + self.__pending_transcoders
        is_queued = False
        for transcoder in all_transcoders:
            # The kind of job is identified by the destination extension.
            transcoder_uri = transcoder.props.dest_uri
            scaling_ext = "." + self.scaled_proxy_extension + ProxyManager.part_suffix
            optimisation_ext = "." + self.hq_proxy_extension + ProxyManager.part_suffix

            scaling_transcoder = transcoder_uri.endswith(scaling_ext)
            optimisation_transcoder = transcoder_uri.endswith(optimisation_ext)

            if transcoder.props.src_uri == asset.props.id:
                if optimisation and optimisation_transcoder:
                    is_queued = True
                    break

                if scaling and scaling_transcoder:
                    is_queued = True
                    break

        return is_queued

    def __create_transcoder(self, asset, scaled=False, shadow=False):
        """Creates and queues a transcoder for the asset, or reuses a proxy.

        Args:
            asset (GES.Asset): The asset to transcode.
            scaled (bool): Whether to create a scaled proxy.
            shadow (bool): Whether this is a shadow HQ proxy job.
        """
        self._total_time_to_transcode += asset.get_duration() / Gst.SECOND
        asset_uri = asset.get_id()
        proxy_uri = self.get_proxy_uri(asset, scaled=scaled)

        # Reuse a previously generated proxy file when present.
        if Gio.File.new_for_uri(proxy_uri).query_exists(None):
            self.debug("Using proxy already generated: %s", proxy_uri)
            GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                    self.__asset_loaded_cb, asset, None)
            return

        self.debug(
            "Creating a proxy for %s (strategy: %s, force: %s, scaled: %s)",
            asset.get_id(), self.app.settings.proxying_strategy,
            asset.force_proxying, scaled)

        width = None
        height = None
        if scaled:
            project = self.app.project_manager.current_project
            w = project.scaled_proxy_width
            h = project.scaled_proxy_height
            # Persist the proxy size on the project if it was not set.
            if not project.has_scaled_proxy_size():
                project.scaled_proxy_width = w
                project.scaled_proxy_height = h
            width, height = self._scale_asset_resolution(asset, w, h)
        enc_profile = self.__get_encoding_profile(self.__encoding_target_file,
                                                  asset, width, height)

        # GStreamer >= 1.19 changed the GstTranscoder signal API.
        if HAS_GST_1_19:
            transcoder = GstTranscoder.Transcoder.new_full(
                asset_uri, proxy_uri + ProxyManager.part_suffix, enc_profile)
            signals_emitter = transcoder.get_signal_adapter(None)
        else:
            dispatcher = GstTranscoder.TranscoderGMainContextSignalDispatcher.new(
            )
            signals_emitter = transcoder = GstTranscoder.Transcoder.new_full(
                asset_uri, proxy_uri + ProxyManager.part_suffix, enc_profile,
                dispatcher)

        if shadow:
            # Used to identify shadow transcoder
            transcoder.props.position_update_interval = 1001
        else:
            transcoder.props.position_update_interval = 1000

        # Generate thumbnails and waveforms as a side effect of transcoding.
        info = asset.get_info()
        if info.get_video_streams():
            thumbnailbin = Gst.ElementFactory.make("teedthumbnailbin")
            thumbnailbin.props.uri = asset.get_id()
            transcoder.props.pipeline.props.video_filter = thumbnailbin

        if info.get_audio_streams():
            waveformbin = Gst.ElementFactory.make("waveformbin")
            waveformbin.props.uri = asset.get_id()
            waveformbin.props.duration = asset.get_duration()
            transcoder.props.pipeline.props.audio_filter = waveformbin

        transcoder.set_cpu_usage(self.app.settings.max_cpu_usage)
        signals_emitter.connect("position-updated",
                                self.__proxying_position_changed_cb, asset,
                                transcoder)

        signals_emitter.connect("done", self.__transcoder_done_cb, asset,
                                transcoder)
        signals_emitter.connect("error", self.__transcoder_error_cb, asset,
                                transcoder)

        # Respect the configured number of concurrent transcoding jobs.
        if len(self.__running_transcoders
               ) < self.app.settings.num_transcoding_jobs:
            self.__start_transcoder(transcoder)
        else:
            self.__pending_transcoders.append(transcoder)

    def cancel_job(self, asset):
        """Cancels the transcoding job for the specified asset, if any.

        Args:
            asset (GES.Asset): The original asset.
        """
        if not self.is_asset_queued(asset):
            return

        for transcoder in self.__running_transcoders:
            if asset.props.id == transcoder.props.src_uri:
                self.info("Cancelling running transcoder %s %s",
                          transcoder.props.src_uri, transcoder.__grefcount__)
                self.__running_transcoders.remove(transcoder)
                self.emit("asset-preparing-cancelled", asset)

        for transcoder in self.__pending_transcoders:
            if asset.props.id == transcoder.props.src_uri:
                self.info("Cancelling pending transcoder %s",
                          transcoder.props.src_uri)
                # Removing the transcoder from the list
                # will lead to its destruction (only reference)
                # here, which means it will be stopped.
                self.__pending_transcoders.remove(transcoder)
                self.emit("asset-preparing-cancelled", asset)

    def add_job(self, asset, scaled=False, shadow=False):
        """Adds a transcoding job for the specified asset if needed.

        Args:
            asset (GES.Asset): The asset to be transcoded.
            scaled (Optional[bool]): Whether to create a scaled proxy instead
                of a high-quality proxy.
            shadow (Optional[bool]): Whether to create a high-quality proxy
                to shadow a scaled proxy.
        """
        force_proxying = asset.force_proxying
        video_streams = asset.get_info().get_video_streams()
        if video_streams:
            # Handle Automatic scaling
            if self.app.settings.auto_scaling_enabled and not force_proxying \
                    and not shadow and not self.asset_matches_target_res(asset):
                scaled = True

            # Create shadow proxies for unsupported assets
            if not self.is_asset_format_well_supported(asset) and not \
                    self.app.settings.proxying_strategy == ProxyingStrategy.NOTHING \
                    and not shadow and scaled:
                hq_uri = self.app.proxy_manager.get_proxy_uri(asset)
                if not Gio.File.new_for_uri(hq_uri).query_exists(None):
                    self.add_job(asset, shadow=True)
        else:
            # Scaled proxy is not for audio assets
            scaled = False

        if self.is_asset_queued(asset, scaling=scaled,
                                optimisation=not scaled):
            self.log("Asset %s already queued for %s", asset,
                     "scaling" if scaled else "optimization")
            return

        if not force_proxying:
            if not self.__asset_needs_transcoding(asset, scaled):
                self.debug("Not proxying asset (proxying disabled: %s)",
                           self.proxying_unsupported)
                # Make sure to notify we do not need a proxy for that asset.
                self.emit("proxy-ready", asset, None)
                return

        self.__create_transcoder(asset, scaled=scaled, shadow=shadow)
Exemplo n.º 23
0
    def run(self, xid, output_path, device_config=None, bitrate=350 << 3 << 10):
        '''
        Draw video source to window with the specified `xid` and record the
        video to the specified output file path.

        __NB__ The output file container is determined based on the extension
        of the output file path.  Supported containers are `avi` and `mp4`.  In
        either case, the video is encoded in MPEG4 format.

        Arguments
        ---------

         - `xid`: Integer identifier of window to draw frames to.
         - `output_path`: Output file path.
         - `device_config`:
           * Configuration dictionary or a `pandas.Series` in the format of a
             row of a frame returned by `caps.get_device_configs()`.
           * If not provided, the GStreamer `autovideosrc` is used and no
             caps restrictions are applied.
         - `bitrate`: Target encode bit rate in bits/second (default=350kB/s)

        Raises
        ------

         - `ValueError`: If the output file extension is neither `.mp4` nor
           `.avi`.
        '''
        self.xid = xid
        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        # Create GStreamer elements
        if device_config is None:
            self.src = Gst.ElementFactory.make('autovideosrc', 'source')
        else:
            self.src = get_video_source()
            device_key = get_video_device_key()
            self.src.set_property(device_key, device_config['device'])
        self.sink = Gst.ElementFactory.make('autovideosink', 'sink')
        # Render frames as soon as they arrive instead of syncing to the
        # clock.
        self.sink.set_property('sync', False)

        self.filter_ = Gst.ElementFactory.make('capsfilter', 'filter')
        # Split the stream into a display branch and a capture branch.
        tee = Gst.ElementFactory.make('tee', None)

        sink_queue = Gst.ElementFactory.make('queue', None)
        capture_queue = Gst.ElementFactory.make('queue', None)
        encoder = Gst.ElementFactory.make('avenc_mpeg4', None)
        encoder.set_property('bitrate', bitrate)
        encoder.set_property('bitrate-tolerance', 500 << 10)
        if path(output_path).ext.lower() == '.mp4':
            muxer = Gst.ElementFactory.make('mp4mux', None)
        elif path(output_path).ext.lower() == '.avi':
            muxer = Gst.ElementFactory.make('avimux', None)
        else:
            raise ValueError('Unsupported output file type: %s' %
                             path(output_path).ext)
        filesink = Gst.ElementFactory.make('filesink', None)
        filesink.set_property('location', output_path)

        videorate = Gst.ElementFactory.make('videorate', None)
        filter1 = Gst.ElementFactory.make('capsfilter', None)
        if device_config is not None:
            # Restrict the stream to the selected device mode.  A capsfilter
            # with no "caps" property set passes anything through, so the
            # `autovideosrc` fallback above keeps working when no device
            # configuration is supplied.  (Previously these two calls ran
            # unconditionally and crashed with a TypeError when
            # `device_config` was None.)
            filter1.set_property(
                'caps',
                Gst.Caps('video/x-raw,framerate={framerate_numerator}/{framerate_denominator}'
                         .format(**device_config)))
            self.filter_.set_property('caps',
                                      Gst.Caps(get_caps_str(device_config)))

        src_elements = (self.src, self.filter_, videorate, filter1, tee)
        sink_elements = (sink_queue, self.sink)
        capture_elements = (capture_queue, encoder, muxer, filesink)

        # Add elements to the pipeline
        for d in src_elements + sink_elements + capture_elements:
            self.pipeline.add(d)

        # Link each branch internally...
        for elements in (src_elements, sink_elements, capture_elements):
            for i, j in zip(elements[:-1], elements[1:]):
                i.link(j)

        # ...then fan the source branch out to both consumers.
        tee.link(sink_elements[0])
        tee.link(capture_elements[0])

        self.output_path = output_path
        self.tee = tee
        self.muxer = muxer
        self.src_elements = src_elements
        self.sink_elements = sink_elements
        self.capture_elements = capture_elements

        self.pipeline.set_state(Gst.State.PLAYING)
        self._alive = True
    def __init__(self):
        """Builds three videotestsrc->x264->RTP/UDP send pipelines and runs
        a blocking bus loop until an ERROR or EOS message arrives.

        Each iteration creates its own source/encode/pay/rtpbin/udpsink
        chain, all streams sending to 127.0.0.1:5011.
        """
        def onPad(obj, pad, target):
            """Links a dynamically added decodebin pad to *target*'s sink pad.

            :param obj: the GstDecodeBin that emitted "pad-added"
            :param pad: the newly created GstDecodePad
            :param target: the downstream element (GstX264Enc) to link to
            """
            print("Received new pad '%s' from '%s':" %
                  (pad.get_name(), obj.get_name()))
            if pad.is_linked():
                print("We are already linked. Ignoring.")
                return True
            # NOTE(review): the link result is captured but never checked,
            # so a failed link goes unnoticed.
            ret = pad.link(target.get_static_pad("sink"))
            return True

        self.pipeline = Gst.Pipeline("mypipeline")

        # key == i here; enumerate over range(3) is redundant but harmless.
        for key, i in enumerate(range(3)):
            #  video elements
            self.src = Gst.ElementFactory.make("videotestsrc",
                                               "source" + i.__str__())
            # "pattern" selects the videotestsrc test image (0=smpte, 1=snow, ...).
            self.src.set_property("pattern", i)
            self.decodebin = Gst.ElementFactory.make("decodebin")
            self.encoder = Gst.ElementFactory.make("x264enc")
            self.rtp_payload = Gst.ElementFactory.make("rtph264pay")
            self.rtpbin = Gst.ElementFactory.make('rtpbin',
                                                  'rtpbin' + i.__str__())
            self.udpsink = Gst.ElementFactory.make("udpsink")
            self.udpsink.set_property("host", "127.0.0.1")
            self.udpsink.set_property("port", 5011)
            self.udpsrc = Gst.ElementFactory.make("udpsrc")
            self.udpsrc.set_property("port", 5013)
            # presumably logs this host's address; get_ip is defined elsewhere
            # in the class — TODO confirm its contract.
            print(self.get_ip(5043))
            caps = Gst.Caps(
                "application/x-rtp, width=640, height=480, framerate=20/1")
            self.udpsrc.set_property("caps", caps)

            # ElementFactory.make returns None when a plugin is missing.
            if not self.pipeline or not self.src or not self.decodebin or not self.encoder or not self.rtp_payload \
                    or not self.rtpbin or not self.udpsink or not self.udpsrc:
                print("One of the elements wasn't create... Exiting\n")
                exit(-1)
            self.pipeline.add(self.src, self.decodebin, self.encoder,
                              self.rtp_payload, self.rtpbin, self.udpsink,
                              self.udpsrc)

            # video linking
            self.src.link(self.decodebin)
            # decodebin pads appear at runtime; link to the encoder then.
            self.decodebin.connect("pad-added", onPad, self.encoder)
            self.encoder.link(self.rtp_payload)
            self.rtp_payload.link_pads('src', self.rtpbin, 'send_rtp_sink_0')
            self.rtpbin.link_pads('send_rtp_src_0', self.udpsink, 'sink')
            # RTCP feedback from the receiver back into rtpbin.
            self.udpsrc.link_pads('src', self.rtpbin, 'recv_rtcp_sink_0')

        ret = self.pipeline.set_state(Gst.State.PLAYING)
        if ret == Gst.StateChangeReturn.FAILURE:
            print("Unable to set the pipeline to the playing state.",
                  file=sys.stderr)
            exit(-1)

        bus = self.pipeline.get_bus()

        # Parse message: blocks here until ERROR or EOS is seen.
        while True:
            message = bus.timed_pop_filtered(
                Gst.CLOCK_TIME_NONE, Gst.MessageType.STATE_CHANGED
                | Gst.MessageType.ERROR | Gst.MessageType.EOS)
            if message.type == Gst.MessageType.ERROR:
                err, debug = message.parse_error()
                print("Error received from element %s: %s" %
                      (message.src.get_name(), err),
                      file=sys.stderr)
                print("Debugging information: %s" % debug, file=sys.stderr)
                break
            elif message.type == Gst.MessageType.EOS:
                print("End-Of-Stream reached.")
                break
            elif message.type == Gst.MessageType.STATE_CHANGED:
                # Only report state changes of the pipeline itself, not of
                # every child element.
                if isinstance(message.src, Gst.Pipeline):
                    old_state, new_state, pending_state = message.parse_state_changed(
                    )
                    print("Pipeline state changed from %s to %s." %
                          (old_state.value_nick, new_state.value_nick))
            else:
                print("Unexpected message received.", file=sys.stderr)
        # Free resources
        self.pipeline.set_state(Gst.State.NULL)
Exemplo n.º 25
0
class ProxyManager(GObject.Object, Loggable):
    """Transcodes assets and manages proxies.

    A proxy is an edit-friendly transcode of an asset, stored next to the
    original as ``<filename>.<file_size>.proxy.mkv``.  Transcoding jobs run
    through GstTranscoder; progress and results are reported via GObject
    signals.

    Signals:
        progress(asset, percent, estimated_seconds_left)
        proxy-ready(asset, proxy_or_None)
        asset-preparing-cancelled(asset)
        error-preparing-asset(asset, proxy, error)
    """

    __gsignals__ = {
        "progress": (GObject.SignalFlags.RUN_LAST, None, (object, int, int)),
        "proxy-ready": (GObject.SignalFlags.RUN_LAST, None, (object, object)),
        "asset-preparing-cancelled":
        (GObject.SignalFlags.RUN_LAST, None, (object, )),
        "error-preparing-asset":
        (GObject.SignalFlags.RUN_LAST, None, (object, object, object)),
    }

    # Formats considered well-supported for direct editing; assets matching
    # a combination of these are not proxied under the AUTOMATIC strategy.
    WHITELIST_CONTAINER_CAPS = [
        "video/quicktime", "application/ogg", "video/x-matroska", "video/webm"
    ]
    WHITELIST_AUDIO_CAPS = [
        "audio/mpeg", "audio/x-vorbis", "audio/x-raw", "audio/x-flac",
        "audio/x-wav"
    ]
    WHITELIST_VIDEO_CAPS = [
        "video/x-h264", "image/jpeg", "video/x-raw", "video/x-vp8",
        "video/x-theora"
    ]

    # Cartesian product of the whitelists above, as encoding profiles.
    WHITELIST_FORMATS = []
    for container in WHITELIST_CONTAINER_CAPS:
        for audio in WHITELIST_AUDIO_CAPS:
            for video in WHITELIST_VIDEO_CAPS:
                WHITELIST_FORMATS.append(
                    createEncodingProfileSimple(container, audio, video))

    # Container-less audio-only formats are also acceptable as-is.
    for audio in WHITELIST_AUDIO_CAPS:
        a = GstPbutils.EncodingAudioProfile.new(Gst.Caps(audio), None, None, 0)
        WHITELIST_FORMATS.append(a)

    proxy_extension = "proxy.mkv"

    def __init__(self, app):
        GObject.Object.__init__(self)
        Loggable.__init__(self)

        self.app = app
        # Total time to transcode in seconds.
        self._total_time_to_transcode = 0
        # Transcoded time per asset in seconds.
        self._transcoded_durations = {}
        self._start_proxying_time = 0
        self.__running_transcoders = []
        self.__pending_transcoders = []

        # Pick the first encoding target we can actually encode and decode.
        self.__encoding_target_file = None
        self.proxyingUnsupported = False
        for encoding_format in [ENCODING_FORMAT_JPEG, ENCODING_FORMAT_PRORES]:
            self.__encoding_profile = self.__getEncodingProfile(
                encoding_format)
            if self.__encoding_profile:
                self.__encoding_target_file = encoding_format
                self.info("Using %s as proxying format", encoding_format)
                break

        if not self.__encoding_profile:
            self.proxyingUnsupported = True

            self.error("Not supporting any proxy formats!")
            return

    def _assetMatchesEncodingFormat(self, asset, encoding_profile):
        """Returns whether *asset*'s streams all fit *encoding_profile*.

        Args:
            asset (GES.Asset): The asset to inspect.
            encoding_profile (GstPbutils.EncodingProfile): Container or
                audio-only profile to match against.

        Returns:
            bool: True iff every stream of the asset intersects the
                corresponding profile's caps.
        """
        def capsMatch(info, profile):
            # Non-empty intersection means the stream fits the profile.
            return not info.get_caps().intersect(
                profile.get_format()).is_empty()

        info = asset.get_info()
        if isinstance(encoding_profile, GstPbutils.EncodingAudioProfile):
            # An audio-only profile only matches a container-less,
            # single-audio-stream, video-less asset.
            if isinstance(info.get_stream_info(),
                          GstPbutils.DiscovererContainerInfo):
                return False
            audios = info.get_audio_streams()
            if len(audios) != 1 or not capsMatch(audios[0], encoding_profile):
                return False
            if info.get_video_streams():
                return False
            return True

        container = info.get_stream_info()
        if container:
            if not capsMatch(container, encoding_profile):
                return False

        for profile in encoding_profile.get_profiles():
            if isinstance(profile, GstPbutils.EncodingAudioProfile):
                audios = info.get_audio_streams()
                for audio_stream in audios:
                    if not capsMatch(audio_stream, profile):
                        return False
            elif isinstance(profile, GstPbutils.EncodingVideoProfile):
                videos = info.get_video_streams()
                for video_stream in videos:
                    if not capsMatch(video_stream, profile):
                        return False
        return True

    def __getEncodingProfile(self, encoding_target_file, asset=None):
        """Loads the default profile of an encoding target, if usable.

        Args:
            encoding_target_file (str): Basename of the .gep file in the
                presets directory.
            asset (Optional[GES.Asset]): When specified, the audio profile
                is restricted to the asset's channel count.

        Returns:
            Optional[GstPbutils.EncodingProfile]: The profile, or None if
                no encoder/decoder pair is available for one of its streams.
        """
        encoding_target = GstPbutils.EncodingTarget.load_from_file(
            os.path.join(get_gstpresets_dir(), encoding_target_file))
        encoding_profile = encoding_target.get_profile("default")

        if not encoding_profile:
            return None

        for profile in encoding_profile.get_profiles():
            profile_format = profile.get_format()
            # Do not verify we have an encoder/decoder for raw audio/video,
            # as they are not required.
            # BUGFIX: the second caps string was "audio/x-video(ANY)",
            # which is not a media type; raw video is "video/x-raw".
            if profile_format.intersect(Gst.Caps("audio/x-raw(ANY)")) or \
                    profile_format.intersect(Gst.Caps("video/x-raw(ANY)")):
                continue
            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_ENCODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SRC, False):
                return None
            if not Gst.ElementFactory.list_filter(
                    Gst.ElementFactory.list_get_elements(
                        Gst.ELEMENT_FACTORY_TYPE_DECODER, Gst.Rank.MARGINAL),
                    profile_format, Gst.PadDirection.SINK, False):
                return None

        if asset:
            # If we have an asset, we force audioconvert to keep
            # the number of channels
            # TODO: remove once https://bugzilla.gnome.org/show_bug.cgi?id=767226
            # is fixed
            info = asset.get_info()
            try:
                # TODO Be smarter about multiple streams
                audio_stream = info.get_audio_streams()[0]
                channels = audio_stream.get_channels()
                audio_profile = [
                    profile for profile in encoding_profile.get_profiles()
                    if isinstance(profile, GstPbutils.EncodingAudioProfile)
                ][0]
                audio_profile.set_restriction(
                    Gst.Caps.from_string("audio/x-raw,channels=%d" % channels))
            except IndexError:
                # No audio stream: nothing to restrict.
                pass

        return encoding_profile

    @classmethod
    def is_proxy_asset(cls, obj):
        """Returns whether the specified asset (or URI) is a proxy file."""
        if isinstance(obj, GES.Asset):
            uri = obj.props.id
        else:
            uri = obj

        return uri.endswith("." + cls.proxy_extension)

    def checkProxyLoadingSucceeded(self, proxy):
        """Emits error-preparing-asset if *proxy* is not a proxy asset.

        Returns:
            bool: True iff the proxy loaded correctly.
        """
        if self.is_proxy_asset(proxy):
            return True

        self.emit("error-preparing-asset", None, proxy, proxy.get_error())
        return False

    def getTargetUri(self, proxy_asset):
        """Returns the URI of the original asset a proxy was created from.

        Strips the trailing ".<file_size>.proxy.mkv" (three dot-separated
        components) from the proxy's id.
        """
        return ".".join(proxy_asset.props.id.split(".")[:-3])

    def getProxyUri(self, asset):
        """Returns the URI of a possible proxy file.

        The name looks like:
            <filename>.<file_size>.<proxy_extension>

        Returns None if the original file does not exist.
        """
        asset_file = Gio.File.new_for_uri(asset.get_id())
        try:
            file_size = asset_file.query_info(Gio.FILE_ATTRIBUTE_STANDARD_SIZE,
                                              Gio.FileQueryInfoFlags.NONE,
                                              None).get_size()
        except GLib.Error as err:
            if err.matches(Gio.io_error_quark(), Gio.IOErrorEnum.NOT_FOUND):
                return None
            else:
                raise

        return "%s.%s.%s" % (asset.get_id(), file_size, self.proxy_extension)

    def isAssetFormatWellSupported(self, asset):
        """Returns whether the asset matches one of the whitelisted formats."""
        for encoding_format in self.WHITELIST_FORMATS:
            if self._assetMatchesEncodingFormat(asset, encoding_format):
                self.info("Automatically not proxying")
                return True

        return False

    def __assetNeedsTranscoding(self, asset):
        """Returns whether the asset should be transcoded into a proxy."""
        if self.proxyingUnsupported:
            self.info("No proxying supported")
            return False

        if asset.is_image():
            return False

        if self.app.settings.proxyingStrategy == ProxyingStrategy.NOTHING:
            self.debug("Not proxying anything. %s",
                       self.app.settings.proxyingStrategy)
            return False

        # Under AUTOMATIC, well-supported formats are edited directly.
        if self.app.settings.proxyingStrategy == ProxyingStrategy.AUTOMATIC \
                and not self.is_proxy_asset(asset) and \
                self.isAssetFormatWellSupported(asset):
            return False

        if not self._assetMatchesEncodingFormat(asset,
                                                self.__encoding_profile):
            return True

        self.info("%s does not need proxy", asset.get_id())
        return False

    def __startTranscoder(self, transcoder):
        """Starts a transcoder asynchronously and tracks it as running."""
        self.debug("Starting %s", transcoder.props.src_uri)
        if self._start_proxying_time == 0:
            self._start_proxying_time = time.time()
        transcoder.run_async()
        self.__running_transcoders.append(transcoder)

    def __assetsMatch(self, asset, proxy):
        """Returns whether *proxy* is a valid, up-to-date proxy of *asset*."""
        if self.__assetNeedsTranscoding(proxy):
            return False

        info = asset.get_info()
        if info.get_duration() != asset.get_duration():
            return False

        return True

    def __assetLoadedCb(self, proxy, res, asset, transcoder):
        """Async callback for GES.Asset.request_async on a proxy URI.

        With no transcoder: an existing proxy file was loaded; a fresh
        transcode is launched when loading failed or the proxy is stale.
        With a transcoder: a just-produced proxy finished loading.
        """
        try:
            GES.Asset.request_finish(res)
        except GLib.Error as e:
            if transcoder:
                self.emit("error-preparing-asset", asset, proxy, e)
                del transcoder
            else:
                # The preexisting proxy file is unusable; regenerate it.
                self.__createTranscoder(asset)

            return

        if not transcoder:
            if not self.__assetsMatch(asset, proxy):
                return self.__createTranscoder(asset)
        else:
            # Let the thumbnail/waveform filters persist their data.
            transcoder.props.pipeline.props.video_filter.finalize(proxy)
            transcoder.props.pipeline.props.audio_filter.finalize(proxy)

            del transcoder

        if asset.get_info().get_duration() != proxy.get_info().get_duration():
            self.error(
                "Asset %s (duration=%s) and created proxy %s (duration=%s) do not"
                " have the same duration this should *never* happen, please file"
                " a bug with the media files." %
                (asset.get_id(), Gst.TIME_ARGS(
                    asset.get_info().get_duration()), proxy.get_id(),
                 Gst.TIME_ARGS(proxy.get_info().get_duration())))

        self.emit("proxy-ready", asset, proxy)
        self.__emitProgress(proxy, 100)

    def __transcoderErrorCb(self, transcoder, error, unused_details, asset):
        """Forwards a transcoder error as error-preparing-asset."""
        self.emit("error-preparing-asset", asset, None, error)

    def __transcoderDoneCb(self, transcoder, asset):
        """Finalizes a finished transcode and schedules the next pending one."""
        transcoder.disconnect_by_func(self.__transcoderDoneCb)
        transcoder.disconnect_by_func(self.__transcoderErrorCb)
        transcoder.disconnect_by_func(self.__proxyingPositionChangedCb)

        self.debug("Transcoder done with %s", asset.get_id())

        self.__running_transcoders.remove(transcoder)

        # Transcoding targets "<proxy>.part"; move it to its final name.
        proxy_uri = self.getProxyUri(asset)
        os.rename(Gst.uri_get_location(transcoder.props.dest_uri),
                  Gst.uri_get_location(proxy_uri))

        # Make sure that if it first failed loading, the proxy is forced to be
        # reloaded in the GES cache.
        GES.Asset.needs_reload(GES.UriClip, proxy_uri)
        GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                self.__assetLoadedCb, asset, transcoder)

        try:
            self.__startTranscoder(self.__pending_transcoders.pop())
        except IndexError:
            if not self.__running_transcoders:
                # All jobs done; reset the progress accounting.
                self._transcoded_durations = {}
                self._total_time_to_transcode = 0
                self._start_proxying_time = 0

    def __emitProgress(self, asset, creation_progress):
        """Handles the transcoding progress of the specified asset."""
        if self._transcoded_durations:
            # Extrapolate remaining time from the overall transcoding rate.
            time_spent = time.time() - self._start_proxying_time
            transcoded_seconds = sum(self._transcoded_durations.values())
            remaining_seconds = max(
                0, self._total_time_to_transcode - transcoded_seconds)
            estimated_time = remaining_seconds * time_spent / transcoded_seconds
        else:
            estimated_time = 0

        asset.creation_progress = creation_progress
        self.emit("progress", asset, asset.creation_progress, estimated_time)

    def __proxyingPositionChangedCb(self, transcoder, position, asset):
        """Updates per-asset progress from a transcoder position update."""
        if transcoder not in self.__running_transcoders:
            self.info("Position changed after job cancelled!")
            return

        self._transcoded_durations[asset] = position / Gst.SECOND

        duration = transcoder.props.duration
        if duration <= 0 or duration == Gst.CLOCK_TIME_NONE:
            duration = asset.props.duration
        if duration > 0 and duration != Gst.CLOCK_TIME_NONE:
            creation_progress = 100 * position / duration
            # Do not set to >= 100 as we need to notify about the proxy first.
            asset.creation_progress = max(0, min(creation_progress, 99))

        self.__emitProgress(asset, asset.creation_progress)

    def is_asset_queued(self, asset):
        """Returns whether the specified asset is queued for transcoding.

        Args:
            asset (GES.Asset): The asset to check.

        Returns:
            bool: True iff the asset is being transcoded or pending.
        """
        all_transcoders = self.__running_transcoders + self.__pending_transcoders
        for transcoder in all_transcoders:
            if asset.props.id == transcoder.props.src_uri:
                return True

        return False

    def __createTranscoder(self, asset):
        """Creates a transcoding job for the asset, starting it if a slot is free."""
        self._total_time_to_transcode += asset.get_duration() / Gst.SECOND
        asset_uri = asset.get_id()
        proxy_uri = self.getProxyUri(asset)

        dispatcher = GstTranscoder.TranscoderGMainContextSignalDispatcher.new()
        encoding_profile = self.__getEncodingProfile(
            self.__encoding_target_file, asset)
        # Write to a ".part" file; it is renamed once transcoding succeeds.
        transcoder = GstTranscoder.Transcoder.new_full(asset_uri,
                                                       proxy_uri + ".part",
                                                       encoding_profile,
                                                       dispatcher)
        transcoder.props.position_update_interval = 1000

        # Piggy-back thumbnail and waveform generation on the transcode.
        thumbnailbin = Gst.ElementFactory.make("teedthumbnailbin")
        thumbnailbin.props.uri = asset.get_id()

        waveformbin = Gst.ElementFactory.make("waveformbin")
        waveformbin.props.uri = asset.get_id()
        waveformbin.props.duration = asset.get_duration()

        transcoder.props.pipeline.props.video_filter = thumbnailbin
        transcoder.props.pipeline.props.audio_filter = waveformbin

        transcoder.set_cpu_usage(self.app.settings.max_cpu_usage)
        transcoder.connect("position-updated",
                           self.__proxyingPositionChangedCb, asset)

        transcoder.connect("done", self.__transcoderDoneCb, asset)
        transcoder.connect("error", self.__transcoderErrorCb, asset)
        if len(self.__running_transcoders
               ) < self.app.settings.numTranscodingJobs:
            self.__startTranscoder(transcoder)
        else:
            self.__pending_transcoders.append(transcoder)

    def cancel_job(self, asset):
        """Cancels the transcoding job for the specified asset, if any.

        Args:
            asset (GES.Asset): The original asset.
        """
        if not self.is_asset_queued(asset):
            return

        for transcoder in self.__running_transcoders:
            if asset.props.id == transcoder.props.src_uri:
                self.info("Cancelling running transcoder %s %s",
                          transcoder.props.src_uri, transcoder.__grefcount__)
                self.__running_transcoders.remove(transcoder)
                self.emit("asset-preparing-cancelled", asset)
                return

        for transcoder in self.__pending_transcoders:
            if asset.props.id == transcoder.props.src_uri:
                self.info("Cancelling pending transcoder %s",
                          transcoder.props.src_uri)
                # Removing the transcoder from the list
                # will lead to its destruction (only reference)
                # here, which means it will be stopped.
                self.__pending_transcoders.remove(transcoder)
                self.emit("asset-preparing-cancelled", asset)
                return

    def add_job(self, asset):
        """Adds a transcoding job for the specified asset if needed.

        Args:
            asset (GES.Asset): The asset to be transcoded.
        """
        if self.is_asset_queued(asset):
            self.log("Asset already queued for proxying: %s", asset)
            return

        force_proxying = asset.force_proxying
        if not force_proxying and not self.__assetNeedsTranscoding(asset):
            self.debug("Not proxying asset (proxying disabled: %s)",
                       self.proxyingUnsupported)
            # Make sure to notify we do not need a proxy for that asset.
            self.emit("proxy-ready", asset, None)
            return

        proxy_uri = self.getProxyUri(asset)
        if Gio.File.new_for_uri(proxy_uri).query_exists(None):
            self.debug("Using proxy already generated: %s", proxy_uri)
            GES.Asset.request_async(GES.UriClip, proxy_uri, None,
                                    self.__assetLoadedCb, asset, None)
            return

        self.debug("Creating a proxy for %s (strategy: %s, force: %s)",
                   asset.get_id(), self.app.settings.proxyingStrategy,
                   force_proxying)
        self.__createTranscoder(asset)
        return
Exemplo n.º 26
0
def main(args):
    """Builds and runs an NVIDIA DeepStream-style inference pipeline.

    Pipeline:
        filesrc ! h264parse ! nvdec_h264 ! nvinfer ! capsfilter(NV12,NVMM) !
        nvvidconv ! capsfilter(RGBA,NVMM) ! nvosd ! nveglglessink

    Args:
        args (list): sys.argv-style list; ``args[1]`` is the H.264 media
            file location passed to filesrc.

    Exits with status 1 on usage errors, missing elements, or a failure
    to reach the PLAYING state.
    """
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)

    # Deprecated in recent PyGObject but harmless; kept for old runtimes.
    GObject.threads_init()
    Gst.init(args)
    # Gst.debug_set_default_threshold(Gst.DebugLevel.INFO)
    loop = GObject.MainLoop()

    pipeline = Gst.Pipeline.new("dstest1-pipeline")

    source = Gst.ElementFactory.make("filesrc", "file-source")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    decoder = Gst.ElementFactory.make("nvdec_h264", "nvh264-decoder")

    pgie = Gst.ElementFactory.make("nvinfer", "primary-nvinference-engine")
    nvvidconv = Gst.ElementFactory.make("nvvidconv", "nvvideo-converter")

    nvosd = Gst.ElementFactory.make("nvosd", "nv-onscreendisplay")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")

    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    filter2 = Gst.ElementFactory.make("capsfilter", "filter2")

    # ElementFactory.make returns None when the plugin is not installed.
    if (not pipeline) or (not source) or (not h264parser) \
        or (not decoder) or (not pgie) or (not nvvidconv) \
        or (not nvosd) or (not sink) or (not filter1) or (not filter2):
        sys.stderr.write("One element could not be created. Exiting.\n")
        sys.exit(1)

    source.set_property("location", args[1])
    pgie.set_property("config-file-path", "demo_pgie_config.txt")
    nvosd.set_property("font-size", 15)

    # NVMM (device-memory) caps between the NVIDIA elements.
    cap1 = Gst.Caps("video/x-raw(memory:NVMM), format=NV12")
    filter1.set_property("caps", cap1)
    cap2 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter2.set_property("caps", cap2)

    add_many(pipeline, source, h264parser, decoder, pgie, filter1, nvvidconv,
             filter2, nvosd, sink)
    link_many(source, h264parser, decoder, pgie, filter1, nvvidconv, filter2,
              nvosd, sink)

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.enable_sync_message_emission()
    bus.connect("message", bus_call, loop, pipeline)

    # Probe the OSD sink pad to inspect inference metadata on each buffer.
    osd_sink_pad = nvosd.get_static_pad("sink")
    if not osd_sink_pad:
        sys.stdout.write("Unable to get sink nvosd pad!\n")
    else:
        sys.stdout.write("Get sink nvosd pad!\n")
        osd_sink_pad.add_probe(Gst.PadProbeType.BUFFER,
                               osd_sink_pad_buffer_probe)

    sys.stdout.write("Now playing: %s\n" % args[1])
    ret = pipeline.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        sys.stdout.write(
            "ERROR: Unable to set the pipeline to the playing state\n")
        sys.exit(1)
    try:
        sys.stdout.write("Running...\n")
        loop.run()
    # Was a bare `except:`, which also swallowed SystemExit; keep Ctrl-C
    # handling (KeyboardInterrupt is not an Exception subclass).
    except (KeyboardInterrupt, Exception):
        sys.stdout.write("something happened\n")
    sys.stdout.write("Returned, stopping playback\n")
    pipeline.set_state(Gst.State.NULL)
Exemplo n.º 27
0
    def main(self):
        """For each address in self.sink_ip, builds an RTP receive/re-encode/
        re-send pipeline (udpsrc -> rtpvp8depay -> vp8dec -> videoconvert ->
        decodebin -> vp8enc -> rtpvp8pay -> rtpbin -> udpsink) and then runs
        a blocking bus loop until an ERROR or EOS message arrives.
        """
        Gst.init(None)

        def onPad(obj, pad, target):
            """Links a dynamically added decodebin pad to *target*'s sink pad.

            :param obj: the GstDecodeBin that emitted "pad-added"
            :param pad: the newly created GstDecodePad
            :param target: the downstream element (the VP8 encoder)
            """
            print("Received new pad '%s' from '%s':" %
                  (pad.get_name(), obj.get_name()))
            if pad.is_linked():
                print("We are already linked. Ignoring.")
                return True
            pad.link(target.get_static_pad("sink"))
            return True

        self.pipeline = Gst.Pipeline("mypipeline")
        if self.sink_ip:
            # One receive/forward chain per destination address.
            for key, i in enumerate(self.sink_ip):
                print(i, key)
                #  video elements
                src = Gst.ElementFactory.make("udpsrc",
                                              "source" + key.__str__())
                src.set_property("port", 8880)
                caps = Gst.Caps(
                    "application/x-rtp, width=640, height=480, framerate=20/1")
                src.set_property("caps", caps)
                decodebin = Gst.ElementFactory.make("decodebin")
                encoder = Gst.ElementFactory.make("vp8enc")
                dencoder = Gst.ElementFactory.make("vp8dec")
                videoconvert = Gst.ElementFactory.make("videoconvert")
                rtp_payload = Gst.ElementFactory.make("rtpvp8depay")
                rtp_payload1 = Gst.ElementFactory.make(
                    "rtpvp8pay", 'rtpvp8pay' + key.__str__())
                rtpbin = Gst.ElementFactory.make('rtpbin',
                                                 'rtpbin' + key.__str__())
                udpsink = Gst.ElementFactory.make("udpsink",
                                                  'sink' + key.__str__())
                # i is the destination address from self.sink_ip.
                udpsink.set_property("host", i)
                udpsink.set_property("port", 6000)
                # NOTE(review): udpsrc and sink below are created and
                # configured but never added to the pipeline or linked
                # (the RTCP link is commented out at the bottom).
                udpsrc = Gst.ElementFactory.make("udpsrc")
                udpsrc.set_property("port", 5013)
                caps = Gst.Caps(
                    "application/x-rtp, width=640, height=480, framerate=20/1")
                udpsrc.set_property("caps", caps)
                sink = Gst.ElementFactory.make('autovideosink')

                # ElementFactory.make returns None when a plugin is missing.
                if not self.pipeline or not src or not rtp_payload or not dencoder or not videoconvert or not decodebin \
                        or not encoder or not rtp_payload1 or not rtpbin or not udpsink:
                    print("One of the elements wasn't create... Exiting\n")
                    exit(-1)
                self.pipeline.add(src, rtp_payload, dencoder, videoconvert,
                                  decodebin, encoder, rtp_payload1, rtpbin,
                                  udpsink)

                # video linking
                src.link(rtp_payload)
                rtp_payload.link(dencoder)
                dencoder.link(videoconvert)
                videoconvert.link(decodebin)
                # decodebin pads appear at runtime; link to the encoder then.
                decodebin.connect("pad-added", onPad, encoder)
                encoder.link(rtp_payload1)
                rtp_payload1.link_pads('src', rtpbin, 'send_rtp_sink_0')
                rtpbin.link_pads('send_rtp_src_0', udpsink, 'sink')
                # udpsrc.link_pads('src', rtpbin, 'recv_rtcp_sink_0')

            ret = self.pipeline.set_state(Gst.State.PLAYING)
            if ret == Gst.StateChangeReturn.FAILURE:
                print("Unable to set the pipeline to the playing state.",
                      file=sys.stderr)
                exit(-1)

            bus = self.pipeline.get_bus()

            # Parse message: blocks here until ERROR or EOS is seen.
            while True:
                message = bus.timed_pop_filtered(
                    Gst.CLOCK_TIME_NONE, Gst.MessageType.STATE_CHANGED
                    | Gst.MessageType.ERROR | Gst.MessageType.EOS)
                if message.type == Gst.MessageType.ERROR:
                    err, debug = message.parse_error()
                    print("Error received from element %s: %s" %
                          (message.src.get_name(), err),
                          file=sys.stderr)
                    print("Debugging information: %s" % debug, file=sys.stderr)
                    break
                elif message.type == Gst.MessageType.EOS:
                    print("End-Of-Stream reached.")
                    break
                elif message.type == Gst.MessageType.STATE_CHANGED:
                    # Only report state changes of the pipeline itself.
                    if isinstance(message.src, Gst.Pipeline):
                        old_state, new_state, pending_state = message.parse_state_changed(
                        )
                        print("Pipeline state changed from %s to %s." %
                              (old_state.value_nick, new_state.value_nick))
                else:
                    print("Unexpected message received.", file=sys.stderr)
            # Free resources
            self.pipeline.set_state(Gst.State.NULL)
Exemplo n.º 28
0
gi.require_version("GstBase", "1.0")

import os
import cv2
import numpy as np
import msgpack
import struct
from gi.repository import Gst, GObject, GstBase, GLib
from typing import List

# GStreamer must be initialized before any Gst.Caps/Gst.Structure call.
Gst.init(None)

# Sink-pad caps: custom media type carrying msgpack-serialized predictions.
ICAPS = Gst.Caps(
    Gst.Structure(
        "application/msgpack-predicts"
    )
)

# Source-pad caps: custom media type for the element's "meter" output.
OCAPS = Gst.Caps(
    Gst.Structure(
        "application/meter",
    )
)


def read_mask():
    mask = cv2.imread(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "..",
        "resource",
Exemplo n.º 29
0
    from numpy_ringbuffer import RingBuffer
    from matplotlib import pyplot as plt
    from matplotlib.backends.backend_agg import FigureCanvasAgg
except ImportError:
    Gst.error('audioplot requires numpy, numpy_ringbuffer and matplotlib')
    raise


# GStreamer must be initialized before any Gst API is used at import time.
Gst.init(None)

# All raw audio format names GStreamer knows, parsed out of the
# "{ F32LE, F64LE, ... }" list string in GstAudio.AUDIO_FORMATS_ALL.
AUDIO_FORMATS = [f.strip() for f in
                 GstAudio.AUDIO_FORMATS_ALL.strip('{ }').split(',')]

# Sink pad template: interleaved raw audio, any supported format,
# any rate and channel count. (PEP 8: no spaces around '=' in
# keyword arguments — fixed for rate/channels below.)
ICAPS = Gst.Caps(Gst.Structure('audio/x-raw',
                               format=Gst.ValueList(AUDIO_FORMATS),
                               layout='interleaved',
                               rate=Gst.IntRange(range(1, GLib.MAXINT)),
                               channels=Gst.IntRange(range(1, GLib.MAXINT))))

# Source pad template: ARGB raw video of any size and framerate.
OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='ARGB',
                               width=Gst.IntRange(range(1, GLib.MAXINT)),
                               height=Gst.IntRange(range(1, GLib.MAXINT)),
                               framerate=Gst.FractionRange(Gst.Fraction(1, 1),
                                                           Gst.Fraction(GLib.MAXINT, 1))))

# Default values for the element's configurable properties.
DEFAULT_WINDOW_DURATION = 1.0  # presumably seconds of audio per plot — confirm against element
DEFAULT_WIDTH = 640
DEFAULT_HEIGHT = 480
DEFAULT_FRAMERATE_NUM = 25
DEFAULT_FRAMERATE_DENOM = 1
Exemplo n.º 30
0
    def test_fixate_caps_with_defalt_values(self):
        """Check fixate_caps_with_default_values() against real encoder caps.

        Each dataset entry is (template caps, restriction caps, default
        values, previous values, expected result); the function must
        return fixed caps equal to `expected`.

        NOTE(review): "defalt" in the method name is a typo; kept as-is
        so external test selection by name keeps working.
        """
        voaacenc_caps = Gst.Caps.from_string(
            "audio/x-raw, format=(string)S16LE, layout=(string)interleaved, rate=(int){ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000 }, channels=(int)1;"
            "audio/x-raw, format=(string)S16LE, layout=(string)interleaved, rate=(int){ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 64000, 88200, 96000 }, channels=(int)2, channel-mask=(bitmask)0x0000000000000003"
        )
        # Restriction caps modelled on YouTube's recommended audio settings.
        yt_audiorest = Gst.Caps(
            "audio/x-raw,channels=6,channel-mask=0x3f,rate={48000,96000};"
            "audio/x-raw,channels=2,rate={48000,96000}")

        vorbis_caps = Gst.Caps(
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)1;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)2, channel-mask=(bitmask)0x0000000000000003;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)3, channel-mask=(bitmask)0x0000000000000007;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)4, channel-mask=(bitmask)0x0000000000000033;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)5, channel-mask=(bitmask)0x0000000000000037;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)6, channel-mask=(bitmask)0x000000000000003f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)7, channel-mask=(bitmask)0x0000000000000d0f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)8, channel-mask=(bitmask)0x0000000000000c3f;"
            "audio/x-raw, format=(string)F32LE, layout=(string)interleaved, rate=(int)[ 1, 200000 ], channels=(int)[ 9, 255 ], channel-mask=(bitmask)0x0000000000000000"
        )

        avenc_ac3_caps = Gst.Caps(
            "audio/x-raw, channel-mask=(bitmask)0x0000000000000000, channels=(int)1, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000003, channels=(int)2, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000103, channels=(int)3, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000007, channels=(int)3, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000c03, channels=(int)4, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000033, channels=(int)4, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000107, channels=(int)4, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000c07, channels=(int)5, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000037, channels=(int)5, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000000c, channels=(int)2, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000000b, channels=(int)3, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000010b, channels=(int)4, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000000f, channels=(int)4, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000c0b, channels=(int)5, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000003b, channels=(int)5, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000010f, channels=(int)5, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x0000000000000c0f, channels=(int)6, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
            " audio/x-raw, channel-mask=(bitmask)0x000000000000003f, channels=(int)6, rate=(int){ 48000, 44100, 32000 }, layout=(string)interleaved, format=(string)F32LE;"
        )

        # Use GLib.MAXINT (== G_MAXINT == 2147483647) for both bounds,
        # instead of the previous hard-coded literal for "channels".
        audio_defaults = {
            "channels": Gst.IntRange(range(1, GLib.MAXINT)),
            "rate": Gst.IntRange(range(8000, GLib.MAXINT))
        }

        # (template, restrictions, defaults, previous values, expected)
        dataset = [
            (voaacenc_caps, yt_audiorest, audio_defaults, None,
             Gst.Caps(
                 "audio/x-raw, channels=2,rate=48000,channel-mask=(bitmask)0x03"
             )),
            (vorbis_caps, None, audio_defaults, None,
             Gst.Caps("audio/x-raw,channels=1,rate=8000")),
            (avenc_ac3_caps, None, audio_defaults,
             Gst.Caps("audio/x-raw, channels=(int)6, rate=(int)44100"),
             Gst.Caps("audio/x-raw, channels=(int)6, rate=(int)44100")),
        ]

        for template, restrictions, default_values, prev_vals, expected in dataset:
            res = fixate_caps_with_default_values(template, restrictions,
                                                  default_values, prev_vals)
            # is_equal_fixed also verifies the result is fully fixed caps.
            self.assertTrue(res.is_equal_fixed(expected),
                            "%s != %s" % (res, expected))