Example #1
    def run(self):

        self.show_all()
        self.xid = self.movie_window.get_property('window').get_xid()
        self.player.set_state(Gst.State.NULL)

        if self.startcam == "Start":
            if self.flipcam == True:
                self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! videoflip method=horizontal-flip ! autovideosink")
            else:
                self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! autovideosink")

#             Set up the gstreamer pipeline
#             self.player = Gst.parse_launch("v4l2src ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! video/x-raw-yuv,width=320,height=240,framerate=30/1 ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! video/x-raw-yuv,width=320,height=240,framerate=30/1 ! textoverlay font-desc=\"Sans 20\" text=\"Microsoft LifeCam NX-3000\" valign=top halign=left shaded-background=true ! timeoverlay halign=right valign=bottom font-desc=\"Sans 20\" ! clockoverlay halign=left valign=bottom text=\"\" time-format=\"%d.%m.%Y  %H:%M:%S \" font-desc=\"Sans 20\" ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! textoverlay valignment=bottom xpad=450 ypad=25 color=4278255360 font-desc=\"Sans 20\" text=\"Microsoft LifeCam NX-3000\" shaded-background=true ! timeoverlay halignment=right color=4278255360 font-desc=\"Sans 20\" ! clockoverlay color=4278255360 text=\"\" time-format=\"%d.%m.%Y  %H:%M:%S \" font-desc=\"Sans 20\" ! autovideosink")
#             self.player = Gst.parse_launch("v4l2src device=\"/dev/video1\" ! image/jpeg,width=640,height=480,framerate=30/1 ! jpegdec ! aspectratiocrop aspect-ratio=16/9 ! videoflip method=horizontal-flip ! autovideosink")
            self.startcam ="Stop"
        else:
            self.player = Gst.parse_launch("videotestsrc ! video/x-raw,width=640,height=480,framerate=30/1 ! aspectratiocrop aspect-ratio=16/9 ! autovideosink")
#             self.player = Gst.parse_launch("videotestsrc ! autovideosink")
            self.startcam = "Start"

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        self.player.set_state(Gst.State.PLAYING)
Example #2
    def __init__(self, pipeline_string='videotestsrc pattern=18 ! tee name=t ! queue ! autovideosink t. ! queue ! videoconvert ! videorate ! video/x-raw,width=(int)320,height=(int)240,format=(string)RGB16,framerate=(fraction)30/1 ! appsink name=sink'):
        self.data = None  # this will contain the data passed between the pipelines
        self.source_id = None
        self.lock = Lock()

        self.isWhite = True
        self.isStream = True
        self.timestamp = 0

        self.pipeline = Gst.parse_launch(pipeline_string)

        self.appsink = self.pipeline.get_by_name('sink')

        assert self.appsink, 'appsink element named \'sink\' not found'

        self.appsink.connect('new-sample', self.on_new_buffer)
        self.appsink.set_property('emit-signals', True)

        self.pipeline.set_state(Gst.State.PLAYING)

        # OUTPUT pipeline
        self.pipeline_out = Gst.parse_launch('appsrc name=source ! videoconvert ! autovideosink')

        self.appsrc = self.pipeline_out.get_by_name('source')

        assert self.appsrc, 'appsrc element named \'source\' not found'

        self.appsrc.set_property('caps', Gst.Caps.from_string('video/x-raw,format=(string)RGB16,width=(int)320,height=(int)240,framerate=(fraction)30/1'))

        self.appsrc.connect('need-data', self.on_need_data)
        self.appsrc.connect('enough-data', self.on_enough_data)

        self.pipeline_out.set_state(Gst.State.PLAYING)

        GObject.timeout_add_seconds(2, self._switch_data_type)
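The 'new-sample' and 'need-data' handlers wired up above are not shown in the listing. A minimal sketch of what they could look like, assuming the appsink side stores the latest buffer under the lock and the appsrc side pushes it out (the handler bodies are assumptions, not part of the original class):

    def on_new_buffer(self, appsink):
        # Assumed handler: pull the newest sample from the appsink and
        # keep its buffer for the output pipeline.
        sample = appsink.emit('pull-sample')
        with self.lock:
            self.data = sample.get_buffer()
        return Gst.FlowReturn.OK

    def on_need_data(self, appsrc, length):
        # Assumed handler: feed the last captured buffer to the appsrc.
        with self.lock:
            buf = self.data
        if buf is not None:
            appsrc.emit('push-buffer', buf)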
Example #3
	def makeReference(self, video):
		'''
		Make the reference videos.

		:param string video: Path to the selected video.
		'''
		VTLOG.info('Making reference...')
		self.pipeline = Gst.parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw,framerate=%s/1 ! filesink name=sink1' % self.framerate)
		source = self.pipeline.get_by_name('source')
		sink1 = self.pipeline.get_by_name('sink1')
		self.files['original'].append(video)
		source.props.location = video
		location = self.path + '_ref_original.yuv'
		self.files['original'].append(location)
		sink1.props.location = location
		self.__play()
		self.pipeline = Gst.parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw,framerate=%s/1 ! %s bitrate=%s ! tee name=t ! queue %s ! filesink name=sink2 t. ! queue ! decodebin ! filesink name=sink3' % (
			self.framerate,
			supported_codecs[self.codec]['encoder'],
			supported_codecs[self.codec]['bitrate_from_kbps'](self.bitrate),
			supported_codecs[self.codec]['add']
		))
		source = self.pipeline.get_by_name('source')
		sink2 = self.pipeline.get_by_name('sink2')
		sink3 = self.pipeline.get_by_name('sink3')
		source.props.location = video
		location = self.path + '_ref.' + self.codec
		self.files['coded'].append(location)
		sink2.props.location = location
		location = self.path + '_ref.yuv'
		self.files['coded'].append(location)
		sink3.props.location = location
		self.__play()
		VTLOG.info('Reference made')
Example #4
 def _convert_to_mp3(self, src, dst):
     """
         Convert file to mp3
         @param src as Gio.File
         @param dst as Gio.File
         @return Gst.Pipeline
     """
     try:
         # We need to escape \ in path
         src_path = src.get_path().replace("\\", "\\\\\\")
         dst_path = dst.get_path().replace("\\", "\\\\\\")
         if self._normalize:
             pipeline = Gst.parse_launch(
                                     'filesrc location="%s" ! decodebin\
                                     ! audioconvert\
                                     ! rgvolume pre-amp=6.0 headroom=10.0\
                                     ! rglimiter ! audioconvert\
                                     ! lamemp3enc ! id3v2mux\
                                     ! filesink location="%s"'
                                     % (src_path, dst_path))
         else:
             pipeline = Gst.parse_launch(
                                     'filesrc location="%s" ! decodebin\
                                     ! audioconvert ! lamemp3enc ! id3v2mux\
                                     ! filesink location="%s"'
                                     % (src_path, dst_path))
         pipeline.set_state(Gst.State.PLAYING)
         return pipeline
     except Exception as e:
         print("MtpSync::_convert_to_mp3(): %s" % e)
         return None
Example #5

def _has_espeak_module():
    try:
        Gst.parse_launch('espeak')
    except GLib.GError:
        logging.error('The speech plugin is not installed in the system.')
        return False
    return True
Example #6
    def __init__(self, ip_address, partners_quality):
        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        partners_quality = partners_quality.rstrip()
        if partners_quality.lower() == "h":
            self.pipeline = Gst.parse_launch("rtpbin name=rtpbin v4l2src ! videoscale ! videoconvert ! video/x-raw,format=UYVY,width=640,height=480,framerate=30/1 ! videoscale ! videoconvert ! avenc_mpeg4 bitrate=1000000 ! tee name=t ! rtpmp4vpay ! rtpbin.send_rtp_sink_0 t. ! queue ! xvimagesink rtpbin.send_rtp_src_0 ! udpsink name=udpsink_video port=5002 host=" + ip_address + " rtpbin.send_rtcp_src_0 ! udpsink port=5003 host=" + ip_address + " sync=false async=false udpsrc port=5007 ! rtpbin.recv_rtcp_sink_0")
            print("You've sent High Quality data")
        else:
            self.pipeline = Gst.parse_launch("rtpbin name=rtpbin v4l2src ! videoscale ! videoconvert ! video/x-raw,format=UYVY,width=640,height=480,framerate=30/1 ! videoscale ! videoconvert ! avenc_mpeg4 bitrate=150000 ! tee name=t ! rtpmp4vpay ! rtpbin.send_rtp_sink_0 t. ! queue ! xvimagesink rtpbin.send_rtp_src_0 ! udpsink name=udpsink_video port=5002 host=" + ip_address + " rtpbin.send_rtcp_src_0 ! udpsink port=5003 host=" + ip_address + " sync=false async=false udpsrc port=5007 ! rtpbin.recv_rtcp_sink_0")
            print("You've sent Low Quality data")
Example #7
    def _check(self, song):
        old_threshold = Gst.debug_get_default_threshold()
        Gst.debug_set_default_threshold(Gst.DebugLevel.NONE)

        pipeline = Gst.parse_launch(
            "uridecodebin uri=%s ! fakesink" % song("~uri"))
        bus = pipeline.get_bus()
        pipeline.set_state(Gst.State.PLAYING)
        try:
            while 1:
                message = bus.timed_pop(Gst.SECOND * 10)
                if not message or message.type == Gst.MessageType.ERROR:
                    if message:
                        debug = message.parse_error()[0].message
                    else:
                        debug = "timed out"
                    # only print a warning for platforms where we control
                    # the shipped dependencies.
                    if sys.platform == "darwin" or os.name == "nt":
                        print_w("GStreamer: Decoding %r failed (%s)" %
                                (song("~format"), debug))
                    break
                if message.type == Gst.MessageType.EOS:
                    break
        finally:
            pipeline.set_state(Gst.State.NULL)

        Gst.debug_set_default_threshold(old_threshold)
Example #8
    def _create_waveform(self, song, points):
        # Close any existing pipeline to avoid leaks
        self._clean_pipeline()

        if not song.is_file:
            return

        command_template = """
        uridecodebin name=uridec
        ! audioconvert
        ! level name=audiolevel interval={} post-messages=true
        ! fakesink sync=false"""
        interval = int(song("~#length") * 1E9 / points)
        print_d("Computing data for each %.3f seconds" % (interval / 1E9))

        command = command_template.format(interval)
        pipeline = Gst.parse_launch(command)
        pipeline.get_by_name("uridec").set_property("uri", song("~uri"))

        bus = pipeline.get_bus()
        self._bus_id = bus.connect("message", self._on_bus_message)
        bus.add_signal_watch()

        pipeline.set_state(Gst.State.PLAYING)

        self._pipeline = pipeline
        self._new_rms_vals = []
Example #9
    def __updatePipeline(self):
        """Creates / updates the GStreamer pipeline according to the currently set state."""

        print "Updating Pipeline, isReady: {}, isRunning: {}, isRecording: {} ".format(self.isReady, self.isRunning, self.isRecording)
        if self.pl:
            self.pl.set_state(Gst.State.NULL)
            self.pl = []

        pipeString = ""
        if self.isRecording:
            #    pipeString = self.source + \
            #        " ! tee name=t t. ! queue ! videoconvert ! x264enc ! mp4mux ! filesink location=outvid01 async=0 t. ! queue ! autovideosink"
            pipeString = "videotestsrc ! tee name=t t. ! queue ! videoconvert ! x264enc ! mp4mux ! filesink location=outvid01 async=0 t. ! queue ! autovideosink"
        else:
            pipeString = self.source + " ! autovideosink"

        print "\tPipeline String: " + pipeString
        self.pl = Gst.parse_launch(pipeString)

        # intercept sync messages so we can set in which window to draw in
        bus = self.pl.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message::eos", self.__onEos)
        bus.connect("message::error", self.__onError)
        bus.connect("sync-message::element", self.__onSyncMessage)

        if self.isRunning:
            self.pl.set_state(Gst.State.PLAYING)
        else:
            self.pl.set_state(Gst.State.NULL)

        print "\tWinID: " + str(self.targetWin.winId())
Example #10
    def __init__(self, options={}): 
        base.Base.__init__(self, options)
        Gst.Bin.__init__(self)

        aux = pipestr.replace("gc-hauppauge-preview", "sink-" + self.options['name'])

        #bin = Gst.parse_bin_from_description(aux, True)
        bin = Gst.parse_launch("( {} )".format(aux))
        self.add(bin)

        sink = self.get_by_name("gc-hauppauge-device-src")
        sink.set_property("device", self.options["locprevideo"])       

        sink = self.get_by_name("gc-hauppauge-file-src")
        sink.set_property("location", self.options["location"])


        sink = self.get_by_name("gc-hauppauge-audio-src") 
        sink.set_property("location", self.options["locpreaudio"])

        if self.options["player"] == False:
            self.mute = True
            element = self.get_by_name("gc-hauppauge-volume")
            element.set_property("mute", True)
        else:
            self.mute = False

        sink = self.get_by_name("gc-hauppauge-sink")
        sink.set_property('location', path.join(self.options['path'], self.options['file']))

        if self.options["vumeter"] == False:
            level = self.get_by_name("gc-hauppauge-level")
            level.set_property("message", False) 
Example #11
    def _check(self, song):
        old_threshold = Gst.debug_get_default_threshold()
        Gst.debug_set_default_threshold(Gst.DebugLevel.NONE)

        pipeline = Gst.parse_launch(
            "uridecodebin uri=%s ! fakesink" % song("~uri"))
        bus = pipeline.get_bus()
        pipeline.set_state(Gst.State.PLAYING)
        error = None
        try:
            while 1:
                message = bus.timed_pop(Gst.SECOND * 40)
                if not message or message.type == Gst.MessageType.ERROR:
                    if message:
                        error = message.parse_error()[0].message
                    else:
                        error = "timed out"
                    break
                if message.type == Gst.MessageType.EOS:
                    break
        finally:
            pipeline.set_state(Gst.State.NULL)

        Gst.debug_set_default_threshold(old_threshold)
        return error
Example #12
    def mk_pipe(self):

        # self.pipeline = Gst.parse_launch( "uridecodebin name=decode ! audioconvert ! level name=wavelevel ! fakesink name=faked" )
        # self.pipeline = Gst.parse_launch( "filesrc name=filesrc ! qtdemux ! audioconvert ! level name=wavelevel ! fakesink")
        self.pipeline = Gst.parse_launch(
            "filesrc name=filesrc ! decodebin ! audioconvert ! level name=wavelevel ! fakesink"
        )

        # if self.uri.startswith('/'):
        #    self.uri = "file://" + self.uri
        # decode = self.pipeline.get_by_name("decode")
        # decode.set_property( 'uri', self.uri )

        filesrc = self.pipeline.get_by_name("filesrc")
        filesrc.set_property("location", self.location)

        wavelevel = self.pipeline.get_by_name("wavelevel")
        wavelevel.set_property("interval", int(self.interval * Gst.SECOND))
        wavelevel.set_property("post-messages", True)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._messageCb)

        return
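mk_pipe() connects the bus to self._messageCb, which the listing omits. A sketch of how such a callback could read the RMS values posted by the level element (the handler body is an assumption, not from the original class):

    def _messageCb(self, bus, message):
        # Assumed handler: the level element posts element messages named
        # 'level' whose 'rms' field holds one dB value per channel.
        structure = message.get_structure()
        if structure and structure.get_name() == "level":
            rms_values = structure.get_value("rms")
            print(rms_values)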
Example #13
	def on_connect(self, sock, *args):
		'''Asynchronous connection listener. Starts a handler for each connection.'''
		if self.source_pipeline:
			return False

		conn, addr = sock.accept()
		print("Connection from", addr)

		self.source_pipeline = Gst.parse_launch("""
			fdsrc name=a fd=%u !
				matroskademux name=demux

			demux. !
				video/x-raw,width=800,height=450,format=I420,framerate=25/1 !
				queue !
				textoverlay halignment=left valignment=top ypad=25 text=intervideosink !
				timeoverlay halignment=left valignment=top ypad=25 xpad=400 !
				intervideosink channel=video

			demux. !
				audio/x-raw,format=S16LE,channels=2,layout=interleaved,rate=48000,channel-mask=(bitmask)0x3 !
				queue !
				interaudiosink channel=audio

		""" % conn.fileno())

		self.source_pipeline.bus.add_signal_watch()
		self.source_pipeline.bus.connect("message::eos", self.on_disconnect)

		self.source_pipeline.set_state(Gst.State.PLAYING)

		self.conn = conn
		return True
Example #14
    def _setupPipeline(self):
        """Creates the pipeline.

        It has the form "playbin ! thumbnailsink" where thumbnailsink
        is a Bin made out of "videorate ! capsfilter ! gdkpixbufsink"
        """
        # TODO: don't hardcode framerate
        self.pipeline = Gst.parse_launch(
            "uridecodebin uri={uri} name=decode ! "
            "videoconvert ! "
            "videorate ! "
            "videoscale method=lanczos ! "
            "capsfilter caps=video/x-raw,format=(string)RGBA,height=(int){height},"
            "pixel-aspect-ratio=(fraction)1/1,framerate=2/1 ! "
            "gdkpixbufsink name=gdkpixbufsink".format(uri=self.uri, height=self.thumb_height))

        # get the gdkpixbufsink and the sinkpad
        self.gdkpixbufsink = self.pipeline.get_by_name("gdkpixbufsink")

        decode = self.pipeline.get_by_name("decode")
        decode.connect("autoplug-select", self._autoplug_select_cb)

        self.__preroll_timeout_id = GLib.timeout_add_seconds(MAX_BRINGING_TO_PAUSED_DURATION,
                                                             self.__preroll_timed_out_cb)
        self.pipeline.get_bus().add_signal_watch()
        self.pipeline.get_bus().connect("message", self.__bus_message_cb)
        self.pipeline.set_state(Gst.State.PAUSED)
Example #15
 def __init__(self):
     Gtk.Window.__init__(self)
     self.image = None
     GLib.timeout_add(50, self.save_image)
     self.connect("delete-event", self.on_quit)
     # Status
     self.status = Gst.State.NULL
     # Video Area
     self.video_area = Gtk.DrawingArea()
     # Disable Double Buffered
     self.video_area.set_double_buffered(False)
     # playbin
     self.player = Gst.parse_launch("tcpclientsrc host=" + sys.argv[1] +" port=5000 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! xvimagesink name=xv")
     bus = self.player.get_bus()
     bus.add_signal_watch()
     bus.enable_sync_message_emission()
     bus.connect("message", self.on_message)
     bus.connect("sync-message::element", self.on_sync_message)
     # DnD
     dnd_list = Gtk.TargetEntry.new("text/uri-list", 0, 0)
     # pack
     vbox = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0) 
     vbox.pack_start(self.video_area, True, True, 0)
     self.add(vbox)
     self.resize(640, 480)
     self.show_all()
     self.player.set_state(Gst.State.PLAYING)
Example #16
    def _launchPipeline(self):
        self.debug(
            'Now generating waveforms for: %s', path_from_uri(self._uri))
        self.pipeline = Gst.parse_launch("uridecodebin name=decode uri=" +
                                         self._uri + " ! waveformbin name=wave"
                                         " ! fakesink qos=false name=faked")
        # This line is necessary so we can instantiate GstTranscoder's
        # GstCpuThrottlingClock below.
        Gst.ElementFactory.make("uritranscodebin", None)
        clock = GObject.new(GObject.type_from_name("GstCpuThrottlingClock"))
        clock.props.cpu_usage = self._max_cpu_usage
        self.pipeline.use_clock(clock)
        faked = self.pipeline.get_by_name("faked")
        faked.props.sync = True
        self._wavebin = self.pipeline.get_by_name("wave")
        asset = self.ges_elem.get_parent().get_asset()
        self._wavebin.props.uri = asset.get_id()
        self._wavebin.props.duration = asset.get_duration()
        decode = self.pipeline.get_by_name("decode")
        decode.connect("autoplug-select", self._autoplug_select_cb)
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()

        asset = self.ges_elem.get_parent().get_asset()
        self.n_samples = asset.get_duration() / SAMPLE_DURATION
        bus.connect("message", self._busMessageCb)
Example #17
    def __init__(self):
        QtCore.QObject.__init__(self)

        self._supported_extensions = []
        for plugin, extensions in self.PLUGINS.items():
            print("Testing plugin \"" + plugin + "\" ..."),
            if Gst.Registry.find_plugin(Gst.Registry.get(), plugin) is not None:
                print("supported.")
                self._supported_extensions.extend(extensions)
            else:
                print("NOT supported.")

        self.uri = None
        self._status = State.STOPPED
        self.bin = Gst.ElementFactory.make('playbin', None)
        self.bin.set_property('video-sink', None)
        try:
            device = Gst.parse_launch(self.SINK)
        except GObject.GError:
            pass
        else:
            self.bin.set_property('audio-sink', device)

        bus = self.bin.get_bus()
        bus.add_signal_watch()
        bus.connect('message::eos', self._message_eos)
        bus.connect('message::error', self._message_error)
        bus.connect('message::async-done', self._message_async_done)

        self.time_fmt = Gst.Format(Gst.Format.TIME)
        self.seek_pending = False
Example #18
def GStreamerSink(pipeline_desc):
    """Returns a list of unlinked gstreamer elements ending with an audio sink
    and a textual description of the pipeline.

    `pipeline_desc` can be gst-launch syntax for multiple elements
    with or without an audiosink.

    In case of an error, raises PlayerError
    """

    pipe = None
    if pipeline_desc:
        try:
            pipe = [Gst.parse_launch(e) for e in pipeline_desc.split('!')]
        except GLib.GError as e:
            message = e.message.decode("utf-8")
            raise PlayerError(_("Invalid GStreamer output pipeline"), message)

    if pipe:
        # In case the last element is linkable with a fakesink
        # it is not an audiosink, so we append the default one
        fake = Gst.ElementFactory.make('fakesink', None)
        if link_many([pipe[-1], fake]):
            unlink_many([pipe[-1], fake])
            default_elm, default_desc = find_audio_sink()
            pipe += [default_elm]
            pipeline_desc += " ! " + default_desc
    else:
        elm, pipeline_desc = find_audio_sink()
        pipe = [elm]

    return pipe, pipeline_desc
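A hypothetical call site for the helper above (the sink description is an arbitrary example; PlayerError handling is left to the caller):

elements, description = GStreamerSink('audioconvert ! pulsesink')
print(description)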
Example #19
    def _create_pipeline(self):
        """
        Create a GstPipeline that contains our encoding, etc
        """
        if self.current_filename == "":
            location = self._generate_location()
        else:
            location = self.current_filename

        encoder = self._select_encoder()
        sink_str = self._select_sink()

        save_str = ("appsrc name={} is-live=true format=3 "
                    "! {} "
                    "! queue leaky=downstream "
                    "! videoconvert "
                    "! {} "
                    "! {} name={} location={}").format(self.src_name,
                                                       self.caps.to_string(),
                                                       encoder.module,
                                                       sink_str,
                                                       self.sink_name,
                                                       location)

        log.info("Using pipeline to save: '{}'".format(save_str))

        self.pipeline = Gst.parse_launch(save_str)

        self.src = self.pipeline.get_by_name(self.src_name)
        self.src.set_property("caps", self.caps)
        self.src.set_property("do-timestamp", True)

        self.pipeline.set_name(self.pipeline_name)
Example #20
    def __init__(self, options={}):
        base.Base.__init__(self, options)
        Gst.Bin.__init__(self)

        aux = (pipestr.replace('gc-v4l2-preview', 'sink-' + self.options['name'])
                      .replace('gc-v4l2-enc', self.options['videoencoder'])
                      .replace('gc-v4l2-mux', self.options['muxer']))


        if self.options['videofilter']:
            aux = aux.replace('gc-videofilter', self.options['videofilter'])
        else:
            aux = aux.replace('gc-videofilter !', '')
            

        if 'image/jpeg' in self.options['caps']:
            aux = aux.replace('gc-v4l2-dec', 'jpegdec max-errors=-1 ! queue !')
        else:
            aux = aux.replace('gc-v4l2-dec', '')

        #bin = Gst.parse_bin_from_description(aux, True)
        bin = Gst.parse_launch("( {} )".format(aux))
        self.add(bin)

        self.set_option_in_pipeline('location', 'gc-v4l2-src', 'device')

        self.set_value_in_pipeline(path.join(self.options['path'], self.options['file']), 'gc-v4l2-sink', 'location')

        self.set_option_in_pipeline('caps', 'gc-v4l2-filter', 'caps', None)
Example #21

    def copyThumbPic(self, fsink, buffer, pad, user_data=None):
        if not self._thumb_exposure_open:
            return

        self._thumb_exposure_open = False
        loader = GdkPixbuf.PixbufLoader.new_with_mime_type("image/jpeg")
        loader.write(buffer)
        loader.close()
        self.thumbBuf = loader.get_pixbuf()
        self.model.still_ready(self.thumbBuf)

        self._thumb_element('thumb_tee').unlink(self._thumb_element('thumb_queue'))

        oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
        wavFilepath = os.path.join(Instance.instancePath, "output.wav")
        muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv

        muxline = Gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoraparse ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
        taglist = self._get_tags(constants.TYPE_VIDEO)
        vorbis_enc = muxline.get_by_name('muxVorbisenc')
        vorbis_enc.merge_tags(taglist, Gst.TagMergeMode.REPLACE_ALL)

        muxBus = muxline.get_bus()
        muxBus.add_signal_watch()
        self._video_transcode_handler = muxBus.connect('message', self._onMuxedVideoMessageCb, muxline)
        self._mux_pipes.append(muxline)
        #add a listener here to monitor % of transcoding...
        self._transcode_id = GObject.timeout_add(200, self._transcodeUpdateCb, muxline)
        muxline.set_state(Gst.State.PLAYING)
Example #22
 def test_create_thumbnail_bin(self):
     """Checks our `waveformbin` element is usable."""
     pipeline = Gst.parse_launch("uridecodebin name=decode uri=file:///some/thing"
                                 " waveformbin name=wavebin ! fakesink qos=false name=faked")
     self.assertTrue(pipeline)
     wavebin = pipeline.get_by_name("wavebin")
     self.assertTrue(wavebin)
Example #23
    def async_process_file(self, filename, end_callback):
        self.end_callback = end_callback

        at = self.ensure_new_type('sound_segment', title=_("Sound segment"))
        at.setMetaData(config.data.namespace_prefix['dc'], "description", _("Sound segmentation with a threshold of %(threshold)d dB - channel: %(channel)s") % self.__dict__)

        # Build pipeline
        self.pipeline = Gst.parse_launch('uridecodebin name=decoder ! audioconvert ! audiopanorama method=1 panorama=%d ! audioconvert ! cutter threshold-dB=%s run-length=%d ! progressreport silent=true update-freq=1 name=report ! fakesink' % (self.channel_mapping[self.channel], str(self.threshold), self.min_silence_duration * Gst.MSECOND))
        self.decoder = self.pipeline.get_by_name('decoder')
        self.report = self.pipeline.get_by_name('report')
        bus = self.pipeline.get_bus()
        # Enabling sync_message_emission will in fact force the
        # self.progress call from a thread other than the main thread,
        # which surprisingly works better ATM.
        bus.enable_sync_message_emission()
        bus.connect('sync-message', self.on_bus_message)
        bus.connect('message', self.on_bus_message)

        if config.data.os == 'win32':
            self.decoder.props.uri = 'file:///' + os.path.abspath(str(filename))
        else:
            self.decoder.props.uri = 'file://' + os.path.abspath(str(filename))
        self.progress(.1, _("Starting silence detection"))
        self.pipeline.set_state(Gst.State.PLAYING)
        return self.package
Example #24

    def _video_eos(self):
        self._pipeline.set_state(Gst.State.NULL)
        self._pipeline.get_by_name("tee").unlink(self._videobin)
        self._pipeline.remove(self._videobin)
        self._pipeline.remove(self._audiobin)

        self.model.shutter_sound()

        if len(self._thumb_pipes) > 0:
            thumbline = self._thumb_pipes[-1]
            thumbline.get_by_name('thumb_fakesink').disconnect(self._thumb_handoff_handler)

        ogg_path = os.path.join(Instance.instancePath, "output.ogg") #ogv
        if not os.path.exists(ogg_path) or os.path.getsize(ogg_path) <= 0:
            # FIXME: inform model of failure?
            return

        line = 'filesrc location=' + ogg_path + ' name=thumbFilesrc ! oggdemux name=thumbOggdemux ! theoradec name=thumbTheoradec ! tee name=thumb_tee ! queue name=thumb_queue ! ffmpegcolorspace name=thumbFfmpegcolorspace ! jpegenc name=thumbJPegenc ! fakesink name=thumb_fakesink'
        thumbline = Gst.parse_launch(line)
        thumb_queue = thumbline.get_by_name('thumb_queue')
        thumb_queue.set_property("leaky", True)
        thumb_queue.set_property("max-size-buffers", 1)
        thumb_tee = thumbline.get_by_name('thumb_tee')
        thumb_fakesink = thumbline.get_by_name('thumb_fakesink')
        self._thumb_handoff_handler = thumb_fakesink.connect("handoff", self.copyThumbPic)
        thumb_fakesink.set_property("signal-handoffs", True)
        self._thumb_pipes.append(thumbline)
        self._thumb_exposure_open = True
        thumbline.set_state(Gst.State.PLAYING)
Example #25
    def init_camera(self):
        # TODO: This doesn't work when camera resolution is resized at runtime.
        # There must be some other way to release the camera?
        if self._pipeline:
            self._pipeline = None

        video_src = self._video_src
        if video_src == 'v4l2src':
            video_src += ' device=/dev/video%d' % self._index
        elif video_src == 'dc1394src':
            video_src += ' camera-number=%d' % self._index

        if Gst.version() < (1, 0, 0, 0):
            caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
            pl = ('{} ! decodebin name=decoder ! ffmpegcolorspace ! '
                  'appsink name=camerasink emit-signals=True caps={}')
        else:
            caps = 'video/x-raw,format=RGB'
            pl = '{} ! decodebin name=decoder ! videoconvert ! appsink ' + \
                 'name=camerasink emit-signals=True caps={}'

        self._pipeline = Gst.parse_launch(pl.format(video_src, caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-sample', self._gst_new_sample)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start()
Example #26
    def __init__(self, sink):
        Gst.Pipeline.__init__(self)
        self.playbin = Gst.ElementFactory.make("playbin")
        self.add(self.playbin)

        bin = Gst.Bin()
        self.speedchanger = Gst.ElementFactory.make("pitch")
        if self.speedchanger is None:
            myGtk.show_error(_("You need to install the Gstreamer soundtouch elements for "
                    "play it slowly to. They are part of Gstreamer-plugins-bad. Consult the "
                    "README if you need more information.")).run()
            raise SystemExit()

        bin.add(self.speedchanger)

        self.audiosink = Gst.parse_launch(sink)
        #self.audiosink = Gst.ElementFactory.make(sink, "sink")

        bin.add(self.audiosink)
        convert = Gst.ElementFactory.make("audioconvert")
        bin.add(convert)
        self.speedchanger.link(convert)
        convert.link(self.audiosink)
        sink_pad = Gst.GhostPad.new("sink", self.speedchanger.get_static_pad("sink"))
        bin.add_pad(sink_pad)
        self.playbin.set_property("audio-sink", bin)
        #bus = self.playbin.get_bus()
        #bus.add_signal_watch()
        #bus.connect("message", self.on_message)

        self.eos = lambda: None
Example #27
def convert(inFile, outFile, quality):

    if not os.path.exists(inFile):
        raise Exception('File does not exist: "' + inFile + '"')

    player = Gst.parse_launch(
        "filesrc name=src ! flacparse ! flacdec ! audioconvert ! "
        "lamemp3enc name=enc ! id3v2mux "
        "! filesink name=sink"
    )
    bus = player.get_bus()

    src = player.get_by_name("src")
    src.set_property("location", inFile)
    sink = player.get_by_name("sink")
    sink.set_property("location", outFile)
    enc = player.get_by_name("enc")
    enc.set_property("quality", quality)
    player.set_state(Gst.State.PLAYING)

    while True:
        message = bus.pop()
        if not message:
            continue
        t = message.type
        if t == Gst.MessageType.EOS:
            break
        elif t == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print "Error: %s" % err, debug
            break

    player.set_state(Gst.State.NULL)
Example #28
 def __initVideo(self):
     self.pipeline = Gst.parse_launch(
     'filesrc name=video1 filesrc name=video2 filesrc name=video3 \
         videomixer name=mix ! xvimagesink \
         video1. \
             ! queue ! videoparse framerate=%s/1 name=parser1 \
             ! textoverlay font-desc="Sans 24" text="Original" \
                 valignment=top halignment=left shaded-background=true \
             ! videoscale \
             ! mix.sink_1 \
         video2. \
             ! queue ! videoparse framerate=%s/1 name=parser2 \
             ! textoverlay font-desc="Sans 24" text="Coded" \
                 valignment=top halignment=left shaded-background=true \
             ! videoscale \
             ! mix.sink_2 \
         video3. \
             ! queue ! videoparse framerate=%s/1 name=parser3 \
             ! textoverlay font-desc="Sans 24" text="Received" \
                 valignment=top halignment=left shaded-background=true \
             ! videoscale \
             ! mix.sink_3' % (
         self.main.conf['framerate'],
         self.main.conf['framerate'],
         self.main.conf['framerate']
     ))
     bus = self.pipeline.get_bus()
     bus.add_signal_watch()
     bus.enable_sync_message_emission()
     bus.connect('message', self.onMessage)
     bus.connect('sync-message::element', self.onSyncMessage)
Example #29
    def __init__(self, loop=None):
        self.default_volume = 10
        self.raw_buffers = []
        self.fft_magnitudes = [0.0] * BINS
        self.fft_frequency_bands = [i * (SAMPLING_FREQUENCY / BINS / 2)
                for i in range(BINS)]
        self.fft_samples_taken = 0
        self.loop = loop
        self.in_range = False

        self.pipeline = Gst.parse_launch('''autoaudiosrc name=recordersrc
        ! queue
        ! level message=true name=recorderlevel
        ! audioconvert
        ! audio/x-raw,channels=1,rate=(int)44100
        ! audioresample
        ! spectrum interval=100000000 bands=%(bands)s
        ! wavenc
        ! appsink name=recordersink emit-signals=true''' %
        {'bands': BINS})
        self.sink = self.pipeline.get_by_name('recordersink')
        self.sink.connect('new-sample', self.nextbuffer)
        self.recordersource = self.pipeline.get_by_name('recordersrc')
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self.bus_message_handler)
        self.volume = 0
        self.volumecontrol(self.volume)
Example #30

    def __init__(self, options={}):
        base.Base.__init__(self, options)
        Gst.Bin.__init__(self)

        gcvideosink = get_videosink(videosink=self.options['videosink'], name='sink-'+self.options['name'])
        gcaudiosink = get_audiosink(audiosink=self.options['audiosink'], name='sink-audio-'+self.options['name'])
        aux = (pipestr.replace('gc-vsink', gcvideosink)
               .replace('gc-asink', gcaudiosink))

        if self.options["caps-preview"]:
            aux = aux.replace("caps-preview !","videoscale ! videorate ! "+self.options["caps-preview"]+" !")
        else:
            aux = aux.replace("caps-preview !","")


        #bin = Gst.parse_bin_from_description(aux, False)
        bin = Gst.parse_launch("( {} )".format(aux))

        self.add(bin)

        sink = self.get_by_name("gc-firewireavi-sink")
        sink.set_property('location', path.join(self.options['path'], self.options['file']))

        if self.options["vumeter"] == False:
            level = self.get_by_name("gc-firewireavi-level")
            level.set_property("message", False)

        if self.options["player"] == False:
            self.mute = True
            element = self.get_by_name("gc-firewireavi-volume")
            element.set_property("mute", True)
        else:
            self.mute = False
Example #31
    def build_pipeline(self):
        sink = 'autovideosink'
        if config.data.player['vout'] == 'x11':
            sink = 'ximagesink'
        elif config.data.player['vout'] == 'xvideo':
            sink = 'xvimagesink'
        elif config.data.player['vout'] == 'gtk':
            sink = 'gtksink'
        elif config.data.player['vout'] == 'd3d':
            sink = 'd3dvideosink'
        elif config.data.player['vout'] == 'gl':
            sink = 'glimagesinkelement'
        elif config.data.player['vout'].startswith('raw:'):
            # "Raw" element definition - will be passed to Gst.parse_launch
            sink = config.data.player['vout'][4:]

        self.player = Gst.ElementFactory.make("playbin", "player")

        self.video_sink = Gst.Bin()

        # TextOverlay does not seem to be present in win32 installer. Do without it.
        try:
            self.captioner = Gst.ElementFactory.make('textoverlay',
                                                     'captioner')
            # FIXME: move to config.data
            self.captioner.props.font_desc = 'Sans 24'
        except:
            self.captioner = None

        self.imageoverlay = None
        if config.data.player['svg'] and svgelement:
            try:
                self.imageoverlay = Gst.ElementFactory.make(
                    svgelement, 'overlay')
                self.imageoverlay.props.fit_to_frame = True
            except:
                logger.error("Gstreamer SVG overlay element is not available",
                             exc_info=True)

        self.imagesink = Gst.parse_launch(f"{sink} name=sink")
        try:
            self.imagesink.set_property('force-aspect-ratio', True)
        except TypeError:
            logger.warning("Cannot set force-aspect-ratio on video sink")
        self.real_imagesink = self.imagesink

        elements = []
        elements.append(Gst.ElementFactory.make('videoconvert', None))
        elements.append(Gst.ElementFactory.make('videoscale', None))
        if self.imageoverlay is not None:
            # FIXME: Issue: rsvgoverlay.fit_to_frame expects that the
            # dimensions of the input buffers match the aspect ratio
            # of the original video, which is currently not the case.
            elements.append(Gst.ElementFactory.make('queue', None))
            elements.append(self.imageoverlay)
        if self.captioner is not None:
            elements.append(self.captioner)

        # Add a queue before display
        elements.append(Gst.ElementFactory.make('queue', None))

        if sink == 'glimagesinkelement':
            upload = Gst.ElementFactory.make('glupload', None)
            csp = Gst.ElementFactory.make('glcolorconvert', None)
            elements.extend((upload, csp, self.imagesink))
        else:
            csp = Gst.ElementFactory.make('videoconvert', None)
            elements.extend((csp, self.imagesink))

        for el in elements:
            self.video_sink.add(el)
        if len(elements) >= 2:
            for src, dst in zip(elements, elements[1:]):
                src.link(dst)

        self.log("using " + sink)

        # Note: it is crucial to make ghostpad an attribute, so that
        # it is not garbage-collected at the end of the build_pipeline
        # method.
        self._video_ghostpad = Gst.GhostPad.new(
            'sink', elements[0].get_static_pad('video_sink')
            or elements[0].get_static_pad('sink'))
        # Idem for elements
        self._video_elements = elements

        logger.debug("Using video sink pipeline %s", self._video_elements)
        self.video_sink.add_pad(self._video_ghostpad)

        self.player.props.video_sink = self.video_sink
        self.player.props.force_aspect_ratio = True

        self.audio_sink = Gst.parse_launch(
            'scaletempo name=scaletempo ! audioconvert ! audioresample ! autoaudiosink'
        )
        self.audio_sink.add_pad(
            Gst.GhostPad.new(
                'sink',
                self.audio_sink.get_child_by_name('scaletempo').get_static_pad(
                    'sink')))
        self.player.props.audio_sink = self.audio_sink

        bus = self.player.get_bus()
        bus.enable_sync_message_emission()
        bus.connect('sync-message::element', self.on_sync_message)
        bus.add_signal_watch()
        bus.connect('message::error', self.on_bus_message_error)
        bus.connect('message::warning', self.on_bus_message_warning)
Example #32
    def __init__(self, serial, width, height, framerate, cam_name,
                 camera_info_url):
        """ Constructor.
        Creates the sink pipeline and the source pipeline.

        :param serial: Serial number of the camera to use.
        :param width: Width of the video format, e.g. 640, 1920 etc.
        :param height: Height of the video format, e.g. 480, 1080
        :param framerate: Numerator of the frame rate, e.g. 15, 30, 60 etc.
        :param cam_name: Name of the camera.
        :param camera_info_url: URL of the camera info file.
        """
        Gst.init([])
        self.height = height
        self.width = width
        self.sample = None
        self.samplelocked = False
        self.newsample = False
        self.pid = -1
        self.camera_name = cam_name
        self.camera_info_url = camera_info_url
        self.randshm = random_str(8)

        self.__remove_tmp_file()

        pixelformat = "BGRx"
        color = True
        liveview = False
        if not color:
            pixelformat = "GRAY8"

        if liveview:
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
                serial,
                pixelformat,
                width,
                height,
                framerate,
            )
            p += ' ! tee name=t'
            p += ' t. ! queue ! videoconvert ! video/x-raw,format=RGB,width=%d,height=%d,framerate=%d/1 ! shmsink socket-path=/tmp/tiscamera_%s' % (
                width, height, framerate, self.randshm)
            p += ' t. ! queue ! videoconvert ! ximagesink'
        else:
            p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
                serial,
                pixelformat,
                width,
                height,
                framerate,
            )
            p += ' ! videoconvert ! video/x-raw,format=RGB,width=%d,height=%d,framerate=%d/1 ! shmsink socket-path=/tmp/tiscamera_%s' % (
                width, height, framerate, self.randshm)

        # print(p)

        try:
            self.pipeline = Gst.parse_launch(p)
        except GLib.Error as error:
            raise RuntimeError("Error creating pipeline: {0}".format(error))

        self.pipeline.set_state(Gst.State.READY)
        if self.pipeline.get_state(
                10 * Gst.SECOND)[0] != Gst.StateChangeReturn.SUCCESS:
            raise RuntimeError("Failed to start video stream.")
        # Query a pointer to our source, so we can set properties.
        self.source = self.pipeline.get_by_name("source")

        # Create gscam_config variable with content
        gscam = 'shmsrc socket-path=/tmp/tiscamera_%s ! video/x-raw-rgb, width=%d,height=%d,framerate=%d/1' % (
            self.randshm,
            width,
            height,
            framerate,
        )
        gscam += ',bpp=24,depth=24,blue_mask=16711680, green_mask=65280, red_mask=255 ! ffmpegcolorspace'
        os.environ["GSCAM_CONFIG"] = gscam
Example #33
    def init_audio(self):
        self.unlink_gst()
        if self.enable:
            self.chain = []
            rs = int(SignalGen.sample_rates[self.sample_rate])
            self.rate = float(rs)
            self.interval = 1.0 / self.rate
            if 0:
                self.player = gst.Pipeline("mypipeline")
                self.source = self.make_and_chain("appsrc", "appsrc")
                if not self.source:
                    print("ERROR: Could not create '{0}' element".format('appsrc'))
                    sys.exit(1)
                self.source.set_property("is-live", True)
            else:
                # inject the real rate
                self.pipeline = self.PIPELINE_SIMPLE.format(rs)
                # parse it
                self.pipeline = Gst.parse_launch(self.pipeline)
                self.appsrc = self.pipeline.get_by_name("appsrc")
                self.appsrc.connect('need-data', self.need_data)

           
            if 0:
                caps = gst.Caps(
                  'audio/x-raw-int,'
                  'endianness=(int)1234,'
                  'channels=(int)2,'
                  'width=(int)32,'
                  'depth=(int)32,'
                  'signed=(boolean)true,'
                  'rate=(int)%s' % rs)

                # this must be done for each buffer
                # self.source.set_property('caps', caps)
                self.source.connect('need-data', self.need_data)
                self.source.connect('enough-data', self.enough_data)
                self.source.set_property('format', 'time')
                self.source.set_property('do-timestamp', True)

                self.sink = self.make_and_chain("autoaudiosink")
                self.player.add(*self.chain)
                gst.element_link_many(*self.chain)

            # print initial negotiated caps (in NULL state)
            print("In NULL state:")
            if 0:
                print(self.sink)
                print_pad_capabilities(self.sink, "sink")

            if 0:
                self.bus = self.player.get_bus()
            else:
                self.bus = self.pipeline.get_bus()

            self.bus.add_signal_watch()
            self.bus.enable_sync_message_emission()
            self.bus.connect('message', self.on_message)
            if 0:
                self.player.set_state(gst.STATE_PLAYING)
            else:
                ret = self.pipeline.set_state(Gst.State.PLAYING)
            if ret == Gst.StateChangeReturn.FAILURE:
                print("ERROR: Unable to set the pipeline to the playing state")
                sys.exit(1)
            # print the current capabilities of the sink
            print("init_audio: set state to PLAYING")
            if 0:
                print_pad_capabilities(self.sink, "sink")
            print('enabled the output player')
        else:
            if 0:
                if self.player:
                    self.player.set_state(Gst.State.NULL)
            else:
                if self.pipeline:
                    self.pipeline.set_state(Gst.State.PAUSED)
Example #34
 def _rtsp_start_pipeline(self):
     self._pipeline = Gst.parse_launch(self._command)
     bus = self._pipeline.get_bus()
     bus.add_signal_watch()
     bus.connect("message", self._rtsp_on_message, None)
     self._pipeline.set_state(Gst.State.PLAYING)
Example #35
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument("socket", help="shmsrc socket")

    args = parser.parse_args(argv[1:])

    cache_root = (os.environ.get("XDG_CACHE_HOME", None)
                  or os.environ.get("HOME") + '/.cache')
    default_file = '%s/stbt/camera-video-cache/black.mp4' % cache_root

    if not os.path.exists(default_file):
        utils.mkdir_p(os.path.dirname(default_file))
        gst_utils.frames_to_video(
            default_file,
            [(bytearray([0, 0, 0]) * 1280 * 720, 5 * Gst.SECOND)],
            'video/x-raw,format=BGR,width=1280,height=720', 'mp4')

    default_uri = "file://" + default_file

    frame_bytes = 1280 * 720 * 3

    next_video = [default_uri]

    def about_to_finish(playbin):
        playbin.set_property('uri', next_video[0])
        next_video[0] = default_uri
        playbin.set_state(Gst.State.PLAYING)

    if USE_SHMSRC:
        pipeline_desc = (
            """\
            playbin name=pb audio-sink=fakesink uri=%s flags=0x00000791 \
            video-sink="videoconvert \
                ! video/x-raw,width=1280,height=720,format=RGB ! identity ! \
                shmsink wait-for-connection=true shm-size=%i max-lateness=-1 \
                        qos=false socket-path=%s blocksize=%i sync=true \
                        buffer-time=100000000" """ %
            (default_uri, frame_bytes * 1000, args.socket, frame_bytes))
    else:
        pipeline_desc = (
            """playbin name=pb audio-sink=fakesink uri=%s flags=0x00000791 \
            video-sink="videoconvert ! timeoverlay ! xvimagesink sync=true" """
            % default_uri)

    playbin = Gst.parse_launch(pipeline_desc)

    playbin.connect("about-to-finish", about_to_finish)

    runner = gst_utils.PipelineRunner(playbin)
    gst_thread = threading.Thread(target=runner.run)
    gst_thread.daemon = True
    gst_thread.start()

    playbin.get_state(0)

    def set_uri(uri):
        print "=== Setting URI to", uri
        if uri == 'stop':
            next_video[0] = default_uri
        else:
            next_video[0] = uri
        playbin.seek(1.0, Gst.Format.TIME,
                     Gst.SeekFlags.FLUSH | Gst.SeekFlags.KEY_UNIT,
                     Gst.SeekType.END, 0, Gst.SeekType.NONE, 0)

    while True:
        uri = sys.stdin.readline()
        if uri == '':
            break
        elif len(uri.strip()) > 0:
            set_uri(uri.strip())
Example #36
 def do_create_element(self, url):
     logging.info("Video pipeline to be used: {0}".format(self.videoPipeline)) 
     return Gst.parse_launch(self.videoPipeline)
Example #37
 def __init__(self, pipeline):
     Gst.init(None)
     self._pipeline = Gst.parse_launch(pipeline)
     self.running = False
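The listing stops at the constructor; a minimal sketch of companion start/stop methods that would manage the running flag (assumed, not part of the original class):

 def start(self):
     # Assumed method: begin playback and track state.
     self._pipeline.set_state(Gst.State.PLAYING)
     self.running = True

 def stop(self):
     # Assumed method: tear the pipeline down.
     self._pipeline.set_state(Gst.State.NULL)
     self.running = False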
Example #38

import os, sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

frame_format = 'RGBA'

Gst.init()
pipeline = Gst.parse_launch(f'''
    filesrc location=media/in.mp4 num-buffers=200 !
    decodebin !
    fakesink name=s
''')


def on_frame_probe(pad, info):
    buf = info.get_buffer()
    print(f'[{buf.pts / Gst.SECOND:6.2f}]')
    return Gst.PadProbeReturn.OK


pipeline.get_by_name('s').get_static_pad('sink').add_probe(
    Gst.PadProbeType.BUFFER, on_frame_probe)

pipeline.set_state(Gst.State.PLAYING)

try:
    while True:
        msg = pipeline.get_bus().timed_pop_filtered(
            Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR)
        if msg:
            break
finally:
    pipeline.set_state(Gst.State.NULL)
Example #39

 def get_pipeline(self):
     description = 'rtspsrc name=rtspsrc ! rtph264depay ! h264parse ! matroskamux ! filesink location=file.mkv'
     self.pipeline = Gst.parse_launch(description)
     rtsp_src = self.pipeline.get_by_name("rtspsrc")
     rtsp_src.set_property('location', self.url)
Example #40
def construct_pipeline(configuration):
    """
    Launch the GStreamer pipeline according to the chosen configuration (from the configuration file).
    Bass lift adjustment is made with the lowest 3 bands of the standard 10-band equalizer, which is
    not really the proper way to do it, but it is simple and fairly intuitive to use.
    Also note that there are a total of 3 volume controls per channel to aid when playing
    around; they should probably just be removed.
    """

    # Alsa device selection
    # The primary entry is used for 'left', 'right', 'mono' and 'stereo_40' (anything but 'stereo')
    # Default Alsa device if left empty, use 'device=hw:X' to select a specific device instead
    primary = ''
    # The secondary entry is the second output alsa device used when running in 'stereo'.
    # Modify to use the correct second soundcard.
    secondary = 'device=hw:X'

    # Input Alsa device is always the primary from above.
    input = (
        f'alsasrc {primary} ! audioconvert ! audio/x-raw,format=F32LE,channels=2 ! '
        f'queue ! ')

    if configuration in (Configuration.LEFT, Configuration.RIGHT):
        # pick which of the two input channels to play (src_0 or src_1) if part of a stereo setup
        channel = f'deinterleave name=d d.src_{configuration.value} ! tee name=tee_0 '
        paths = [configuration.value]
        alsa_devices = [primary]
        channel_masks = [0x01]
        interleave_index = [0]

    elif configuration == Configuration.MONO:
        # stereo signal mixed to mono
        channel = 'audioconvert ! audio/x-raw,channels=1 ! deinterleave name=d d.src_0 ! tee name=tee_0 '
        paths = [Configuration.LEFT.value]
        alsa_devices = [primary]
        channel_masks = [0x01]
        interleave_index = [0]

    else:
        # 'stereo' or 'stereo_40'. 'stereo' uses two soundcards
        # and 'stereo_40' two outputs on a 5.1 soundcard.
        channel = 'deinterleave name=d d.src_0 ! tee name=tee_0 d.src_1 ! tee name=tee_1 '
        paths = [Configuration.LEFT.value, Configuration.RIGHT.value]
        if configuration == Configuration.STEREO:
            alsa_devices = [primary, secondary]
            channel_masks = [0x01, 0x01]
            interleave_index = [0, 1]
        else:
            alsa_devices = [primary, primary]
            channel_masks = [0x01, 0x10]
            interleave_index = [0, 4]

    launch = input + channel

    for path in range(len(paths)):
        interleave_element = 0

        if not path or (configuration == Configuration.STEREO):
            if path and (configuration == Configuration.STEREO):
                interleave_element = 1

            output = (f'interleave name=i{interleave_element} ! '
                      f'audioconvert ! audioresample ! queue ! '
                      f'volume name=master_vol{path} volume=0.01 ! '
                      f'alsasink name=alsasink{path} {alsa_devices[path]} '
                      f'sync=true buffer-time=1000 ')

            launch += output

        low_ch_mask = channel_masks[path]
        high_ch_mask = low_ch_mask * 2
        low_interleave_channel = interleave_index[path]
        high_interleave_channel = low_interleave_channel + 1

        low = (
            f'tee_{path}.src_0 ! queue ! '
            f'equalizer-10bands name=equalizer{path} band0=0.0 band1=6.0 band2=0.0 ! '
            f'audiocheblimit name=low_xover{path} poles=8 mode=low-pass cutoff=2000.0 ! '
            f'volume name=low_vol{path} volume=1.0 ! audioconvert ! '
            f'"audio/x-raw,channels=1,channel-mask=(bitmask){low_ch_mask:#x}" ! '
            f'i{interleave_element}.sink_{low_interleave_channel} ')

        high = (
            f'tee_{path}.src_1 ! queue ! '
            f'audiocheblimit name=high_xover{path} poles=8 mode=high-pass cutoff=2000.0 ! '
            f'volume name=high_vol{path} volume=1.0 ! audioconvert ! '
            f'"audio/x-raw,channels=1,channel-mask=(bitmask){high_ch_mask:#x}" ! '
            f'i{interleave_element}.sink_{high_interleave_channel} ')

        launch += low + high

    # The printed launch line can be used directly with the gst-launch tool for testing on the command line
    # print(launch)

    # The Python bindings refuse to run with caps in double quotes (!?)
    pipeline = Gst.parse_launch(launch.replace('"', ''))

    pipeline.set_state(Gst.State.PLAYING)

    return pipeline
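
# A minimal usage sketch, assuming the builder above is exposed as
# build_pipeline(configuration); that name and the surrounding setup are
# hypothetical here, while Configuration is the enum used above.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)
pipeline = build_pipeline(Configuration.MONO)  # hypothetical entry point

loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()


def on_message(bus, message, loop):
    # Stop on end-of-stream or error; errors are printed for inspection.
    if message.type == Gst.MessageType.ERROR:
        print(*message.parse_error())
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        loop.quit()


bus.connect('message', on_message, loop)
try:
    loop.run()
finally:
    pipeline.set_state(Gst.State.NULL)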
Example #41
0
                                                     class_indexes *
                                                     image_indexes,
                                                     iou_threshold=0.7)

        bboxes = flat_locs[nms_mask].cpu()
        probs = flat_probs[nms_mask].cpu()
        class_indexes = class_indexes[nms_mask].cpu()
        if bboxes.size(0) > 0:
            print(bboxes, class_indexes, probs)


Gst.init()
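
# The excerpt uses frame_format and on_frame_probe without showing them;
# a minimal sketch of plausible definitions (both are assumptions):
frame_format = 'RGBA'  # assumed; any raw format nvvideoconvert can produce

def on_frame_probe(pad, info):
    # Runs once per buffer arriving at the fakesink pad; returning OK
    # lets the stream keep flowing.
    buf = info.get_buffer()
    print(f'buffer pts={buf.pts} size={buf.get_size()} bytes')
    return Gst.PadProbeReturn.OK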
pipeline = Gst.parse_launch(f'''
    filesrc location=media/in.mp4 num-buffers=256 !
    decodebin !
    nvvideoconvert !
    video/x-raw,format={frame_format} !
    fakesink name=s
''')

pipeline.get_by_name('s').get_static_pad('sink').add_probe(
    Gst.PadProbeType.BUFFER, on_frame_probe)

pipeline.set_state(Gst.State.PLAYING)

try:
    while True:
        msg = pipeline.get_bus().timed_pop_filtered(
            Gst.SECOND, Gst.MessageType.EOS | Gst.MessageType.ERROR)
        if msg:
            text = msg.get_structure().to_string() if msg.get_structure() else ''
Example #42
0
        queue max-size-buffers=2 leaky=downstream ! \
        %s name=%s" % (SINK_NAME, GST_VIEW_NAME)

    # a gstreamer sink that is a gtk drawing area
    # this is the camera preview display.
    gDrawAreaSink = Gtk.DrawingArea()
    gDrawAreaSink.set_double_buffered(True)
    gDrawAreaSink.name = GST_VIEW_NAME

    # build GStreamer launch string
    source2tee = "%s ! tee name=t" % cam_src_str
    tee2view = "t. ! %s" % view_launch_str
    tee2app = "t. ! %s" % app_launch_str
    launch = "%s %s %s" % (source2tee, tee2view, tee2app)

    gstPipeline = Gst.parse_launch(launch)

    gGstAppSink = gstPipeline.get_by_name(GST_APP_NAME)

    # build GUI
    window = Gtk.Window()
    window.connect("delete-event", window_closed, gstPipeline)
    window.set_default_size(640, 480)
    window.set_title("py_stream_infer")

    box = Gtk.Box()
    box.set_spacing(5)
    box.set_orientation(Gtk.Orientation.VERTICAL)
    window.add(box)

    box.pack_start(gDrawAreaSink, True, True, 0)
Example #43
0
def run_pipeline(
    user_function,
    src_frame_rate: int = None,
    src_height: int = None,
    src_width: int = None,
    binning_level: int = 1,
    use_leaky_queue: bool = True,
    image_sink_bin: str = "ximagesink sync=false",
    image_src_bin: str = "pyspinsrc",
):
    if binning_level is not None and binning_level != 1:
        image_src_bin += f" h-binning={binning_level} v-binning={binning_level}"

    image_src_caps = "video/x-raw,format=RGB"
    if src_frame_rate is not None:
        image_src_caps += f",framerate={int(src_frame_rate)}/1"

    if src_height is not None:
        image_src_caps += f",height={src_height}"

    if src_width is not None:
        image_src_caps += f",width={src_width}"

    appsink_element = "appsink name=appsink emit-signals=true max-buffers=1 drop=true"
    appsink_caps = "video/x-raw,format=RGB"
    image_queue = "queue"

    if use_leaky_queue:
        image_queue += " max-size-buffers=1 leaky=downstream"

    appsrc_element = (
        "appsrc name=appsrc is-live=true emit-signals=true format=3 block=true"
    )

    image_src_pipeline = f" {image_src_bin} ! {image_src_caps} ! {image_queue} ! videoconvert ! {appsink_caps} ! {appsink_element}"

    print("Image src pipeline:\n", image_src_pipeline)
    image_src_pipeline = Gst.parse_launch(image_src_pipeline)

    appsink = image_src_pipeline.get_by_name("appsink")

    # Start the image source pipeline and block until it is playing.
    image_src_pipeline.set_state(Gst.State.PLAYING)
    state_change_info = image_src_pipeline.get_state(Gst.CLOCK_TIME_NONE)
    print(
        f"Image src pipeline state change to running successful? : {state_change_info[0] == Gst.StateChangeReturn.SUCCESS}"
    )

    image_sink_pipeline = f"{appsrc_element} ! {str(appsink.sinkpad.get_current_caps())} ! videoconvert ! {image_sink_bin}"

    print("Image sink pipeline:\n", image_sink_pipeline)

    image_sink_pipeline = Gst.parse_launch(image_sink_pipeline)

    appsrc = image_sink_pipeline.get_by_name("appsrc")

    appsink.connect(
        "new-sample",
        partial(on_new_sample, appsrc=appsrc, user_function=user_function),
    )

    loop = GObject.MainLoop()

    src_bus = image_src_pipeline.get_bus()
    src_bus.add_signal_watch()
    src_bus.connect("message", on_bus_message, loop)

    sink_bus = image_sink_pipeline.get_bus()

    image_sink_pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    image_src_pipeline.set_state(Gst.State.NULL)

    image_sink_pipeline.send_event(Gst.Event.new_eos())

    print("Waiting for the EOS message on the bus")
    sink_bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS)
    print("Stopping pipeline")

    image_sink_pipeline.set_state(Gst.State.NULL)

    while GLib.MainContext.default().iteration(False):
        pass
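
# A minimal usage sketch with a hypothetical user_function; it assumes the
# module's on_new_sample (not shown here) passes each RGB frame to
# user_function and forwards the returned frame to the appsrc.
def invert_colors(frame):
    # Invert an RGB frame; the array shape/dtype contract is assumed.
    return 255 - frame

run_pipeline(invert_colors, src_frame_rate=30, src_width=640, src_height=480)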
Example #44
0
 def __init__(self):
     self.pipe = Gst.parse_launch(PIPELINE_DESC)
     self.pipe.set_state(Gst.State.PLAYING)
Example #45
0
def run_pipeline(user_function,
                 src_size,
                 appsink_size,
                 mirror=False,
                 h264=False,
                 jpeg=False,
                 videosrc='/dev/video0'):
    PIPELINE = 'v4l2src device=%s ! {src_caps} ! {leaky_q} '%videosrc
    if h264:
        SRC_CAPS = 'video/x-h264,width={width},height={height},framerate=30/1'
    elif jpeg:
        SRC_CAPS = 'image/jpeg,width={width},height={height},framerate=30/1'
    else:
        SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'

    APPSRC_PIPELINE = 'appsrc name=appsrc ! {appsrc_caps} '
    if detectCoralDevBoard():
      print("***\nNOTE: On a Coral devboard use bodypix_gl_imx.py instead for much faster performance.\n***")
      scale_caps = None
      PIPELINE += """
         ! decodebin ! glupload ! glvideoflip video-direction={direction} ! {leaky_q}
         ! glfilterbin filter=glbox name=glbox ! {sink_caps} ! {sink_element}
      """
      APPSRC_PIPELINE += """
         ! {leaky_q} ! videoconvert n-threads=4
         ! rsvgoverlay name=overlay ! waylandsink
      """
    else:  # raspberry pi or linux
      scale = min(appsink_size[0] / src_size[0], appsink_size[1] / src_size[1])
      scale = tuple(int(x * scale) for x in src_size)
      scale_caps = 'video/x-raw,width={width},height={height}'.format(width=scale[0], height=scale[1])
      PIPELINE += """
         ! decodebin ! videoflip video-direction={direction} ! videoconvert
         ! videoscale ! {scale_caps} ! videobox name=box autocrop=true
         ! {sink_caps}  ! {leaky_q} ! {sink_element} """
      APPSRC_PIPELINE += """ ! {leaky_q} ! videoconvert
         ! rsvgoverlay name=overlay ! videoconvert ! autovideosink"""

    SINK_ELEMENT = 'appsink name=appsink sync=false emit-signals=true max-buffers=1 drop=true'
    DL_CAPS = 'video/x-raw,format=BGRA,width={width},height={height}'
    SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
    APPSRC_CAPS = 'video/x-raw,format=RGB,width={width},height={height},framerate=30/1'
    LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'
    direction = 'horiz' if mirror else 'identity'

    src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
    sink_caps = SINK_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    dl_caps = DL_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    pipeline = PIPELINE.format(leaky_q=LEAKY_Q, src_caps=src_caps, dl_caps=dl_caps,
                               sink_caps=sink_caps, sink_element=SINK_ELEMENT,
                               scale_caps=scale_caps, direction=direction)
    print('Gstreamer pipeline: ', pipeline)
    pipeline = Gst.parse_launch(pipeline)
    appsink = pipeline.get_by_name('appsink')

    appsrc_caps = APPSRC_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    appsrc_pipeline = APPSRC_PIPELINE.format(leaky_q=LEAKY_Q,
                                             appsrc_caps=appsrc_caps,
                                             src_caps=src_caps)
    print('Gstreamer appsrc pipeline: ', appsrc_pipeline)
    appsrc_pipeline = Gst.parse_launch(appsrc_pipeline)
    appsrc = appsrc_pipeline.get_by_name('appsrc')
    overlay = appsrc_pipeline.get_by_name('overlay')

    appsink.connect('new-sample', partial(on_new_sample,
                                          appsrc=appsrc, overlay=overlay,
                                          screen_size=src_size, appsink_size=appsink_size,
                                          user_function=user_function))
    loop = GObject.MainLoop()

    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)

    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    appsrc_pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    appsrc_pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass
Example #46
0
        pipeline.set_state(Gst.State.NULL)
        sys.exit()
    else:
        pass
    return True


def set_callbacks(pipeline):
    gvawatermark = pipeline.get_by_name("gvawatermark")
    pad = gvawatermark.get_static_pad("src")
    pad.add_probe(Gst.PadProbeType.BUFFER, pad_probe_callback)

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, pipeline)


if __name__ == '__main__':
    Gst.init(sys.argv)
    gst_launch_string = create_launch_string()
    print(gst_launch_string)
    pipeline = Gst.parse_launch(gst_launch_string)

    set_callbacks(pipeline)

    pipeline.set_state(Gst.State.PLAYING)

    glib_mainloop()

    print("Exiting")
Example #47
0
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# GStreamer SDK Tutorials in Python
#
#     basic-tutorial-1
#
"""
basic-tutorial-1: Hello world!
http://docs.gstreamer.com/display/GstSDK/Basic+tutorial+2%3A+GStreamer+concepts
"""

from gi.repository import Gst

Gst.init(None)

# Build the pipeline
pipeline = Gst.parse_launch(
    "playbin uri=http://docs.gstreamer.com/media/sintel_trailer-480p.webm")

# Start playing
pipeline.set_state(Gst.State.PLAYING)

# Wait until error or EOS
bus = pipeline.get_bus()
msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                             Gst.MessageType.ERROR | Gst.MessageType.EOS)
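
# A common extension, not in the tutorial as excerpted: report what ended
# the wait before freeing resources.
if msg and msg.type == Gst.MessageType.ERROR:
    err, debug = msg.parse_error()
    print(f'Error: {err.message}\nDebug info: {debug}')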

# Free resources
pipeline.set_state(Gst.State.NULL)
Example #48
0
#!/usr/bin/env python
# Shows how two pipelines can be connected, using proxysink/proxysrc.
# It will output a test audio sound.
# Python equivalent of example at https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gst-plugins-bad-plugins/html/gst-plugins-bad-plugins-proxysrc.html
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
import os

Gst.init(None)
mainloop = GObject.MainLoop()

pipe1 = Gst.parse_launch("audiotestsrc is-live=1 ! proxysink name=psink")
psink = pipe1.get_by_name('psink')

pipe2 = Gst.parse_launch("proxysrc name=psrc ! autoaudiosink")
psrc = pipe2.get_by_name('psrc')

psrc.set_property('proxysink', psink)

clock = Gst.SystemClock.obtain()
pipe1.use_clock(clock)
pipe2.use_clock(clock)
clock.unref()

pipe1.set_base_time(0)
pipe2.set_base_time(0)

pipe1.set_state(Gst.State.PLAYING)
pipe2.set_state(Gst.State.PLAYING)
Example #49
0
def run_pipeline(pipeline,
                 layout,
                 loop,
                 render_overlay,
                 display,
                 handle_sigint=True,
                 signals=None):
    # Create pipeline
    pipeline = describe(pipeline)
    print(pipeline)
    pipeline = Gst.parse_launch(pipeline)

    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, pipeline, loop)

    if display is not Display.NONE:
        # Needed to commit the wayland sub-surface.
        def on_gl_draw(sink, widget):
            widget.queue_draw()

        # Needed to account for window chrome etc.
        def on_widget_configure(widget, event, glsink):
            allocation = widget.get_allocation()
            glsink.set_render_rectangle(allocation.x, allocation.y,
                                        allocation.width, allocation.height)
            return False

        window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
        window.set_title(WINDOW_TITLE)
        window.set_default_size(layout.render_size.width,
                                layout.render_size.height)
        if display is Display.FULLSCREEN:
            window.fullscreen()

        drawing_area = Gtk.DrawingArea()
        window.add(drawing_area)
        drawing_area.realize()

        glsink = pipeline.get_by_name('glsink')
        glsink.connect('drawn', on_gl_draw, drawing_area)

        # Wayland window handle.
        wl_handle = glsink.get_wayland_window_handle(drawing_area)
        glsink.set_window_handle(wl_handle)

        # Wayland display context wrapped as a GStreamer context.
        wl_display = glsink.get_default_wayland_display_context()
        glsink.set_context(wl_display)

        drawing_area.connect('configure-event', on_widget_configure, glsink)
        window.connect('delete-event', Gtk.main_quit)
        window.show_all()

        # The appsink pipeline branch must use the same GL display as the screen
        # rendering so they get the same GL context. This isn't automatically handled
        # by GStreamer as we're the ones setting an external display handle.
        def on_bus_message_sync(bus, message, glsink):
            if message.type == Gst.MessageType.NEED_CONTEXT:
                _, context_type = message.parse_context_type()
                if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
                    sinkelement = glsink.get_by_interface(
                        GstVideo.VideoOverlay)
                    gl_context = sinkelement.get_property('context')
                    if gl_context:
                        display_context = Gst.Context.new(
                            GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
                        GstGL.context_set_gl_display(display_context,
                                                     gl_context.get_display())
                        message.src.set_context(display_context)
            return Gst.BusSyncReply.PASS

        bus.set_sync_handler(on_bus_message_sync, glsink)

    with Worker(save_frame) as images, Commands() as get_command:
        signals = {
            'appsink': {
                'new-sample':
                functools.partial(on_new_sample,
                                  render_overlay=functools.partial(
                                      render_overlay, layout=layout),
                                  layout=layout,
                                  images=images,
                                  get_command=get_command),
                'eos':
                on_sink_eos
            },
            **(signals or {})
        }

        for name, component_signals in signals.items():
            component = pipeline.get_by_name(name)
            if component:
                for signal_name, signal_handler in component_signals.items():
                    component.connect(signal_name, signal_handler, pipeline)

        # Handle signals.
        if handle_sigint:
            GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, signal.SIGINT,
                                 Gtk.main_quit)

        # Run pipeline.
        pipeline.set_state(Gst.State.PLAYING)
        try:
            Gtk.main()
        except KeyboardInterrupt:
            pass
        finally:
            pipeline.set_state(Gst.State.NULL)

        # Process all pending MainContext operations.
        while GLib.MainContext.default().iteration(False):
            pass
Example #50
0
    def __init__(self, media_file, result_file, options, mainloop, media_info):
        self.analyze_returncode = None
        self.options = options
        self.media_info = media_info
        self._samplerate = int(self.media_info.get("sample_rate", 0))
        self._media_duration = float(self.media_info["duration"])
        self.mainloop = mainloop
        self._media_file = media_file
        self._result_filename = result_file
        self._result_file = open(result_file, "w")
        self._bands_count = 1024
        self.last_freq = 0
        self._first_tick_timestamp = -1
        self._first_tick_timestamp_saved = -1
        self._magnitude_position = -1
        self._max_magnitude = 0
        self._last_freq_count = 0
        self.qrcode_count = 0
        self.qrcode_with_beep_count = 0
        self._tick_count = 0
        spectrum_interval_ms = 3
        self.spectrum_interval_ns = spectrum_interval_ms * Gst.MSECOND
        framerate = self.media_info.get("avg_frame_rate")

        if framerate is not None:
            # assume audio ticks are at least 1 video frame long
            self.framerate = Fraction(self.media_info["avg_frame_rate"])
            frame_dur_ms = float(1000 / self.framerate)
        else:
            # assume 60 fps
            frame_dur_ms = 1000 / 60

        if options.expected_beep_duration:
            self.ticks_count_threshold = int(options.expected_beep_duration / spectrum_interval_ms)
        else:
            self.ticks_count_threshold = int(frame_dur_ms / spectrum_interval_ms)

        # FIXME: fdk adds 2048 samples of priming samples (silence) which adds 42ms of latency
        # aacenc adds 1024 samples (21ms)
        # https://github.com/mstorsjo/fdk-aac/issues/24
        # apple encoder adds 2112 samples
        # https://developer.apple.com/library/content/technotes/tn2258/_index.html
        # we will assume 2112 which gives a perfect result for our real samples
        self._encoder_latency = (
            1000000000 * 2112 / self._samplerate
            if self.media_info.get("a_codec") == "aac"
            else 0
        )
        # spectrum works on averaging over a 3ms interval, which adds latency
        self._encoder_latency += self.spectrum_interval_ns
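        # e.g. at 48 kHz: 2112 / 48000 s = 44 ms of priming latency, plus
        # the 3 ms spectrum averaging interval.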

        self._start_time = 0
        self._end_time = 0
        self._json_length = 70
        self._threshold_db = self.options.audio_threshold
        self._min_freq = 200
        self._audio_duration = 0
        self._video_duration = 0

        self._audio_fakesink_pad = None
        self._video_fakesink_pad = None
        self._id_prob_audio_sink = None
        self._id_prob_video_sink = None

        self.size = 0

        self._uri_media_file = self._media_file
        if '://' not in self._uri_media_file:
            self._uri_media_file = Gst.filename_to_uri(self._media_file)
        self.pipeline_str = self.get_pipeline(self._uri_media_file)
        self.pipeline = Gst.parse_launch(self.pipeline_str)
Example #51
0
    elif mtype == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print(err, debug)
    elif mtype == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        print(err, debug)

    return True


command = "videotestsrc num-buffers=50 ! gtksink"

# Gst.Pipeline https://lazka.github.io/pgi-docs/Gst-1.0/classes/Pipeline.html
# https://lazka.github.io/pgi-docs/Gst-1.0/functions.html#Gst.parse_launch
pipeline = Gst.parse_launch(command)

# https://lazka.github.io/pgi-docs/Gst-1.0/classes/Bus.html
bus = pipeline.get_bus()

# allow bus to emit messages to main thread
bus.add_signal_watch()

# Add handler to specific signal
# https://lazka.github.io/pgi-docs/GObject-2.0/classes/Object.html#GObject.Object.connect
bus.connect("message", on_message, None)

# Start pipeline
pipeline.set_state(Gst.State.PLAYING)

# Init GObject loop to handle Gstreamer Bus Events
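# (The excerpt cuts off here; a plausible continuation, assuming GObject is
# imported as in the neighbouring examples:)
loop = GObject.MainLoop()
try:
    loop.run()
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)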
Example #52
0
import logging
from gettext import gettext as _

import gi
gi.require_version('Gtk', '3.0')

from gi.repository import Gio
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject

_HAS_GST = True
try:
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst
    Gst.init(None)
    Gst.parse_launch('espeak')
except:
    logging.error('Gst or the espeak plugin is not installed in the system.')
    _HAS_GST = False

from sugar3 import power

DEFAULT_PITCH = 0

DEFAULT_RATE = 0

_SAVE_TIMEOUT = 500

# These voice names are used to allow the translation of the voice names.
# If espeak adds new voices, we need to update this list.
Example #53
0
    def __init__(self):
        Loggable.__init__(self)
        self.video_effects = []
        self.audio_effects = []
        self.gl_effects = []
        self._effects = {}

        useless_words = [
            "Video", "Audio", "audio", "effect",
            _("Video"),
            _("Audio"),
            _("Audio").lower(),
            _("effect")
        ]
        uselessRe = re.compile(" |".join(useless_words))

        registry = Gst.Registry.get()
        factories = registry.get_feature_list(Gst.ElementFactory)
        longnames = set()
        duplicate_longnames = set()
        for factory in factories:
            longname = factory.get_longname()
            if longname in longnames:
                duplicate_longnames.add(longname)
            else:
                longnames.add(longname)
        for factory in factories:
            klass = factory.get_klass()
            name = factory.get_name()
            if ("Effect" not in klass
                    or any(black in name for black in BLACKLISTED_PLUGINS)):
                continue

            media_type = None
            if "Audio" in klass:
                self.audio_effects.append(factory)
                media_type = AUDIO_EFFECT
            elif "Video" in klass:
                self.video_effects.append(factory)
                media_type = VIDEO_EFFECT
            if not media_type:
                HIDDEN_EFFECTS.append(name)
                continue

            longname = factory.get_longname()
            if longname in duplicate_longnames:
                # Workaround https://bugzilla.gnome.org/show_bug.cgi?id=760566
                # Add name which identifies the element and is unique.
                longname = "%s %s" % (longname, name)
            human_name = uselessRe.sub("", longname).title()
            effect = EffectInfo(name,
                                media_type,
                                categories=self._getEffectCategories(name),
                                human_name=human_name,
                                description=factory.get_description())
            self._effects[name] = effect

        gl_element_factories = registry.get_feature_list_by_plugin("opengl")
        self.gl_effects = [
            element_factory.get_name()
            for element_factory in gl_element_factories
        ]
        if self.gl_effects:
            # Checking whether the GL effects can be used
            # by setting a pipeline with "gleffects" to PAUSED.
            pipeline = Gst.parse_launch(
                "videotestsrc ! glupload ! gleffects ! fakesink")
            bus = pipeline.get_bus()
            bus.add_signal_watch()
            bus.connect("message", self._gl_pipeline_message_cb, pipeline)
            res = pipeline.set_state(Gst.State.PAUSED)
            assert res == Gst.StateChangeReturn.ASYNC
Example #54
0
#https://gist.github.com/velovix/8cbb9bb7fe86a08fb5aa7909b2950259
from threading import Thread

import gi

gi.require_version("Gst", "1.0")

from gi.repository import Gst, GLib
import time

Gst.init()

main_loop = GLib.MainLoop()
thread = Thread(target=main_loop.run)
thread.start()
cmd = """gst-launch-1.0.exe souphttpsrc is-live=true location="$(youtube-dl --format "best[ext=mp4][protocol=https]" --get-url https://www.youtube.com/watch?v=jQhUhlU1KL8)" ! qtdemux name=demuxer  demuxer. ! queue ! decodebin ! autovideosink  demuxer.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink"""
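# Note: cmd above is a gst-launch-1.0 command line kept for reference; it is
# never executed below.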
t = """https://r2---sn-m4vox-ua8s.googlevideo.com/videoplayback?expire=1611531280&ei=sK8NYIG-BZPh1wL3opfQBg&ip=5.102.225.128&id=o-ABHwT7CcK7dWK0ygNJRUJk-NBqL2dsACBwj9OAwQfOmU&itag=22&source=youtube&requiressl=yes&mh=eV&mm=31%2C29&mn=sn-m4vox-ua8s%2Csn-4g5edne7&ms=au%2Crdu&mv=m&mvi=2&pl=20&initcwndbps=508750&vprv=1&mime=video%2Fmp4&ns=zrEFQGojWySkqMq0ToXK22IF&ratebypass=yes&dur=140.387&lmt=1572989225009924&mt=1611509324&fvip=2&beids=9466587&c=WEB&txp=5532432&n=kvStZPM0GzfNsSu&sparams=expire%2Cei%2Cip%2Cid%2Citag%2Csource%2Crequiressl%2Cvprv%2Cmime%2Cns%2Cratebypass%2Cdur%2Clmt&sig=AOq0QJ8wRQIhAPHyHkhW0ZGMDZPQfTg90iB0kgnzstXg4mAJBZw8jNxAAiBSU9Ne8818omKNEVDIrRpu5VjjEUeCe3YHNVJcM_eEeQ%3D%3D&lsparams=mh%2Cmm%2Cmn%2Cms%2Cmv%2Cmvi%2Cpl%2Cinitcwndbps&lsig=AG3C_xAwRQIhAOzDzS-zDzSBNdyCP19IWUg3-kbN0fTpeVYeZ09K4RQ6AiBBozHzQ3zcafBn5Xp6GrTlUPN0aYJDMF2NDlDOV_IhfA%3D%3D"""
#pipeline = Gst.parse_launch("videotestsrc ! decodebin ! videoconvert ! autovideosink")
pipeline = Gst.parse_launch(
    f'souphttpsrc is-live=true location="{t}" ! qtdemux name=demuxer  demuxer. ! queue ! decodebin ! autovideosink  demuxer.audio_0 ! queue ! decodebin ! audioconvert ! audioresample ! autoaudiosink'
)
pipeline.set_state(Gst.State.PLAYING)

try:
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    pass

pipeline.set_state(Gst.State.NULL)
main_loop.quit()
Example #55
0
    def push_to_pipeline(self):
        try:
            r = requests.post(url="http://127.0.0.1:4001/create/%s" %
                              self.stream_name,
                              timeout=1)
            content = r.content
            print(content)
            res = json.loads(content.decode("utf-8"))
            rtp_port = res["rtp_port"]
            print("assigned rtp port %s" % rtp_port)
        except requests.exceptions.RequestException:
            print(
                "Error linking RTP with WebRTC - check the rtmp-to-webrtc module"
            )
            return

        if rtp_port is None:
            self.running = False
            print("Error intentando obtener un puerto rtp")
            return

        launch_string = 'appsrc name=source is-live=true format=GST_FORMAT_TIME ' \
                        ' caps=video/x-raw,format=BGR,width=%s,height=%s,framerate=%s/1 ' \
                        '! videoconvert ! video/x-raw,format=I420 ' \
                        '! x264enc speed-preset=ultrafast tune=zerolatency byte-stream=true ' \
                        '! h264parse ! rtph264pay config-interval=-1 pt=96 ! udpsink host=127.0.0.1 port=%s sync=false' % (
                        self.w, self.h, self.fps, rtp_port)

        pipeline = Gst.parse_launch(launch_string)
        appsrc = pipeline.get_child_by_name('source')
        pipeline.set_state(Gst.State.PLAYING)

        while self.running:
            try:
                ret, frame = self.cap.read()
                if not ret or frame is None:
                    time.sleep(0.5)
                    continue
                frame = cv2.resize(
                    frame, (self.w, self.h),
                    interpolation=cv2.INTER_AREA)  # resized frame
                cv2.putText(frame, 'HELLO WORLD', (100, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
                #cv2.imshow("TEST", frame)
                data = frame.tobytes()  # tostring() is a deprecated alias
                buf = Gst.Buffer.new_allocate(None, len(data), None)
                buf.fill(0, data)
                buf.duration = self.duration
                timestamp = self.number_frames * self.duration
                buf.pts = buf.dts = int(timestamp)
                buf.offset = timestamp
                self.number_frames += 1
                retval = appsrc.emit('push-buffer', buf)
                if retval != Gst.FlowReturn.OK:
                    print(retval)
                k = cv2.waitKey(33)
                if k == 27:  # Esc key to stop
                    break
                time.sleep(1 / self.fps)
            except Exception:
                break
        pipeline.set_state(Gst.State.NULL)
Example #56
0
    def run_example(self):
        """Init pipeline and run example.
        :return: None
        """

        print("Run: NNStreamer example for multimodel implementation.")

        # main loop
        self.loop = GObject.MainLoop()

        pipeline_string = self.pipeline_initializer()

        print(pipeline_string)

        # init pipeline
        self.pipeline = Gst.parse_launch(pipeline_string)

        # bus and message callback
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_bus_message)

        if self.OPTION_FM or self.OPTION_EM:
            res_face = self.pipeline.get_by_name('res_face')
            res_face.connect('new-data', self.new_callback_face)

        if self.OPTION_OD:
            tensor_sink = self.pipeline.get_by_name('res_object')
            tensor_sink.connect('new-data', self.new_callback_object)

        if self.OPTION_EM:
            posesink_0 = self.pipeline.get_by_name('posesink_0')
            posesink_0.connect('new-data', self.new_data_pose_cb)

            posesink_1 = self.pipeline.get_by_name('posesink_1')
            posesink_1.connect('new-data', self.new_data_pose_cb)

            posesink_2 = self.pipeline.get_by_name('posesink_2')
            posesink_2.connect('new-data', self.new_data_pose_cb)

        if self.OPTION_XV or self.OPTION_RTMP:
            tensor_res = self.pipeline.get_by_name('tensor_res')
            tensor_res.connect('draw', self.draw_overlay_cb)
            tensor_res.connect('caps-changed', self.prepare_overlay_cb)

        # start pipeline
        self.pipeline.set_state(Gst.State.PLAYING)
        self.running = True

        if self.OPTION_XV:
            self.set_window_title('output_local',
                                  'NNStreamer Multi Model TFLite Example')

        # run main loop
        # Don't use this if you're trying to use this code with a GUI.
        self.loop.run()

        # Quit when an EOS or error message is received.
        # Run this part when you are going to quit the stream.
        self.running = False
        self.pipeline.set_state(Gst.State.NULL)
        bus.remove_signal_watch()
Example #57
0
def run_pipeline(user_function,
                 src_size=(X_PIXEL, Y_PIXEL),
                 appsink_size=(639, 480)):
    PIPELINE = 'v4l2src device=/dev/video1 ! {src_caps} ! {leaky_q} '
    if detectCoralDevBoard():
        SRC_CAPS = 'video/x-raw,format=YUY2,width={width},height={height},framerate={frame_rate}/1'
        PIPELINE += """ ! glupload ! tee name=t
            t. ! {leaky_q} ! glfilterbin filter=glcolorscale
               ! {dl_caps} ! videoconvert ! {sink_caps} ! {sink_element}
            t. ! {leaky_q} ! glfilterbin filter=glcolorscale
               ! rsvgoverlay name=overlay ! waylandsink
        """
    else:
        SRC_CAPS = 'video/x-raw,width={width},height={height},framerate={frame_rate}/1'
        PIPELINE += """ ! tee name=t
            t. ! {leaky_q} ! videoconvert ! videoscale ! {sink_caps} ! {sink_element}
            t. ! {leaky_q} ! videoconvert
               ! rsvgoverlay name=overlay ! videoconvert ! ximagesink
            """

    SINK_ELEMENT = 'appsink name=appsink sync=false emit-signals=true max-buffers=1 drop=true'
    DL_CAPS = 'video/x-raw,format=RGBA,width={width},height={height}'
    SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
    LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'

    src_caps = SRC_CAPS.format(width=src_size[0],
                               height=src_size[1],
                               frame_rate=FRAME_RATE)
    dl_caps = DL_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    sink_caps = SINK_CAPS.format(width=appsink_size[0], height=appsink_size[1])
    pipeline = PIPELINE.format(leaky_q=LEAKY_Q,
                               src_caps=src_caps,
                               dl_caps=dl_caps,
                               sink_caps=sink_caps,
                               sink_element=SINK_ELEMENT)

    print("Preparing streamer pipeline")
    print("Camera resolution", src_size[0], src_size[1], "Frame rate",
          FRAME_RATE)
    print(pipeline)
    pipeline = Gst.parse_launch(pipeline)

    overlay = pipeline.get_by_name('overlay')
    appsink = pipeline.get_by_name('appsink')
    appsink.connect(
        'new-sample',
        partial(on_new_sample,
                overlay=overlay,
                screen_size=src_size,
                appsink_size=appsink_size,
                user_function=user_function))
    loop = GObject.MainLoop()

    # Set up a pipeline bus watch to catch errors.
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect('message', on_bus_message, loop)

    # Run pipeline.
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        pass

    # Clean up.
    pipeline.set_state(Gst.State.NULL)
    while GLib.MainContext.default().iteration(False):
        pass
Example #58
0
 def _setup(self):
     self.video_pipe = Gst.parse_launch(self.command)
     self.closed = True
Example #59
0
 def do_create_element(self, url):
     return Gst.parse_launch(self.launch_string)
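
# A sketch for context: do_create_element is the GstRtspServer media-factory
# hook; here is a minimal self-contained server around it, with an assumed
# launch string (a pay element named pay0 is required for RTSP).
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib

Gst.init(None)

class TestFactory(GstRtspServer.RTSPMediaFactory):
    launch_string = 'videotestsrc ! x264enc ! rtph264pay name=pay0 pt=96'

    def do_create_element(self, url):
        # Each client connection gets a fresh pipeline parsed from the string.
        return Gst.parse_launch(self.launch_string)

server = GstRtspServer.RTSPServer()
server.get_mount_points().add_factory('/test', TestFactory())
server.attach(None)  # serves rtsp://127.0.0.1:8554/test by default
GLib.MainLoop().run()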
Example #60
0
This demo shows how, by splitting into separate pipelines, each source can be seeked independently.
And if one fails (e.g. file not found), the other continues.
'''

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
import os
from time import sleep
from threading import Thread

Gst.init(None)
mainloop = GObject.MainLoop()

# We make the two pipelines
pipe1 = Gst.parse_launch("playbin uri=\"file://" + os.environ['SRC'] + "\"")
pipe2 = Gst.parse_launch("playbin uri=\"file://" + os.environ['SRC2'] + "\"")

# The third pipeline is more complex as it has to accept the other two, and mix.
pipe3 = Gst.parse_launch(
    "intervideosrc name=video_src_1 ! videomix. " +
    "interaudiosrc name=audio_src_1 ! autoaudiosink "
    "intervideosrc name=video_src_2 ! videomix. " +
    "interaudiosrc name=audio_src_2 ! autoaudiosink " +
    "compositor name=videomix sink_1::xpos=800 sink_1::ypos=800  ! autovideosink "
)

# Because 'playbin' is a bin rather than element, the bit we want within it is 'playsink':
pipe1_playsink = pipe1.get_by_name('playsink')
pipe2_playsink = pipe2.get_by_name('playsink')
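
# (The excerpt ends here; a plausible continuation, assuming matching
# 'channel' names are what link each inter*sink to its inter*src:)
pipe3.get_by_name('video_src_1').set_property('channel', 'v1')
pipe3.get_by_name('audio_src_1').set_property('channel', 'a1')
pipe1_playsink.set_property(
    'video-sink', Gst.parse_bin_from_description('intervideosink channel=v1', True))
pipe1_playsink.set_property(
    'audio-sink', Gst.parse_bin_from_description('interaudiosink channel=a1', True))
# ...and likewise for pipe2 with channels v2/a2, then start everything:
for pipe in (pipe3, pipe1, pipe2):
    pipe.set_state(Gst.State.PLAYING)
mainloop.run()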