def setReceiver(self, offer_sdp):
    '''
    Apply a remote SDP offer to the webrtcbin and return the local answer.

    Parses *offer_sdp*, sets it as the remote description, adds one
    SENDRECV audio transceiver (OPUS, payload 111) and one SENDRECV video
    transceiver (VP8, payload 96), then creates and applies the answer.

    :param offer_sdp: remote offer as SDP text (str)
    :returns: the local answer SDP as text
    '''
    print('setReceiver thread:', threading.get_ident())
    result, offer = GstSdp.SDPMessage.new()
    assert (result == GstSdp.SDPResult.OK)
    GstSdp.sdp_message_parse_buffer(offer_sdp.encode(), offer)
    description = GstWebRTC.WebRTCSessionDescription.new(
        GstWebRTC.WebRTCSDPType.OFFER, offer)
    promise = Gst.Promise.new()
    self.webrtc.emit('set-remote-description', description, promise)
    promise.interrupt()  # fire-and-forget: don't block on completion

    direction_audio = GstWebRTC.WebRTCRTPTransceiverDirection.SENDRECV
    caps_audio = Gst.caps_from_string(
        "application/x-rtp,media=audio,encoding-name=OPUS,payload=111")
    direction_video = GstWebRTC.WebRTCRTPTransceiverDirection.SENDRECV
    # fixed typo: local was previously named "caps_vedio"
    caps_video = Gst.caps_from_string(
        "application/x-rtp,media=video,encoding-name=VP8,payload=96")

    self.webrtc.emit('add-transceiver', direction_audio, caps_audio)
    self.webrtc.emit('add-transceiver', direction_video, caps_video)

    # 'create-answer' replies asynchronously through the promise; wait()
    # blocks until the reply carrying the answer is available.
    promise = Gst.Promise.new()
    self.webrtc.emit('create-answer', None, promise)
    promise.wait()
    reply = promise.get_reply()
    answer = reply.get_value('answer')
    promise = Gst.Promise.new()
    self.webrtc.emit('set-local-description', answer, promise)
    promise.wait()

    return answer.sdp.as_text()
Esempio n. 2
0
    def configure(self, config):
        """Apply *config* to the source, source filter and optional encoder filter."""
        logger.info("Configuring %s..." % self.__class__.__name__)

        # Source element: same property set as before, applied in a loop.
        source = self.graph["source"]
        for prop, value in (("name", "source"),
                            ("emit-signals", True),
                            ("do-timestamp", True),
                            ("is-live", True),
                            ("block", True),
                            ("format", 3)):
            source.set_property(prop, value)

        # Raw caps for the source filter; image format defaults to BGR
        # when not present in the configuration.
        if config.isSet("source.img_format"):
            fmt = config.get("source.img_format")
        else:
            fmt = "BGR"
        raw_caps_str = 'video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1,interlace-mode=(string)progressive' \
            % (fmt, config.get("source.width"), config.get("source.height"), config.get("source.fps"))
        config["source_filter"] = {"caps": raw_caps_str}
        self.graph["source_filter"].set_property(
            "caps", Gst.caps_from_string(raw_caps_str))

        # Encoder caps only matter when the graph actually has that filter.
        if self.graph.contains("encoder_filter"):
            h264_caps_str = 'video/x-h264, profile=main'
            config["encoder_filter"] = {"caps": h264_caps_str}
            self.graph["encoder_filter"].set_property(
                "caps", Gst.caps_from_string(h264_caps_str))

        # Delegate sink configuration to the shared helper.
        VideoPipeline.configure_sink(self.graph, config)
    def start_pipeline(self):
        """Launch PIPELINE_DESC, wire up webrtcbin callbacks, apply a
        hard-coded remote SDP offer, add send-only audio/video
        transceivers and trigger asynchronous answer creation.

        NOTE(review): the offer below looks like a captured browser SDP
        baked in as a literal — presumably for local testing; confirm
        before any production use.
        """
        self.pipe = Gst.parse_launch(PIPELINE_DESC)
        self.webrtc = self.pipe.get_by_name('sendrecv')
        
        # ICE candidates and incoming media pads are handled by callbacks
        # defined elsewhere on this class.
        self.webrtc.connect('on-ice-candidate', self.send_ice_candidate_message)
        self.webrtc.connect('pad-added', self.on_incoming_stream)
        self.pipe.set_state(Gst.State.PLAYING)

        # Hard-coded remote offer (audio: OPUS/ISAC/G722/..., video:
        # VP8/VP9/H264 with RTX/FEC payloads).
        offer = 'v=0\no=- 249052402997811464 2 IN IP4 127.0.0.1\ns=-\nt=0 0\na=group:BUNDLE 0 1\na=extmap-allow-mixed\na=msid-semantic: WMS\nm=audio 9 UDP/TLS/RTP/SAVPF 111 103 104 9 0 8 106 105 13 110 112 113 126\nc=IN IP4 0.0.0.0\na=rtcp:9 IN IP4 0.0.0.0\na=ice-ufrag:09iz\na=ice-pwd:TDNEa4DNQNu5vntI7paNKKgV\na=ice-options:trickle\na=fingerprint:sha-256 4F:12:50:51:FE:1A:76:0E:74:6D:79:31:DB:C0:E5:42:41:4F:AA:9D:AA:C9:29:AD:A2:49:6E:F3:AF:0F:3A:14\na=setup:actpass\na=mid:0\na=extmap:1 urn:ietf:params:rtp-hdrext:ssrc-audio-level\na=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\na=extmap:3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01\na=extmap:4 urn:ietf:params:rtp-hdrext:sdes:mid\na=extmap:5 urn:ietf:params:rtp-hdrext:sdes:rtp-stream-id\na=extmap:6 urn:ietf:params:rtp-hdrext:sdes:repaired-rtp-stream-id\na=sendrecv\na=rtcp-mux\na=rtpmap:111 opus/48000/2\na=rtcp-fb:111 transport-cc\na=fmtp:111 minptime=10;useinbandfec=1\na=rtpmap:103 ISAC/16000\na=rtpmap:104 ISAC/32000\na=rtpmap:9 G722/8000\na=rtpmap:0 PCMU/8000\na=rtpmap:8 PCMA/8000\na=rtpmap:106 CN/32000\na=rtpmap:105 CN/16000\na=rtpmap:13 CN/8000\na=rtpmap:110 telephone-event/48000\na=rtpmap:112 telephone-event/32000\na=rtpmap:113 telephone-event/16000\na=rtpmap:126 telephone-event/8000\nm=video 9 UDP/TLS/RTP/SAVPF 96 97 98 99 100 101 122 102 121 127 120 125 107 108 109 124 119 123 118 114 115 116 35\nc=IN IP4 0.0.0.0\na=rtcp:9 IN IP4 0.0.0.0\na=ice-ufrag:09iz\na=ice-pwd:TDNEa4DNQNu5vntI7paNKKgV\na=ice-options:trickle\na=fingerprint:sha-256 4F:12:50:51:FE:1A:76:0E:74:6D:79:31:DB:C0:E5:42:41:4F:AA:9D:AA:C9:29:AD:A2:49:6E:F3:AF:0F:3A:14\na=setup:actpass\na=mid:1\na=extmap:14 urn:ietf:params:rtp-hdrext:toffset\na=extmap:2 http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time\na=extmap:13 urn:3gpp:video-orientation\na=extmap:3 http://www.ietf.org/id/draft-holmer-rmcat-transport-wide-cc-extensions-01\na=extmap:12 
http://www.webrtc.org/experiments/rtp-hdrext/playout-delay\na=extmap:11 http://www.webrtc.org/experiments/rtp-hdrext/video-content-type\na=extmap:7 http://www.webrtc.org/experiments/rtp-hdrext/video-timing\na=extmap:8 http://www.webrtc.org/experiments/rtp-hdrext/color-space\na=extmap:4 urn:ietf:params:rtp-hdrext:sdes:mid\na=extmap:5 urn:ietf:params:rtp-hdrext:sdes:rtp-stream-id\na=extmap:6 urn:ietf:params:rtp-hdrext:sdes:repaired-rtp-stream-id\na=sendrecv\na=rtcp-mux\na=rtcp-rsize\na=rtpmap:96 VP8/90000\na=rtcp-fb:96 goog-remb\na=rtcp-fb:96 transport-cc\na=rtcp-fb:96 ccm fir\na=rtcp-fb:96 nack\na=rtcp-fb:96 nack pli\na=rtpmap:97 rtx/90000\na=fmtp:97 apt=96\na=rtpmap:98 VP9/90000\na=rtcp-fb:98 goog-remb\na=rtcp-fb:98 transport-cc\na=rtcp-fb:98 ccm fir\na=rtcp-fb:98 nack\na=rtcp-fb:98 nack pli\na=fmtp:98 profile-id=0\na=rtpmap:99 rtx/90000\na=fmtp:99 apt=98\na=rtpmap:100 VP9/90000\na=rtcp-fb:100 goog-remb\na=rtcp-fb:100 transport-cc\na=rtcp-fb:100 ccm fir\na=rtcp-fb:100 nack\na=rtcp-fb:100 nack pli\na=fmtp:100 profile-id=2\na=rtpmap:101 rtx/90000\na=fmtp:101 apt=100\na=rtpmap:122 VP9/90000\na=rtcp-fb:122 goog-remb\na=rtcp-fb:122 transport-cc\na=rtcp-fb:122 ccm fir\na=rtcp-fb:122 nack\na=rtcp-fb:122 nack pli\na=fmtp:122 profile-id=1\na=rtpmap:102 H264/90000\na=rtcp-fb:102 goog-remb\na=rtcp-fb:102 transport-cc\na=rtcp-fb:102 ccm fir\na=rtcp-fb:102 nack\na=rtcp-fb:102 nack pli\na=fmtp:102 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42001f\na=rtpmap:121 rtx/90000\na=fmtp:121 apt=102\na=rtpmap:127 H264/90000\na=rtcp-fb:127 goog-remb\na=rtcp-fb:127 transport-cc\na=rtcp-fb:127 ccm fir\na=rtcp-fb:127 nack\na=rtcp-fb:127 nack pli\na=fmtp:127 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f\na=rtpmap:120 rtx/90000\na=fmtp:120 apt=127\na=rtpmap:125 H264/90000\na=rtcp-fb:125 goog-remb\na=rtcp-fb:125 transport-cc\na=rtcp-fb:125 ccm fir\na=rtcp-fb:125 nack\na=rtcp-fb:125 nack pli\na=fmtp:125 
level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f\na=rtpmap:107 rtx/90000\na=fmtp:107 apt=125\na=rtpmap:108 H264/90000\na=rtcp-fb:108 goog-remb\na=rtcp-fb:108 transport-cc\na=rtcp-fb:108 ccm fir\na=rtcp-fb:108 nack\na=rtcp-fb:108 nack pli\na=fmtp:108 level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42e01f\na=rtpmap:109 rtx/90000\na=fmtp:109 apt=108\na=rtpmap:124 H264/90000\na=rtcp-fb:124 goog-remb\na=rtcp-fb:124 transport-cc\na=rtcp-fb:124 ccm fir\na=rtcp-fb:124 nack\na=rtcp-fb:124 nack pli\na=fmtp:124 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=4d001f\na=rtpmap:119 rtx/90000\na=fmtp:119 apt=124\na=rtpmap:123 H264/90000\na=rtcp-fb:123 goog-remb\na=rtcp-fb:123 transport-cc\na=rtcp-fb:123 ccm fir\na=rtcp-fb:123 nack\na=rtcp-fb:123 nack pli\na=fmtp:123 level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=64001f\na=rtpmap:118 rtx/90000\na=fmtp:118 apt=123\na=rtpmap:114 red/90000\na=rtpmap:115 rtx/90000\na=fmtp:115 apt=114\na=rtpmap:116 ulpfec/90000\na=rtpmap:35 flexfec-03/90000\na=rtcp-fb:35 goog-remb\na=rtcp-fb:35 transport-cc\na=fmtp:35 repair-window=10000000\n'
        print('offer sdp : ' , offer)
        sdp = offer
        # Parse the offer text into a GstSdp message and apply it as the
        # remote description; interrupt() means "don't block on completion".
        res, sdpmsg = GstSdp.SDPMessage.new()
        GstSdp.sdp_message_parse_buffer(bytes(sdp.encode()), sdpmsg)
        offer = GstWebRTC.WebRTCSessionDescription.new(GstWebRTC.WebRTCSDPType.OFFER, sdpmsg)
        promise = Gst.Promise.new()
        self.webrtc.emit('set-remote-description', offer, promise)
        promise.interrupt()

        # Send-only transceivers: OPUS audio (pt 111) and VP8 video (pt 96).
        direction_a = GstWebRTC.WebRTCRTPTransceiverDirection.SENDONLY
        caps_a = Gst.caps_from_string("application/x-rtp,media=audio,encoding-name=OPUS,payload=111")
        direction_v = GstWebRTC.WebRTCRTPTransceiverDirection.SENDONLY
        caps_v = Gst.caps_from_string("application/x-rtp,media=video,encoding-name=VP8,payload=96")
        
        self.webrtc.emit('add-transceiver', direction_a, caps_a)
        self.webrtc.emit('add-transceiver', direction_v, caps_v)

        # The answer arrives asynchronously via self.on_answer_created.
        promise = Gst.Promise.new_with_change_func(self.on_answer_created, None, None)
        self.webrtc.emit('create-answer', None, promise)
Esempio n. 4
0
    def setup_audio_sources(self):
        """Create (but do not link) the audio capture/encode elements.

        Reads self.audio_source / self.audio2_source (PulseAudio device
        identifiers, or falsy when absent) and stores every created
        element on self for later pipeline assembly.
        """
        # Shared output chain, needed when at least one source is active.
        if self.audio_source or self.audio2_source:
            logger.debug("Setup audio elements.")
            self.aud_out_queue = Gst.ElementFactory.make("queue", "queue_a_out")
            self.audioconv = Gst.ElementFactory.make("audioconvert", "audio_conv")
            # Encoder follows the global codec preference: Vorbis when the
            # VP8 codec is selected, MP3 (lame) otherwise.
            if prefs.codec == CODEC_VP8:
                self.audioenc = Gst.ElementFactory.make("vorbisenc", "audio_encoder")
                self.audioenc.set_property("quality", 1)
            else:
                self.audioenc = Gst.ElementFactory.make("lamemp3enc", "audio_encoder")
                self.audioenc.set_property("quality", 0)

        # First capture source: pulsesrc -> capsfilter -> queue.
        if self.audio_source:
            logger.debug("Audio1 Source:\n  {0}".format(self.audio_source))
            self.audiosrc = Gst.ElementFactory.make("pulsesrc", "audio_src")
            self.audiosrc.set_property("device", self.audio_source)
            self.aud_caps = Gst.caps_from_string("audio/x-raw")
            self.aud_caps_filter = Gst.ElementFactory.make("capsfilter", "aud_filter")
            self.aud_caps_filter.set_property("caps", self.aud_caps)

            self.aud_in_queue = Gst.ElementFactory.make("queue", "queue_a_in")

        # Second capture source: same shape, plus its own audioconvert.
        if self.audio2_source:
            logger.debug("Audio2 Source:\n  {0}".format(self.audio2_source))
            self.audio2src = Gst.ElementFactory.make("pulsesrc", "audio2_src")
            self.audio2src.set_property("device", self.audio2_source)
            self.aud2_caps = Gst.caps_from_string("audio/x-raw")
            self.aud2_caps_filter = Gst.ElementFactory.make("capsfilter", "aud2_filter")
            self.aud2_caps_filter.set_property("caps", self.aud2_caps)
            self.aud2_in_queue = Gst.ElementFactory.make("queue", "queue_a2_in")
            self.audio2conv = Gst.ElementFactory.make("audioconvert", "audio2_conv")

        # A mixer is only required when both sources are captured at once.
        if self.audio_source and self.audio2_source:
            self.audiomixer = Gst.ElementFactory.make("adder", "audiomixer")
Esempio n. 5
0
    def build_pipeline(self, channels, sinkname, samplerate, srcname,
                       parse_element='wavparse', debug=False):
        """Assemble the multi-channel capture -> interleave -> classify chain.

        Elements are created back-to-front: each make_add_link() call links
        the new element to the (downstream) element passed as its second
        argument.

        :param channels: number of mono input channels to interleave
        :param sinkname: factory name of the terminal sink element
        :param samplerate: sample rate (Hz) enforced on every channel
        :param srcname: factory name of the per-channel source ('filesrc'
            sources get a parse element instead of a capsfilter)
        :param parse_element: parser used for 'filesrc' sources
        :param debug: when True, dump the pipeline graph to "pipeline.dot"
            (replaces a previously dead ``if 0:`` block; default keeps the
            original behavior of never dumping)
        """
        self.channels = channels
        self.srcname = srcname
        # Downstream end first: sink <- classifier <- capsfilter <- interleave.
        self.sink = self.make_add_link(sinkname, None)
        self.classifier = self.make_add_link('classify', self.sink)
        self.capsfilter = self.make_add_link('capsfilter', self.classifier)
        self.interleave = self.make_add_link('interleave', self.capsfilter)
        self.sources = []
        for i in range(channels):
            ac = self.make_add_link('audioconvert', self.interleave)
            ar = self.make_add_link('audioresample', ac)
            if srcname == 'filesrc':
                # File inputs need a parser rather than fixed raw caps.
                wp = self.make_add_link(parse_element, ar)
                fs = self.make_add_link(srcname, wp)
            else:
                # Live inputs: pin each channel to mono at the target rate.
                cf = self.make_add_link('capsfilter', ar)
                cf.set_property("caps", Gst.caps_from_string("audio/x-raw, "
                                                             "layout=(string)interleaved, "
                                                             "channel-mask=(bitmask)0x0, "
                                                             "rate=%d, channels=1"
                                                             % (samplerate,)))
                fs = self.make_add_link(srcname, cf)
            self.sources.append(fs)

        # Interleaved multi-channel caps on the filter feeding the classifier.
        caps = Gst.caps_from_string("audio/x-raw, "
                                    "layout=(string)interleaved, "
                                    "channel-mask=(bitmask)0x0, "
                                    "rate=%d, channels=%d"
                                    % (samplerate, channels))
        self.capsfilter.set_property("caps", caps)
        if debug:
            Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL,
                                      "pipeline.dot")
Esempio n. 6
0
    def _renderButtonClickedCb(self, unused_button):
        """
        The render button inside the render dialog has been clicked,
        start the rendering process.

        Builds an encoding profile (container + optional video/audio
        sub-profiles) from the dialog state, points the pipeline at the
        chosen output file, and shows the progress dialog.
        """
        # Output file = chooser directory URI + name typed in the entry.
        self.outfile = os.path.join(self.filebutton.get_uri(),
                                    self.fileentry.get_text())
        self.progress = RenderingProgressDialog(self.app, self)
        self.window.hide()  # Hide the rendering settings dialog while rendering

        # FIXME GES: Handle presets here!
        self.containerprofile = EncodingContainerProfile.new(None, None,
                                    Gst.caps_from_string(self.muxertype), None)

        # Add a video and/or audio sub-profile depending on which output
        # checkbuttons are active in the dialog.
        if self.video_output_checkbutton.get_active():
            self.videoprofile = EncodingVideoProfile.new(
                                    Gst.caps_from_string(self.videotype), None,
                                    self.settings.getVideoCaps(True), 0)
            self.containerprofile.add_profile(self.videoprofile)
        if self.audio_output_checkbutton.get_active():
            self.audioprofile = EncodingAudioProfile.new(
                                    Gst.caps_from_string(self.audiotype), None,
                                    self.settings.getAudioCaps(), 0)
            self.containerprofile.add_profile(self.audioprofile)

        self._pipeline.set_render_settings(self.outfile, self.containerprofile)
        self.startAction()
        self.progress.window.show()
        self.progress.connect("cancel", self._cancelRender)
        self.progress.connect("pause", self._pauseRender)
        # Watch the pipeline bus and the playback position so the progress
        # dialog can be updated while rendering.
        bus = self._pipeline.get_bus()
        bus.add_signal_watch()
        self._gstSigId[bus] = bus.connect('message', self._busMessageCb)
        self.app.current.pipeline.connect("position", self._updatePositionCb)
Esempio n. 7
0
	def __init__(self):
		"""Create the element's pads — two RGBA sink pads (left/right)
		and one RGBA source pad — plus one buffer queue per side.
		"""
		src_template = Gst.PadTemplate.new (
		 'src',
		 Gst.PadDirection.SRC,
		 Gst.PadPresence.ALWAYS,
		 Gst.caps_from_string('video/x-raw,format=(string)RGBA'),
		)

		# NOTE(review): a single ALWAYS template named 'sink_{l,r}' is
		# reused for both concrete pads below — unusual naming for an
		# ALWAYS template; confirm this is intentional.
		sink_template = Gst.PadTemplate.new (
		 'sink_{l,r}',
		 Gst.PadDirection.SINK,
		 Gst.PadPresence.ALWAYS,
		 Gst.caps_from_string('video/x-raw,format=(string)RGBA'),
		)

		Gst.Element.__init__(self)

		# Left input pad with its chain/event handlers.
		self.sinklpad = Gst.Pad.new_from_template(sink_template, 'sink_l')
		self.sinklpad.set_chain_function_full(self._sinkl_chain, None)
		self.sinklpad.set_event_function_full(self._sinkl_event, None)
		self.add_pad(self.sinklpad)

		# Right input pad with its chain/event handlers.
		self.sinkrpad = Gst.Pad.new_from_template(sink_template, 'sink_r')
		self.sinkrpad.set_chain_function_full(self._sinkr_chain, None)
		self.sinkrpad.set_event_function_full(self._sinkr_event, None)
		self.add_pad(self.sinkrpad)

		# Single output pad.
		self.srcvpad = Gst.Pad.new_from_template(src_template, 'src_v')
		self.srcvpad.set_event_function_full(self._srcv_event, None)
		self.add_pad(self.srcvpad)

		# Per-side pending-buffer queues consumed by the chain functions.
		self._bufs_l = collections.deque()
		self._bufs_r = collections.deque()
Esempio n. 8
0
    def setup_audio_sources(self):
        """Create (but do not link) audio capture/encode elements for up to
        two PulseAudio sources.

        Encoder selection depends on self.mode: broadcast mode uses AAC
        (avenc_aac + aacparse), otherwise Vorbis or MP3 per prefs.codec.
        All created elements are stored on self for later assembly.
        """
        if self.audio_source or self.audio2_source:
            logger.debug("Setup audio elements.")
            self.aud_out_queue = Gst.ElementFactory.make("queue", "queue_a_out")
            self.audioconv = Gst.ElementFactory.make("audioconvert", "audio_conv")
            if self.mode == MODE_BROADCAST:
                # Broadcast: fixed-bitrate AAC; compliance -2 enables the
                # experimental avenc_aac encoder.
                self.audio_bitrate = 128000
                self.audioenc = Gst.ElementFactory.make("avenc_aac", "audio_encoder")
                self.audioenc.set_property("bitrate", self.audio_bitrate)
                self.audioenc.set_property("compliance", -2)

                self.aacparse = Gst.ElementFactory.make("aacparse", "aacparse")
                self.aacparse_caps = Gst.caps_from_string("audio/mpeg,mpegversion=4,stream-format=raw")
                self.f_aacparse_caps = Gst.ElementFactory.make("capsfilter", "aacparse_filter")
                self.f_aacparse_caps.set_property("caps", self.aacparse_caps)
            else:
                # Recording: codec preference picks Vorbis (VP8) or MP3.
                if prefs.codec == CODEC_VP8:
                    self.audioenc = Gst.ElementFactory.make("vorbisenc", "audio_encoder")
                    self.audioenc.set_property("quality", 1)
                else:
                    self.audioenc = Gst.ElementFactory.make("lamemp3enc", "audio_encoder")
                    self.audioenc.set_property("quality", 0)

        if self.audio_source:
            logger.debug("Audio1 Source:\n  {0}".format(self.audio_source))
            self.audiosrc = Gst.ElementFactory.make("pulsesrc", "audio_src")
            self.audiosrc.set_property("device", self.audio_source)
            if self.mode == MODE_BROADCAST and not self.audio2_source:
                # Single-source broadcast pins full S16LE stereo 44.1 kHz caps.
                audio_caps = " ".join(["audio/x-raw, format=(string)S16LE, endianness=(int)1234,"
                                       "signed=(boolean)true, width=(int)16, depth=(int)16,",
                                       "rate=(int)44100, channels=(int)2"])
                self.aud_caps = Gst.caps_from_string(audio_caps)
            else:
                #self.aud_caps = Gst.caps_from_string("audio/x-raw")
                self.aud_caps = Gst.caps_from_string("audio/x-raw,channels=2") # added by srikant based on launchpad bug
            self.f_aud_caps = Gst.ElementFactory.make("capsfilter", "aud_filter")
            self.f_aud_caps.set_property("caps", self.aud_caps)

            self.aud_in_queue = Gst.ElementFactory.make("queue", "queue_a_in")

        if self.audio2_source:
            logger.debug("Audio2 Source:\n  {0}".format(self.audio2_source))
            self.audio2src = Gst.ElementFactory.make("pulsesrc", "audio2_src")
            self.audio2src.set_property("device", self.audio2_source)
            if self.mode == MODE_BROADCAST and not self.audio_source:
                # Mirror of the single-source broadcast caps above.
                audio_caps = " ".join(["audio/x-raw, format=(string)S16LE, endianness=(int)1234,"
                                       "signed=(boolean)true, width=(int)16, depth=(int)16,",
                                       "rate=(int)44100, channels=(int)2"])
                self.aud2_caps = Gst.caps_from_string(audio_caps)
            else:
                self.aud2_caps = Gst.caps_from_string("audio/x-raw, channels=2")
            self.f_aud2_caps = Gst.ElementFactory.make("capsfilter", "aud2_filter")
            self.f_aud2_caps.set_property("caps", self.aud2_caps)
            self.aud2_in_queue = Gst.ElementFactory.make("queue", "queue_a2_in")
            self.audio2conv = Gst.ElementFactory.make("audioconvert", "audio2_conv")

        # A mixer is only needed when both sources are captured together.
        if self.audio_source and self.audio2_source:
            self.audiomixer = Gst.ElementFactory.make("adder", "audiomixer")
Esempio n. 9
0
class Passthrough(Gst.Element):
    """Minimal pass-through element: buffers, events and queries are
    forwarded unchanged between a single sink pad and a single src pad."""

    __gstmetadata__ = ("Passthrough element", "element.py", "Proxy buffers",
                       "Andrew Cook <*****@*****.**>")

    # Static ANY-caps pad templates shared by all instances.
    _src_template = Gst.PadTemplate.new('src', Gst.PadDirection.SRC,
                                        Gst.PadPresence.ALWAYS,
                                        Gst.caps_from_string('ANY'))
    _sink_template = Gst.PadTemplate.new('sink', Gst.PadDirection.SINK,
                                         Gst.PadPresence.ALWAYS,
                                         Gst.caps_from_string('ANY'))

    _gsttemplates = (
        _src_template,
        _sink_template,
    )

    def __init__(self):
        """Create both pads and install the forwarding callbacks."""
        Gst.Element.__init__(self)

        # Sink pad: receives buffers/events/queries from upstream.
        self.sinkpad = Gst.Pad.new_from_template(self._sink_template, 'sink')
        self.sinkpad.set_chain_function_full(self._sink_chain, None)
        self.sinkpad.set_event_function_full(self._sink_event, None)
        self.sinkpad.set_query_function_full(self._sink_query, None)
        self.add_pad(self.sinkpad)

        # Src pad: forwards everything downstream / relays upstream traffic.
        self.srcpad = Gst.Pad.new_from_template(self._src_template, 'src')
        self.srcpad.set_event_function_full(self._src_event, None)
        self.srcpad.set_query_function_full(self._src_query, None)
        self.add_pad(self.srcpad)

    def _sink_chain(self, pad, parent, buf):
        # Incoming buffers are pushed straight out of the src pad.
        return self.srcpad.push(buf)

    def _src_event(self, pad, parent, event):
        # Upstream events travel src -> sink.
        return self.sinkpad.push_event(event)

    def _sink_event(self, pad, parent, event):
        # Downstream events travel sink -> src.
        return self.srcpad.push_event(event)

    # hack: force the query to be writable by messing with the refcount
    # https://bugzilla.gnome.org/show_bug.cgi?id=746329

    def _sink_query(self, pad, parent, query):
        # Pretend the query has refcount 1 so peer_query() treats it as
        # writable, then restore the original count afterwards.
        refcount = query.mini_object.refcount
        query.mini_object.refcount = 1
        ret = self.srcpad.peer_query(query)
        query.mini_object.refcount += refcount - 1
        return ret

    def _src_query(self, pad, parent, query):
        # Same writability workaround, relayed in the upstream direction.
        refcount = query.mini_object.refcount
        query.mini_object.refcount = 1
        ret = self.sinkpad.peer_query(query)
        query.mini_object.refcount += refcount - 1
        return ret
Esempio n. 10
0
    def __init__(self):
        """Build a one-shot webcam snapshot pipeline:
        v4l2src -> capsfilter -> videoconvert -> timeoverlay -> clockoverlay
        -> videorate -> capsfilter -> videoconvert -> pngenc -> multifilesink.
        """
        GObject.threads_init()
        gst.init(None)
        self.pipeline = gst.Pipeline()

        # --- element creation ---
        self.video_source = gst.ElementFactory.make('v4l2src', 'video_source')
        self.videoconvert = gst.ElementFactory.make('videoconvert',
                                                    'videoconvert')
        self.clock = gst.ElementFactory.make('clockoverlay', 'clock')
        self.timer = gst.ElementFactory.make('timeoverlay', 'timer')
        self.videorate = gst.ElementFactory.make('videorate', 'videorate')
        self.sconvert = gst.ElementFactory.make('videoconvert', 'sconvert')
        self.png = gst.ElementFactory.make('pngenc', 'png')
        self.multi_sink = gst.ElementFactory.make('multifilesink',
                                                  'multi_sink')
        self.filter = gst.ElementFactory.make("capsfilter", "filter")
        self.filter1 = gst.ElementFactory.make("capsfilter", "filter1")

        # --- element configuration ---
        self.video_source.set_property("num-buffers", 1)
        self.caps = gst.caps_from_string(
            "video/x-raw,format=RGB,width=800,height=600,framerate=5/1")
        self.caps1 = gst.caps_from_string("video/x-raw,framerate=1/1")
        self.filter.set_property("caps", self.caps)
        self.filter1.set_property("caps", self.caps1)
        self.timer.set_property('valignment', 'bottom')
        self.timer.set_property('halignment', 'right')
        self.clock.set_property('time-format', '%Y/%m/%d %H:%M:%S')
        self.clock.set_property('valignment', 'bottom')
        self.png.set_property('snapshot', True)
        #self.png.set_property('idct-method',1)
        self.multi_sink.set_property('location', '/tmp/image/frame%05d.png')

        # Watch upstream events arriving at the sink pad.
        self.multi_sink_pad = self.multi_sink.get_static_pad('sink')
        self.probe_id = self.multi_sink_pad.add_probe(
            gst.PadProbeType.EVENT_UPSTREAM, self.probe_callback)

        # --- assembly: add everything, then link in stream order ---
        chain = (self.video_source, self.filter, self.videoconvert,
                 self.timer, self.clock, self.videorate, self.filter1,
                 self.sconvert, self.png, self.multi_sink)
        for element in chain:
            self.pipeline.add(element)
        for upstream, downstream in zip(chain, chain[1:]):
            upstream.link(downstream)
Esempio n. 11
0
    def __init__(self):
        """Single-frame webcam capture to /home/pi/frame.png with time and
        clock overlays burned in.
        """
        GObject.threads_init()
        gst.init(None)
        self.pipeline = gst.Pipeline()

        # Create every element up front.
        self.video_source = gst.ElementFactory.make('v4l2src', 'video_source')
        self.videoconvert = gst.ElementFactory.make('videoconvert', 'videoconvert')
        self.clock = gst.ElementFactory.make('clockoverlay', 'clock')
        self.timer = gst.ElementFactory.make('timeoverlay', 'timer')
        self.videorate = gst.ElementFactory.make('videorate', 'videorate')
        self.sconvert = gst.ElementFactory.make('videoconvert', 'sconvert')
        self.png = gst.ElementFactory.make('pngenc', 'png')
        self.multi_sink = gst.ElementFactory.make('multifilesink', 'multi_sink')
        self.filter = gst.ElementFactory.make("capsfilter", "filter")
        self.filter1 = gst.ElementFactory.make("capsfilter", "filter1")

        # Configure them.
        self.video_source.set_property("num-buffers", 1)
        self.caps = gst.caps_from_string("video/x-raw,format=RGB,width=800,height=600,framerate=5/1")
        self.caps1 = gst.caps_from_string("video/x-raw,framerate=1/1")
        self.filter.set_property("caps", self.caps)
        self.filter1.set_property("caps", self.caps1)
        self.timer.set_property('valignment', 'bottom')
        self.timer.set_property('halignment', 'right')
        self.clock.set_property('time-format', '%Y/%m/%d %H:%M:%S')
        self.clock.set_property('valignment', 'bottom')
        self.png.set_property('snapshot', True)
        #self.png.set_property('idct-method',1)
        self.multi_sink.set_property('location', '/home/pi/frame.png')

        # Probe for upstream events reaching the file sink.
        self.multi_sink_pad = self.multi_sink.get_static_pad('sink')
        self.probe_id = self.multi_sink_pad.add_probe(
            gst.PadProbeType.EVENT_UPSTREAM, self.probe_callback)

        # Add all elements, then link them pairwise in stream order.
        stream_order = (self.video_source, self.filter, self.videoconvert,
                        self.timer, self.clock, self.videorate, self.filter1,
                        self.sconvert, self.png, self.multi_sink)
        for element in stream_order:
            self.pipeline.add(element)
        for left, right in zip(stream_order, stream_order[1:]):
            left.link(right)
Esempio n. 12
0
    def __init__(self, dest, name):
        """Decode H.264 input, downscale it, VP8-encode into WebM and serve
        the result over TCP at dest['ip']:dest['port'].
        """
        super(WebServiceBin, self).__init__(None, name + '_websrv')

        # Create and configure all elements first.
        web_q = Gst.ElementFactory.make('queue', None)
        dec = Gst.ElementFactory.make('avdec_h264', None)
        scale = Gst.ElementFactory.make('videoscale', None)
        filter1 = Gst.ElementFactory.make('capsfilter', None)
        filter1.set_property(
            'caps',
            Gst.caps_from_string(
                "video/x-raw, width=320, height=180, framerate=(fraction)5/1"))
        vidconv = Gst.ElementFactory.make('videoconvert', None)
        vp8enc = Gst.ElementFactory.make('vp8enc', None)
        webmmux = Gst.ElementFactory.make('webmmux', name + '_webmmux')
        filter2 = Gst.ElementFactory.make('capsfilter', None)
        filter2.set_property('caps', Gst.caps_from_string('video/webm'))
        srvsink = Gst.ElementFactory.make('tcpserversink', None)
        srvsink.set_property('host', dest['ip'])
        srvsink.set_property('port', dest['port'])

        for element in (web_q, dec, scale, filter1, vidconv, vp8enc,
                        webmmux, filter2, srvsink):
            self.add(element)

        # Linear section up to the encoder.
        web_q.link(dec)
        dec.link(scale)
        scale.link(filter1)
        filter1.link(vidconv)
        vidconv.link(vp8enc)

        # The muxer's video input is a request pad, so link it explicitly.
        v_pad = vp8enc.get_static_pad('src')
        v_pad.link(webmmux.get_request_pad('video_%u'))

        webmmux.link(filter2)
        filter2.link(srvsink)

        # Expose the queue's sink pad as the bin's external sink.
        g_pad = Gst.GhostPad.new('sink', web_q.get_static_pad('sink'))
        self.add_pad(g_pad)
Esempio n. 13
0
    def __init__(self):
        """RTP/Theora receive bin:
        udpsrc -> rtptheoradepay -> theoradec -> videoconvert -> autovideosink.
        """
        super(VideoInBin, self).__init__()

        # Source: RTP on UDP port 5004; caps must be declared up front.
        video_src = Gst.ElementFactory.make("udpsrc", None)
        video_src.set_property("port", 5004)
        video_src.set_property("caps", Gst.caps_from_string(VIDEO_RTP_CAPS))

        video_rtp_theora_depay = Gst.ElementFactory.make("rtptheoradepay", None)
        video_decode = Gst.ElementFactory.make("theoradec", None)
        video_convert = Gst.ElementFactory.make("videoconvert", None)
        xvimage_sink = Gst.ElementFactory.make("autovideosink", None)

        # Add everything to the bin, then link pairwise in stream order.
        chain = (video_src, video_rtp_theora_depay, video_decode,
                 video_convert, xvimage_sink)
        for element in chain:
            self.add(element)
        for upstream, downstream in zip(chain, chain[1:]):
            upstream.link(downstream)
Esempio n. 14
0
 def _create_input_frame(self, item):
     """Normalize *item* into something pushable into the pipeline.

     GvaFrameData is converted into a Gst.Sample (wrapping its bytes and
     optional pts/duration/JSON message); Gst.Sample and Gst.Buffer pass
     through unchanged; GvaSample yields its .sample; anything else -> None.
     """
     if isinstance(item, GvaFrameData):
         gst_buffer = None
         if item.data:
             if not isinstance(item.data, bytes):
                 raise Exception("GvaFrameData must contain bytes")
             # Copy the payload into a freshly allocated buffer.
             gst_buffer = Gst.Buffer.new_allocate(None, len(item.data))
             gst_buffer.fill(0, item.data)
             if item.pts:
                 gst_buffer.pts = item.pts
                 gst_buffer.dts = item.pts  # dts mirrors pts
             if item.duration:
                 gst_buffer.duration = item.duration
             # A string message is decoded to JSON when possible, then
             # (decoded or not) attached as GVA JSON metadata.
             if item.message and isinstance(item.message, str):
                 try:
                     item.message = json.loads(item.message)
                 except Exception:
                     pass
             if item.message:
                 GVAJSONMeta.add_json_meta(gst_buffer,
                                           json.dumps(item.message))
         gst_caps = item.caps
         if gst_caps and isinstance(gst_caps, str):
             gst_caps = Gst.caps_from_string(gst_caps)
         return Gst.Sample.new(gst_buffer, gst_caps, item.segment,
                               item.info)
     if isinstance(item, (Gst.Sample, Gst.Buffer)):
         return item
     if isinstance(item, GvaSample):
         return item.sample
     return None
Esempio n. 15
0
    def file_changed(self, new_file):
        """Build and start a still-image branch (filesrc -> gdkpixbufdec ->
        videoconvert -> imagefreeze -> capsfilter) and attach it to the
        mixer so *new_file* appears as a video layer.
        """
        # Bug fix: this was a Python 2 `print` statement, which is a
        # syntax error under Python 3 (the gi-style Gst API used here).
        print("Setting file:", new_file)
        self.filesrc = self.add_element('filesrc')
        self.gdkdec = self.add_element('gdkpixbufdec')
        self.convert = self.add_element('videoconvert')
        self.freeze = self.add_element('imagefreeze')
        self.caps = self.add_element('capsfilter')
        caps = Gst.caps_from_string(self.DEFAULT_VIDEO_CAPS)
        self.caps.set_property('caps', caps)

        self.filesrc.set_property('location', new_file)

        # Link the chain and hook its output to a requested mixer pad.
        self.filesrc.link(self.gdkdec)
        self.gdkdec.link(self.convert)
        self.convert.link(self.freeze)
        self.freeze.link(self.caps)
        videomixer_pad = self.mixer.get_request_pad("sink_%u")
        self.caps.get_static_pad('src').link(videomixer_pad)

        # Elements added to an already-running pipeline must be brought to
        # PLAYING explicitly.
        self.filesrc.set_state(Gst.State.PLAYING)
        self.gdkdec.set_state(Gst.State.PLAYING)
        self.convert.set_state(Gst.State.PLAYING)
        self.freeze.set_state(Gst.State.PLAYING)
        self.caps.set_state(Gst.State.PLAYING)

        videomixer_pad.set_property('zorder', 1)
Esempio n. 16
0
def run_client(card=DEFAULT_ALSASINK,host=DEFAULT_HOST,port=DEFAULT_PORT,rate=DEFAULT_RATE,latency=DEFAULT_LATENCY,config_map=DEFAULT_CONFIG_MAP):
    """Receive an MP4A-LATM RTP stream over UDP, decode it with faad and
    play it on the given ALSA sink. Blocks in a GLib main loop."""
    pipeline = Gst.Pipeline()

    udp_source = Gst.ElementFactory.make("udpsrc", "source")
    udp_source.set_property("address", host)
    udp_source.set_property("port", port)
    caps_text = ("application/x-rtp, media=(string)audio, clock-rate=(int){0}, "
                 "encoding-name=(string)MP4A-LATM, cpresent=(string)0, "
                 "config=(string){1}, payload=(int)96").format(rate, config_map[rate])
    udp_source.set_property("caps", Gst.caps_from_string(caps_text))

    buffer_el = Gst.ElementFactory.make("rtpjitterbuffer", "jitterbuf")
    buffer_el.set_property("latency", latency)
    depay = Gst.ElementFactory.make("rtpmp4adepay", "deserializer")
    aac_decoder = Gst.ElementFactory.make("faad", "decoder")
    to_pcm = Gst.ElementFactory.make("audioconvert", "converter")
    alsa_out = Gst.ElementFactory.make("alsasink", "sink")
    alsa_out.set_property("device", card)
    alsa_out.set_property("sync", False)

    # Populate the pipeline, then link each element to its successor.
    chain = (udp_source, buffer_el, depay, aac_decoder, to_pcm, alsa_out)
    for element in chain:
        pipeline.add(element)
    for upstream, downstream in zip(chain, chain[1:]):
        upstream.link(downstream)

    pipeline.set_state(Gst.State.PLAYING)

    loop = GObject.MainLoop()
    loop.run()
Esempio n. 17
0
 def start(self):
     """Start playback: decode self.uri and deliver RGBA frames to
     on_frame_cpu via an appsink.

     Builds uridecodebin -> queue2 -> videoconvert -> appsink; the
     decodebin's dynamic pad is attached to the queue in on_pad_added.
     """
     Media.start(self)
     # uridecodebin needs a proper URI; convert plain file paths.
     if not Gst.uri_is_valid(self.uri):
         self.uri = Gst.filename_to_uri(self.uri)
     source = Gst.ElementFactory.make("uridecodebin", None)
     source.set_property("uri", self.uri)
     queue = Gst.ElementFactory.make("queue2", None)
     queue.set_property("max-size-buffers", 5)
     convert = Gst.ElementFactory.make("videoconvert", None)
     sink = Gst.ElementFactory.make("appsink", None)
     sink.set_property("emit-signals", True)
     # Keep at most one pending frame in the appsink.
     sink.set_property("max-buffers", 1)
     caps = Gst.caps_from_string("video/x-raw, format=(string){RGBA}")
     sink.set_property("caps", caps)
     self.pipeline.add(source)
     self.pipeline.add(queue)
     self.pipeline.add(convert)
     self.pipeline.add(sink)
     # Decodebin pads appear at runtime; hook them to the queue then.
     source.connect("pad-added", self.on_pad_added, queue)
     queue.link(convert)
     convert.link(sink)
     sink.connect("new-sample", self.on_frame_cpu)
     # Creates a bus and set callbacks to receive errors
     bus = self.pipeline.get_bus()
     bus.add_signal_watch()
     bus.connect("message::eos", self.on_eos)
     bus.connect("message::error", self.on_error)
     self.pipeline.set_state(Gst.State.PLAYING)
    def init_request(self, id, caps_str):
        """Prepare the pipeline for a new client request.

        Sets the appsrc caps (or clears them when caps_str is empty),
        optionally re-targets the filesink to <outdir>/<id>.raw, moves the
        pipeline to PLAYING and primes the appsrc with one empty buffer.
        """
        self.request_id = id
        if caps_str and len(caps_str) > 0:
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            #caps = Gst.caps_from_string(None)
            self.appsrc.set_property("caps", None)
            #self.pipeline.set_state(Gst.State.READY)
            pass
        #self.appsrc.set_state(Gst.State.PAUSED)

        if self.outdir:
            # Re-target the filesink; it is pushed through NULL here,
            # presumably because 'location' may only change while stopped.
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location',
                                       "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)

        #self.filesink.set_state(Gst.State.PLAYING)
        #self.decodebin.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
        # push empty buffer (to avoid hang on client diconnect)
        buf = Gst.Buffer.new_allocate(None, 0, None)
        self.appsrc.emit("push-buffer", buf)
        logger.info('%s: Pipeline initialized' % (self.request_id))
Esempio n. 19
0
def run_server(alsadev=DEFAULT_ALSASRC,audience=DEFAULT_BROADCAST,port=DEFAULT_PORT,bitrate=DEFAULT_BITRATE,alsarate=DEFAULT_RATE):
    """Capture ALSA audio, AAC-encode it with faac and stream it as RTP
    (MP4A-LATM) over UDP. Blocks in a GLib main loop."""
    pipeline = Gst.Pipeline()

    capture = Gst.ElementFactory.make("alsasrc", "source")
    capture.set_property("device", alsadev)
    pcm_convert = Gst.ElementFactory.make("audioconvert", "converter")
    aac = Gst.ElementFactory.make("faac", "encoder")
    aac.set_property("bitrate", bitrate)
    payloader = Gst.ElementFactory.make("rtpmp4apay", "serializer")
    udp_out = Gst.ElementFactory.make("udpsink", "server")
    udp_out.set_property("host", audience)
    udp_out.set_property("port", port)

    for element in (capture, pcm_convert, aac, payloader, udp_out):
        pipeline.add(element)

    # Pin the raw capture format (S32LE stereo at the requested rate).
    capture.link_filtered(
        pcm_convert,
        Gst.caps_from_string(
            "audio/x-raw,format=(string)S32LE,channels=(int)2,rate=(int){0}".format(alsarate)))
    pcm_convert.link(aac)
    aac.link(payloader)
    payloader.link(udp_out)

    pipeline.set_state(Gst.State.PLAYING)

    loop = GObject.MainLoop()
    loop.run()
Esempio n. 20
0
    def init_request(
        self, id, caps_str
    ):  # called from worker.py's recieved_message() if STATE=CONNECTED
        """Prepare the pipeline for a new client request.

        Applies caps_str to the appsrc (cleared when empty), re-targets
        the filesink to <outdir>/<id>.raw when outdir is set, moves the
        pipeline to PLAYING and primes the appsrc with one empty buffer.
        """
        self.request_id = id
        if caps_str and len(
                caps_str
        ) > 0:  # caps (capabilities) is media type (or content type)
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            ##caps = Gst.caps_from_string(None)
            self.appsrc.set_property("caps", None)
            ##self.pipeline.set_state(Gst.State.READY)
            pass
        ##self.appsrc.set_state(Gst.State.PAUSED)

        if self.outdir:  # change filesink location property /dev/null -> outdir
            # The filesink is driven through NULL here, presumably because
            # 'location' may only change while the element is stopped.
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location',
                                       "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)

        ##self.filesink.set_state(Gst.State.PLAYING)
        ##self.decodebin.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
        # Create a new empty buffer
        buf = Gst.Buffer.new_allocate(None, 0, None)
        # Push empty buffer into the appsrc (to avoid hang on client diconnect)
        self.appsrc.emit("push-buffer", buf)
        logger.info('%s: Pipeline initialized' % (self.request_id))
    def _create_sink_elements(self):
        """Create and add the RGBA appsink branch elements.

        Returns (nvvidconv, capsfilter, sink, fakesink); the elements are
        added to the pipeline here but not linked.
        """
        converter = Gst.ElementFactory.make("nvvideoconvert",
                                            "convertor appsink")
        if not converter:
            sys.stderr.write(" Unable to create nvvidconv2 \n")

        caps_filter = Gst.ElementFactory.make("capsfilter", "capsfilter")
        if not caps_filter:
            sys.stderr.write(" Unable to create capsfilter \n")
        caps_filter.set_property(
            "caps", Gst.Caps.from_string("video/x-raw, format=RGBA"))

        app_sink = Gst.ElementFactory.make("appsink", "sink")
        if not app_sink:
            sys.stderr.write(" Unable to create appsink \n")
        app_sink.set_property("emit-signals", True)
        app_sink.set_property(
            "caps", Gst.caps_from_string("video/x-raw, format=RGBA"))
        # Keep only the newest frame; drop the rest.
        app_sink.set_property("drop", True)
        app_sink.set_property("max_buffers", 1)
        # sink.set_property("sync", False)
        app_sink.set_property("wait-on-eos", False)
        app_sink.connect("new-sample", new_buffer, app_sink)

        dummy_sink = Gst.ElementFactory.make("fakesink", "fakesink")

        for element in (converter, caps_filter, app_sink, dummy_sink):
            self.pipeline.add(element)

        return converter, caps_filter, app_sink, dummy_sink
Esempio n. 22
0
    def __init__(self, source, multichannel, sample_rate, interval,
                 peak_falloff, peak_ttl, handler, on_error, on_eos):
        """Virtual pipeline that measures audio levels on a PulseAudio source.

        source: pulsesrc device name.
        multichannel: True for 2-channel caps, False for 1-channel.
        sample_rate: requested sample rate (Hz).
        interval: 'level' message interval, in milliseconds.
        peak_falloff: forwarded to the level element's 'peak-falloff'.
        peak_ttl: peak time-to-live, in milliseconds.
        handler: callback registered for 'message::element' bus messages.
        on_error/on_eos: forwarded to BaseVirtualPipeline.
        """
        BaseVirtualPipeline.__init__(self, on_error, on_eos)
        self.__filters = []
        self.handlers.append(('message::element', handler))

        # Audio source.
        self.__audio_source = Gst.ElementFactory.make('pulsesrc', None)
        self.__audio_source.set_property('device', source)
        self.elements.append(self.__audio_source)

        # NOTE(review): self.__caps is built but never applied to any
        # element in this method, and __caps_filter is actually an
        # audioconvert, not a capsfilter -- confirm the caps are used when
        # the elements are linked elsewhere.
        self.__caps = Gst.caps_from_string(
            "audio/x-raw, channels=(int){}, rate=(int){}".format(
                2 if multichannel else 1, sample_rate))
        self.__caps_filter = Gst.ElementFactory.make('audioconvert', None)
        self.elements.append(self.__caps_filter)

        # Level messages.
        self.__level_analyser = Gst.ElementFactory.make('level', None)
        self.__level_analyser.set_property('interval', interval * Gst.MSECOND)
        self.__level_analyser.set_property('peak-falloff', peak_falloff)
        self.__level_analyser.set_property('peak-ttl', peak_ttl * Gst.MSECOND)
        self.elements.append(self.__level_analyser)

        # Sink
        self.__sink = Gst.ElementFactory.make('fakesink', None)
        self.elements.append(self.__sink)
Esempio n. 23
0
  def on_caps_change_clicked(self, widget):
    """Apply the width/height spin-button values to the capsfilter."""
    new_width = int(self.width_spin.get_value())
    new_height = int(self.height_spin.get_value())

    caps_string = ("video/x-raw,width=(int)" + str(new_width) +
                   ",height=(int)" + str(new_height))
    print("setting caps to: " + str(new_width) + ", " + str(new_height))
    self.capsfilter.set_property("caps", Gst.caps_from_string(caps_string))
 def start(self):
     """Capture /dev/video0 via v4l2src, convert to RGBA and deliver
     frames to on_frame_cpu through an appsink."""
     Media.start(self)
     # Element creation
     camera = Gst.ElementFactory.make('v4l2src', None)
     colorspace = Gst.ElementFactory.make("videoconvert", None)
     app_sink = Gst.ElementFactory.make('appsink', None)
     app_sink.set_property(
         "caps", Gst.caps_from_string("video/x-raw, format=RGBA"))
     # Populate the pipeline
     for element in (camera, colorspace, app_sink):
         self.pipeline.add(element)
     # Configure elements
     camera.set_property('device', "/dev/video0")
     app_sink.set_property('emit-signals', True)
     # turns off sync to make decoding as fast as possible
     app_sink.set_property('sync', False)
     app_sink.connect('new-sample', self.on_frame_cpu)
     # Wire the chain together
     camera.link(colorspace)
     colorspace.link(app_sink)
     # Creates a bus and set callbacks to receive errors
     bus = self.pipeline.get_bus()
     bus.add_signal_watch()
     bus.connect("message::eos", self.on_eos)
     bus.connect("message::error", self.on_error)
     self.pipeline.set_state(Gst.State.PLAYING)
Esempio n. 25
0
    def __init__(self, name, pattern=0):
        """Test-pattern video source with a text overlay of the device name.

        name: device name (also rendered as overlay text).
        pattern: initial videotestsrc pattern index.
        """
        Device.__init__(self, name)
        self.ControlPanelClass = VideoTestGenControlPanel

        # Observable pattern; changes re-configure the source element.
        self.pattern = ObservableVariable(pattern)
        self.pattern.changed.connect(self.change_pattern)

        self.src = Gst.ElementFactory.make('videotestsrc', None)
        self.convert = Gst.ElementFactory.make('videoconvert', None)
        self.text_overlay = Gst.ElementFactory.make('textoverlay', None)
        self.caps_filter = Gst.ElementFactory.make('capsfilter', None)
        for element in (self.src, self.convert, self.text_overlay,
                        self.caps_filter):
            self.bin.add(element)

        self.src.set_property('pattern', self.pattern.get_value())
        self.text_overlay.set_property("text", self.name)
        self.text_overlay.set_property("shaded-background", True)
        self.caps_filter.set_property(
            'caps', Gst.caps_from_string(Device.DEFAULT_VIDEO_CAPS))

        # src -> overlay -> convert -> capsfilter
        self.src.link(self.text_overlay)
        self.text_overlay.link(self.convert)
        self.convert.link(self.caps_filter)

        self.add_output_video_port_on(self.caps_filter, "src")
Esempio n. 26
0
    def __init__(self, name):
        """Stereo audio test generator: two audiotestsrc elements
        interleaved into a single two-channel stream."""
        Device.__init__(self, name)
        self.ControlPanelClass = AudioTestGenControlPanel

        # Per-channel frequencies, observable so they can be retuned.
        self.freqs = ObservableVariable([1000, 1000])
        self.freqs.changed.connect(self.change_freqs)

        mono_caps = Gst.caps_from_string(self.SINGLE_CHANNEL_AUDIO_CAPS)

        self.src0 = Gst.ElementFactory.make('audiotestsrc', None)
        self.src1 = Gst.ElementFactory.make('audiotestsrc', None)
        self.interleave = Gst.ElementFactory.make('interleave', None)
        for element in (self.src0, self.src1, self.interleave):
            self.bin.add(element)
        self.src0.set_property('is-live', True)
        self.src1.set_property('is-live', True)

        # Force both branches to single-channel caps before interleaving.
        self.src0.link_filtered(self.interleave, mono_caps)
        self.src1.link_filtered(self.interleave, mono_caps)

        self.add_output_audio_port_on(self.interleave, "src")

        self.change_freqs(self.freqs.get_value())
Esempio n. 27
0
    def __init__(self):
        """H.264 encoder bin: queue -> videoconvert -> videoscale
        (640x480) -> x264enc -> queue, exposed via 'sink'/'src' ghost
        pads."""
        super(VideoEncoder, self).__init__()

        # Create elements
        q1 = Gst.ElementFactory.make('queue', None)
        convert = Gst.ElementFactory.make('videoconvert', None)
        scale = Gst.ElementFactory.make('videoscale', None)
        enc = Gst.ElementFactory.make('x264enc', None)
        q2 = Gst.ElementFactory.make('queue', 'q2')
        self.enc = enc

        # Add elements to Bin
        # (was a Python 2 `print q1` statement -- fixed for Python 3)
        print(q1)
        self.add(q1)
        self.add(convert)
        self.add(scale)
        self.add(enc)
        self.add(q2)

        # Set properties
        scale.set_property('method', 3)  # lanczos, highest quality scaling

        # Link elements
        q1.link(convert)
        convert.link(scale)
        # Constrain the scaler's output to 640x480 before encoding.
        scale.link_filtered(
            enc, Gst.caps_from_string('video/x-raw, width=640, height=480'))
        enc.link(q2)

        # Add Ghost Pads
        self.add_pad(Gst.GhostPad.new('sink', q1.get_static_pad('sink')))
        self.add_pad(Gst.GhostPad.new('src', q2.get_static_pad('src')))
Esempio n. 28
0
    def init_request(self, id, caps_str):
        """Initialize the pipeline for request *id*.

        Records the request id and caps string, applies the caps to the
        appsrc (clearing them when caps_str is empty), optionally
        re-targets the filesink to <outdir>/<id>.raw, then moves pipeline
        and filesink to PLAYING.
        """
        self.request_id = id
        self._request_id = id
        self.caps_str = caps_str
        logger.info("%s: Initializing request" % (self.request_id))
        if caps_str and len(caps_str) > 0:
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            #caps = Gst.caps_from_string("")
            self.appsrc.set_property("caps", None)
            #self.pipeline.set_state(Gst.State.READY)
            pass
        #self.appsrc.set_state(Gst.State.PAUSED)

        if self.outdir:
            # The filesink is driven through NULL here, presumably because
            # 'location' may only change while the element is stopped.
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location',
                                       "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)

        #self.filesink.set_state(Gst.State.PLAYING)
        #self.decodebin.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
Esempio n. 29
0
    def push(self):
        """Push one randomly chosen JPEG (images/1.jpg .. images/4.jpg)
        into the appsrc as a GstSample, if pushing is currently allowed."""
        if self.is_push_buffer_allowed:
            image_number = randint(1, 4)
            filename = 'images/%s.jpg' % image_number
            # Read the encoded image; the context manager guarantees the
            # handle is closed even if read() raises (the original leaked
            # the handle on error).
            with open(filename, "rb") as handle:
                data = handle.read()
            # Allocate GstBuffer and copy the JPEG bytes into it
            buf = Gst.Buffer.new_allocate(None, len(data), None)
            buf.fill(0, data)
            # Create GstSample carrying the JPEG caps
            sample = Gst.Sample.new(
                buf,
                Gst.caps_from_string("image/jpeg,framerate=(fraction)30/1"),
                None, None)
            # Push Sample on appsrc
            gst_flow_return = self._src.emit('push-sample', sample)

            if gst_flow_return != Gst.FlowReturn.OK:
                print('We got some error, stop sending data')

        else:
            print('It is enough data for buffer....')
Esempio n. 30
0
    def _create_main_pipeline(self, device, size, fps, sync):
        """Build the v4l2src -> appsink capture pipeline.

        device: v4l2 device path.
        size: indexed as (size[1], size[0]) for width, height -- presumably
        (height, width); confirm against callers.
        fps: requested framerate (frames per second).
        sync: whether the appsink syncs buffers to the clock.
        """
        self.pipeline = Gst.Pipeline()

        self.source = Gst.ElementFactory.make('v4l2src', 'source')
        self.source.set_property('device', device)
        # do-timestamp is a gboolean; pass a real bool instead of the
        # string 'true' (a non-empty string is only coincidentally truthy).
        self.source.set_property('do-timestamp', True)
        # run 'v4l2-ctl --list-ctrls' for full list of controls
        # NOTE(review): the '{0}' placeholders below are never .format()ed,
        # so the literal text '{0}' reaches structure_from_string --
        # confirm whether .format(0) was intended here.
        struct, _ = Gst.structure_from_string('name,\
                                               white_balance_temperature_auto=(bool){0},\
                                               backlight_compensation=(int){0},\
                                               exposure_auto=0,\
                                               focus_auto=(bool){0}')
        self.source.set_property('extra-controls', struct)

        caps = Gst.caps_from_string(
            'video/x-raw,format=(string){BGR},width=%d,height=%d,framerate=%d/1'
            % (size[1], size[0], fps))
        self.sink = Gst.ElementFactory.make('appsink', 'sink')
        self.sink.set_property('emit-signals', True)
        self.sink.set_property('sync', sync)
        # Keep only the newest frame.
        self.sink.set_property('drop', True)
        self.sink.set_property('max-buffers', 1)
        self.sink.set_property('caps', caps)
        self.sink.emit('pull-preroll')

        self.pipeline.add(self.source)
        self.pipeline.add(self.sink)

        Gst.Element.link(self.source, self.sink)
Esempio n. 31
0
def run_client(card=DEFAULT_ALSASINK,host=DEFAULT_HOST,port=DEFAULT_PORT,rate=DEFAULT_RATE):
    """Receive a raw L24 stereo RTP stream over UDP and play it on the
    given ALSA sink. Blocks in a GLib main loop."""
    pipeline = Gst.Pipeline()

    receiver = Gst.ElementFactory.make("udpsrc", "source")
    receiver.set_property("address", host)
    receiver.set_property("port", port)
    rtp_caps = ("application/x-rtp, media=(string)audio, clock-rate=(int){0}, "
                "encoding-name=(string)L24, encoding-params=(string)2, "
                "channels=(int)2, payload=(int)96").format(rate)
    receiver.set_property("caps", Gst.caps_from_string(rtp_caps))

    depay = Gst.ElementFactory.make("rtpL24depay", "deserializer")
    pcm_convert = Gst.ElementFactory.make("audioconvert", "converter")
    speaker = Gst.ElementFactory.make("alsasink", "sink")
    speaker.set_property("device", card)
    speaker.set_property("sync", False)

    # Add everything, then link each element to its successor.
    elements = (receiver, depay, pcm_convert, speaker)
    for element in elements:
        pipeline.add(element)
    for upstream, downstream in zip(elements, elements[1:]):
        upstream.link(downstream)

    pipeline.set_state(Gst.State.PLAYING)

    loop = GObject.MainLoop()
    loop.run()
Esempio n. 32
0
    def __init__(self):
        """Receive a Theora RTP stream on UDP port 5004, decode it and
        display it on an auto-selected video sink."""
        super(VideoInBin, self).__init__()

        rtp_in = Gst.ElementFactory.make("udpsrc", None)
        rtp_in.set_property("port", 5004)
        rtp_in.set_property("caps", Gst.caps_from_string(VIDEO_RTP_CAPS))

        depayloader = Gst.ElementFactory.make("rtptheoradepay", None)
        decoder = Gst.ElementFactory.make("theoradec", None)
        colorspace = Gst.ElementFactory.make("videoconvert", None)
        display = Gst.ElementFactory.make("autovideosink", None)

        for element in (rtp_in, depayloader, decoder, colorspace, display):
            self.add(element)

        # udpsrc -> depay -> decode -> convert -> sink
        rtp_in.link(depayloader)
        depayloader.link(decoder)
        decoder.link(colorspace)
        colorspace.link(display)
Esempio n. 33
0
    def init_request(self, id, caps_str):
        """Prepare the pipeline for request *id*.

        Applies caps_str to the appsrc when given. NOTE(review): unlike
        sibling variants, the empty-caps branch changes nothing here, so
        caps from a previous request may persist -- confirm intended.
        """
        self.request_id = id
        if caps_str and len(caps_str) > 0:
            logger.info("%s: Setting caps to %s" % (self.request_id, caps_str))
            caps = Gst.caps_from_string(caps_str)
            self.appsrc.set_property("caps", caps)
        else:
            #caps = Gst.caps_from_string("")
            #self.appsrc.set_property("caps", caps)
            #self.pipeline.set_state(Gst.State.READY)
            pass
        #self.appsrc.set_state(Gst.State.PAUSED)

        if self.outdir:
            # The filesink is driven through NULL here, presumably because
            # 'location' may only change while the element is stopped.
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property('location', "%s/%s.raw" % (self.outdir, id))
            self.filesink.set_state(Gst.State.PLAYING)

        #self.filesink.set_state(Gst.State.PLAYING)
        #self.decodebin.set_state(Gst.State.PLAYING)
        self.pipeline.set_state(Gst.State.PLAYING)
        self.filesink.set_state(Gst.State.PLAYING)
        # push empty buffer (to avoid hang on client diconnect)
        buf = Gst.Buffer.new_allocate(None, 0, None)
        self.appsrc.emit("push-buffer", buf)
Esempio n. 34
0
    def __init__(self):
        """Receive a Speex RTP stream on UDP port 5005, decode it and
        play it on an auto-selected audio sink."""
        super(AudioInBin, self).__init__()

        rtp_in = Gst.ElementFactory.make("udpsrc", None)
        rtp_in.set_property("port", 5005)
        rtp_in.set_property("caps", Gst.caps_from_string(AUDIO_RTP_CAPS))

        depayloader = Gst.ElementFactory.make("rtpspeexdepay", None)
        decoder = Gst.ElementFactory.make("speexdec", None)
        speaker = Gst.ElementFactory.make("autoaudiosink", None)

        for element in (rtp_in, depayloader, decoder, speaker):
            self.add(element)

        # udpsrc -> depay -> decode -> sink
        rtp_in.link(depayloader)
        depayloader.link(decoder)
        decoder.link(speaker)
Esempio n. 35
0
    def __init__(self):
        """AAC encoder bin: queue -> audioresample -> audioconvert ->
        audiorate -> faac -> queue, exposed via 'sink'/'src' ghost pads."""
        super(AudioEncoder, self).__init__()

        # Create elements
        q1 = Gst.ElementFactory.make('queue', None)
        resample = Gst.ElementFactory.make('audioresample', None)
        convert = Gst.ElementFactory.make('audioconvert', None)
        rate = Gst.ElementFactory.make('audiorate', None)
        enc = Gst.ElementFactory.make('faac', None)
        q2 = Gst.ElementFactory.make('queue', None)

        # Add elements to Bin
        self.add(q1)
        self.add(resample)
        self.add(convert)
        self.add(rate)
        self.add(enc)
        self.add(q2)

        # Link elements
        q1.link(resample)
        resample.link(convert)
        convert.link(rate)
        # BUG FIX: the original called rate.link(enc) and then
        # rate.link_filtered(enc, ...); the filtered link failed because
        # the pads were already linked, silently dropping the channels=2
        # restriction. Link once, with the caps filter.
        rate.link_filtered(enc, Gst.caps_from_string('audio/x-raw,channels=2'))
        enc.link(q2)

        # Add Ghost Pads
        self.add_pad(Gst.GhostPad.new('sink', q1.get_static_pad('sink')))
        self.add_pad(Gst.GhostPad.new('src', q2.get_static_pad('src')))
Esempio n. 36
0
    def __init__(self):
        """UDP/RTP Speex receiver bin: udpsrc -> rtpspeexdepay ->
        speexdec -> autoaudiosink."""
        super(AudioInBin, self).__init__()

        source = Gst.ElementFactory.make("udpsrc", None)
        source.set_property("port", 5005)
        source.set_property("caps", Gst.caps_from_string(AUDIO_RTP_CAPS))
        self.add(source)

        depay = Gst.ElementFactory.make("rtpspeexdepay", None)
        self.add(depay)

        decode = Gst.ElementFactory.make("speexdec", None)
        self.add(decode)

        out = Gst.ElementFactory.make("autoaudiosink", None)
        self.add(out)

        # Chain the elements together in stream order.
        previous = source
        for nxt in (depay, decode, out):
            previous.link(nxt)
            previous = nxt
Esempio n. 37
0
    def push(self, imgRawBytes, caps, seq_num, timestamp):
        """Wrap raw image bytes in a GstSample and push it to the appsrc.

        Returns True when the sample was accepted, False otherwise (push
        disallowed or flow error).
        """
        # Guard clause: drop the frame when pushing is disallowed.
        if not self.is_push_buffer_allowed:
            logging.info(
                'Cannot push buffer forward and hence dropping frame with seq_num '
                + str(seq_num))
            return False

        # Copy the raw image into a freshly allocated GstBuffer.
        gst_buf = Gst.Buffer.new_allocate(None, len(imgRawBytes), None)
        gst_buf.fill(0, imgRawBytes)

        # Attach sequence number / timestamp metadata to the buffer.
        add_message(gst_buf, seq_num, timestamp)

        # Wrap it with the given caps and hand it to the appsrc.
        outcome = self._src.emit(
            'push-sample',
            Gst.Sample.new(gst_buf, Gst.caps_from_string(caps), None, None))

        if outcome != Gst.FlowReturn.OK:
            logging.info('We got some error, stop sending data')
            return False
        return True
Esempio n. 38
0
    def try_start_viewer(self):
        """(Re)build the camera preview pipeline into the GTK viewport.

        Returns True when the viewer could NOT be started (caller should
        retry later) and False on success.
        """
        if self.return_queue is None or self.return_queue.empty():
            return True
        self.return_queue.get(False)

        # window = self.builder.get_object("MainWindow")
        sink, widget = None, None
        # gtkglsink = Gst.ElementFactory.make("gtkglsink")
        # if gtkglsink is not None:
        #     print("Using GTKGLSink")
        #     glsinkbin = Gst.ElementFactory.make("glsinkbin")
        #     glsinkbin.set_property("sink", gtkglsink)
        #     widget = gtkglsink.get_property("widget")
        #     sink = glsinkbin
        # else:
        print("Using GTKSink")
        sink = Gst.ElementFactory.make("gtksink")
        # BUG FIX: check for a missing gtksink *before* dereferencing it;
        # the original called sink.get_property first and would crash when
        # the element factory returned None.
        if sink is None:
            return True
        widget = sink.get_property("widget")

        viewport = self.builder.get_object("camera_viewport")
        if self.av_widget is not None:
            viewport.remove(self.av_widget)

        viewport.add(widget)
        self.av_widget = widget
        viewport.show_all()

        try:
            pipeline = self.pipeline
            if pipeline is None:
                pipeline = Gst.Pipeline()
            else:
                pipeline.set_state(Gst.State.NULL)

            # src = Gst.parse_launch("v4l2src device=/dev/video20 ! video/x-raw ! videoconvert")
            src = Gst.ElementFactory.make("v4l2src")
            src.set_property("device", "/dev/video20")
            jpg = Gst.ElementFactory.make("jpegdec")
            conv = Gst.ElementFactory.make("videoconvert")
            caps = Gst.caps_from_string("video/x-raw")
            pipeline.add(src)
            pipeline.add(jpg)
            pipeline.add(conv)
            pipeline.add(sink)
            src.link(jpg)
            jpg.link_filtered(conv, caps)
            conv.link(sink)
            self.pipeline = pipeline
            self.av_src = src
            self.av_conv = conv
            self.av_sink = sink
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return True

        self.pipeline.set_state(Gst.State.PLAYING)
        return False
Esempio n. 39
0
    def on_caps_change_clicked(self, widget):
        """Push the current spin-button dimensions into the capsfilter."""
        new_w = int(self.width_spin.get_value())
        new_h = int(self.height_spin.get_value())

        caps_string = ("video/x-raw,width=(int)" + str(new_w) +
                       ",height=(int)" + str(new_h))
        print("setting caps to: " + str(new_w) + ", " + str(new_h))
        self.capsfilter.set_property("caps", Gst.caps_from_string(caps_string))
Esempio n. 40
0
 def testCaps(self):
     """caps_repr renders multi-structure caps in canonical form."""
     caps = Gst.caps_from_string(
         'video/x-raw,width=10,framerate=5.0;video/x-raw,'
         'width=15,framerate=10.0')
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # use assertEqual.
     self.assertEqual(gstreamer.caps_repr(caps),
         'video/x-raw, width=(int)10, '
                       'framerate=(double)5; video/x-raw, '
                       'width=(int)15, framerate=(double)10')
Esempio n. 41
0
    def __init__(self, dest, name):
        """Serve the incoming H.264 stream as WebM/VP8 over TCP.

        dest: dict with 'ip' and 'port' for the tcpserversink.
        name: base name for this bin and its webmmux child.

        Chain: queue -> avdec_h264 -> videoscale -> capsfilter
        (320x180@5fps) -> videoconvert -> vp8enc -> webmmux ->
        capsfilter(video/webm) -> tcpserversink, exposed via a 'sink'
        ghost pad.
        """
        super(WebServiceBin, self).__init__(None, name+'_websrv')

        web_q = Gst.ElementFactory.make('queue', None)
        self.add(web_q)

        dec = Gst.ElementFactory.make('avdec_h264', None)
        self.add(dec)

        scale = Gst.ElementFactory.make('videoscale', None)
        self.add(scale)
        # Downscale to a small, low-rate preview before re-encoding.
        filter1 = Gst.ElementFactory.make('capsfilter', None)
        filter1.set_property('caps', Gst.caps_from_string("video/x-raw, width=320, height=180, framerate=(fraction)5/1"))
        self.add(filter1)
        vidconv = Gst.ElementFactory.make('videoconvert', None)
        self.add(vidconv)
        vp8enc = Gst.ElementFactory.make('vp8enc', None)
        self.add(vp8enc)

        webmmux = Gst.ElementFactory.make('webmmux', name+'_webmmux')
        self.add(webmmux)

        filter2 = Gst.ElementFactory.make('capsfilter', None)
        filter2.set_property('caps', Gst.caps_from_string('video/webm'))
        self.add(filter2)

        srvsink = Gst.ElementFactory.make('tcpserversink', None)
        srvsink.set_property('host', dest['ip'])
        srvsink.set_property('port', dest['port'])
        self.add(srvsink)

        web_q.link(dec)
        dec.link(scale)
        scale.link(filter1)
        filter1.link(vidconv)
        vidconv.link(vp8enc)

        # The muxer's video pad is request-based; grab one explicitly and
        # link the encoder's src pad to it.
        v_pad = vp8enc.get_static_pad('src')
        v_pad.link(webmmux.get_request_pad('video_%u'))

        webmmux.link(filter2)
        filter2.link(srvsink)

        g_pad = Gst.GhostPad.new('sink', web_q.get_static_pad('sink'))
        self.add_pad(g_pad)
Esempio n. 42
0
    def add(self, element_name: str, *args, **props):
        """Create a Gst element, apply *props* (a 'caps' value is parsed
        from its string form), add it to the pipeline and return it."""
        element = Gst.ElementFactory.make(element_name, *args)
        for prop_name, value in props.items():
            element.set_property(
                prop_name,
                Gst.caps_from_string(value) if prop_name == 'caps' else value)

        self.pipeline.add(element)
        return element
Esempio n. 43
0
 def getAudioCaps(self):
     """Return the GstCaps corresponding to the audio settings."""
     # TODO: Figure out why including 'depth' causes pipeline failures:
     caps_str = "audio/x-raw,rate=%d,channels=%d" % (self.audiorate,
                                                     self.audiochannels)
     audio_caps = Gst.caps_from_string(caps_str)
     if self.aencoder:
         # Intersect with what the configured encoder can accept.
         return get_compatible_sink_caps(self.aencoder, audio_caps)
     return audio_caps
    def init_request(self, id, caps_str):
        """Prepare the recognizer pipeline for a new request.

        Sets the appsrc caps for the request (cleared when caps_str is
        empty), re-targets the filesink to <outdir>/<id>.raw when outdir
        is configured, then transitions pipeline and filesink to PLAYING,
        exiting the process on failure.
        """
        self.request_id = id
        self.log.info("Initializing request: %s" % (self.request_id))

        # caps (capabilities) is media type (or content type)
        if caps_str and len(caps_str) > 0:
            self.appsrc.set_property('caps', Gst.caps_from_string(caps_str))
            self.log.info(
                "Set appsrc property: %s = %s" %
                ('caps', self.appsrc.get_property('caps').to_string()))
        else:
            self.appsrc.set_property('caps', None)

        # make sure decoder is not silent
        self.asr.set_property('silent', False)

        if self.outdir:  # change filesink location property /dev/null -> outdir
            self.pipeline.set_state(Gst.State.PAUSED)
            self.filesink.set_state(Gst.State.NULL)
            self.filesink.set_property(
                'location', '%s/%s.raw' % (self.outdir, self.request_id))
            self.filesink.set_state(Gst.State.PLAYING)

        ret = self.pipeline.set_state(Gst.State.PLAYING)
        if ret == Gst.StateChangeReturn.FAILURE:
            print("ERROR: Unable to set the pipeline to the PLAYING state",
                  file=sys.stderr)
            sys.exit(-1)
        else:
            self.log.info("Setting pipeline to PLAYING: %s" %
                          (Gst.Element.state_change_return_get_name(ret)))

        # BUG FIX: the original called filesink.set_state(PLAYING) twice
        # (once into `ret`, once inside the condition) and then tested the
        # second call's result while logging the first's. Set the state
        # once and use that single result for both check and log.
        ret = self.filesink.set_state(Gst.State.PLAYING)
        if ret == Gst.StateChangeReturn.FAILURE:
            print("ERROR: Unable to set the filesink to the PLAYING state",
                  file=sys.stderr)
            sys.exit(-1)
        else:
            self.log.info("Setting filesink to PLAYING: %s" %
                          (Gst.Element.state_change_return_get_name(ret)))
        # Create a new empty buffer
        #buf = Gst.Buffer.new_allocate(None, 0, None)
        #if buf:
        #    self.log.info("Pushing empty buffer to pipeline")
        #    # Push empty buffer into the appsrc (to avoid hang on client diconnect)
        #    self.appsrc.emit('push-buffer', buf)

        self.finished = False

        # Dump the pipeline graph for debugging (honors GST_DEBUG_DUMP_DOT_DIR).
        Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL,
                                  '%s_init' % self.request_id)
Esempio n. 45
0
def gst_rtp_pipeline(test_params):
    """Test fixture (generator): run a GStreamer RTP receive pipeline.

    Starts a mock RTSP server on ``test_params.rtsp_port`` serving an
    H.264 test pattern at /test, and builds a receive pipeline
    (udpsrc -> rtph264depay -> h264parse -> avdec_h264 -> appsink)
    listening on ``test_params.rtp_port``.  Yields the running
    GstMainLoopThread; on resumption (teardown) stops the loop and sets
    the pipeline to NULL.
    """
    print("starting gstreamer rtp pipeline..")
    # NOTE(review): GObject.threads_init() has been a deprecated no-op in
    # PyGObject since 3.11 -- confirm the minimum supported PyGObject
    # version before removing it.
    GObject.threads_init()
    Gst.init(None)

    # setup a mock RTSP server
    rtsp_server = GstRtspServer.RTSPServer.new()
    rtsp_media_factory = GstRtspServer.RTSPMediaFactory.new()
    mounts = rtsp_server.get_mount_points()
    rtsp_server.set_service(str(test_params.rtsp_port))
    rtsp_media_factory.set_launch('videotestsrc ! \
		     video/x-raw,format=RGB,width=640,height=480,framerate=30/1 ! \
		     videoconvert ! x264enc ! video/x-h264,profile=baseline ! rtph264pay name=pay0 pt=96')
    mounts.add_factory('/test', rtsp_media_factory)
    # attach(None) hooks the server into the default GLib main context.
    rtsp_server.attach(None)

    # setup a pipeline which will receive RTP video and decode it, calling new_gst_buffer() on every decoded frame
    udpsrc = Gst.ElementFactory.make("udpsrc", None)
    udpsrc.set_property('port', test_params.rtp_port)
    udpsrc.set_property('caps', Gst.caps_from_string('application/x-rtp, encoding-name=H264, payload=96'))
    rtph264depay = Gst.ElementFactory.make("rtph264depay", None)
    h264parse = Gst.ElementFactory.make("h264parse", None)
    avdec_h264 = Gst.ElementFactory.make("avdec_h264", None)
    sink = Gst.ElementFactory.make("appsink", None)

    pipeline = Gst.Pipeline.new("rtp-pipeline")
    pipeline.add(udpsrc)
    pipeline.add(rtph264depay)
    pipeline.add(h264parse)
    pipeline.add(avdec_h264)
    pipeline.add(sink)

    udpsrc.link(rtph264depay)
    rtph264depay.link(h264parse)
    h264parse.link(avdec_h264)
    avdec_h264.link(sink)

    # gst event loop/bus
    loop_thread = GstMainLoopThread()

    # Each decoded frame fires "new-sample" on the appsink.
    sink.set_property("emit-signals", True)
    sink.connect("new-sample", loop_thread.gst_new_buffer, sink)

    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", loop_thread.gst_bus_call, loop_thread.loop)

    # start play back and listed to events
    pipeline.set_state(Gst.State.PLAYING)
    loop_thread.start()

    # Hand control back to the test; code after the yield is teardown.
    yield loop_thread

    print("stopping gstreamer rtp pipeline..")
    loop_thread.loop.quit()
    pipeline.set_state(Gst.State.NULL)
Esempio n. 46
0
def create_gst_playback_pipeline(options):
    """Build the metadata-playback pipeline and return it (unstarted).

    Topology: (split)filesrc -> decodebin -> videoconvert -> BGRx
    capsfilter -> gvapython(FrameInfo, insert_metadata.py) ->
    gvawatermark -> videoconvert -> ximagesink.

    ``options.input_video_path`` may be a directory of .mp4 parts
    (splitfilesrc) or a single file (filesrc); for a single file the
    timestamp parsed from its name becomes the metadata PTS offset.
    decodebin's pads are linked dynamically via decodebin_pad_added.
    """
    global videoconvert1

    pipeline = Gst.Pipeline()

    start_pts = 0
    if os.path.isdir(options.input_video_path):
        source = Gst.ElementFactory.make("splitfilesrc")
        source.set_property("location", options.input_video_path + "/*.mp4")
    else:
        source = Gst.ElementFactory.make("filesrc")
        start_pts = get_timestamp_from_filename(options.input_video_path)
        source.set_property("location", options.input_video_path)

    decode_bin = Gst.ElementFactory.make("decodebin")
    videoconvert1 = Gst.ElementFactory.make("videoconvert")
    bgrx_filter = Gst.ElementFactory.make("capsfilter")
    bgrx_filter.set_property(
        "caps", Gst.caps_from_string("video/x-raw, format=(string){BGRx}"))

    # Attach the insert_metadata callback module to gvapython.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    metadata_script = os.path.join(
        script_dir, 'preproc_callbacks/insert_metadata.py')
    gva_python = Gst.ElementFactory.make("gvapython")
    gva_python.set_property("module", metadata_script)
    gva_python.set_property("class", "FrameInfo")

    # JSON-ish kwargs handed to the FrameInfo constructor.
    kwargs_template = ('{{ "metadata_file_path" : "{input_file}" , '
                      '"offset_timestamp" : {timestamp} }}')
    gva_python.set_property("kwarg", kwargs_template.format(
        input_file=options.metadata_file_path, timestamp=start_pts))

    watermark = Gst.ElementFactory.make("gvawatermark")
    videoconvert2 = Gst.ElementFactory.make("videoconvert")
    display_sink = Gst.ElementFactory.make("ximagesink")

    pipeline.add(source, decode_bin, videoconvert1, bgrx_filter, gva_python,
                 watermark, videoconvert2, display_sink)

    source.link(decode_bin)

    # decodebin exposes its source pad only once the stream type is known,
    # so the decodebin -> videoconvert1 link happens in the callback.
    decode_bin.connect("pad-added", decodebin_pad_added)

    # Link the static downstream chain.
    videoconvert1.link(bgrx_filter)
    bgrx_filter.link(gva_python)
    gva_python.link(watermark)
    watermark.link(videoconvert2)
    videoconvert2.link(display_sink)

    return pipeline
Esempio n. 47
0
  def create_pipeline(self):
    """Build the VP8 preview pipeline with parse_launch, attach the bus
    callback, constrain the camera output to 720p via the named
    capsfilter, and keep handles to the capsfilter and encoder for
    later runtime adjustment."""
    self.pipe = Gst.parse_launch("v4l2src ! videoscale ! clockoverlay ! capsfilter name=caps ! vp8enc end-usage=cbr resize-allowed=true name=enc target-bitrate=500000 deadline=200000 threads=1 cpu-used=16 ! vp8parse ! vp8dec ! autovideosink sync=false ")

    bus = self.pipe.get_bus()
    bus.add_watch(GLib.PRIORITY_DEFAULT, bus_callback, None)

    # Pin the height; the upstream videoscale satisfies the constraint.
    self.capsfilter = self.pipe.get_by_name("caps")
    self.capsfilter.set_property(
        "caps", Gst.caps_from_string("video/x-raw,height=(int)720"))
    self.encoder = self.pipe.get_by_name("enc")
Esempio n. 48
0
    def _add_logitech(self, bin, w=1920, h=1080):
        """Add the Logitech camera (/dev/video0) and an H.264 decoder to
        *bin*, linked with w x h @ 30fps H.264 caps.

        Returns the decoder element for downstream linking.
        """
        camera = Gst.ElementFactory.make('v4l2src', 'logitech')
        camera.set_property('device', '/dev/video0')
        bin.add(camera)

        decoder = Gst.ElementFactory.make('omxh264dec', None)
        bin.add(decoder)

        h264_caps = Gst.caps_from_string('video/x-h264, framerate=(fraction)30/1, width=(int)%d, height=(int)%d' % (w, h))
        camera.link_filtered(decoder, h264_caps)
        return decoder
 def set_sampling_rate(self, sr):
     '''Set the sampling rate of the logging device.

     Pauses grabbing, updates the capsfilter, then resumes.  The
     sampling rate must be given as an integer, e.g. 16000 for 16 kHz;
     the device will use the nearest rate it actually supports.
     '''
     self.pause_grabbing()
     # Fix: 'audio/x-raw-int,...,depth=16' is GStreamer 0.10 caps syntax.
     # This code uses the Gst 1.0 API (Gst.caps_from_string), where raw
     # audio is 'audio/x-raw' with an explicit sample format; 16-bit
     # signed little-endian is format=S16LE.  The old string parsed but
     # could never negotiate on a 1.0 pipeline.
     caps_str = 'audio/x-raw,format=S16LE,rate=%d,channels=%d' % (
         sr, self.channels)
     self.caps1.set_property('caps', Gst.caps_from_string(caps_str))
     self.resume_grabbing()
Esempio n. 50
0
 def _GetVideoEncoderCaps(self):
     """Return video/x-h264 caps restricted to the configured profile.

     Reads the "Profile" property: a recognised value yields a caps
     object, an unrecognised non-empty value is logged as a warning,
     and in every non-recognised case None is returned.
     """
     supported_profiles = {"main", "high", "baseline", "constrained-baseline"}
     profile = self.GetTypedProperty("Profile", str)
     if profile in supported_profiles:
         return Gst.caps_from_string(
             "video/x-h264,profile={}".format(profile))
     if profile:
         self._Log(logging.WARN, "value '%s' for profile not supported!",
                   profile)
     return None
Esempio n. 51
0
    def __init__(self):
        """Build a Gtk window with a DrawingArea and an RTP/H.264 receive
        pipeline (udpsrc:5008 -> rtph264depay -> avdec_h264 ->
        videoconvert -> autovideosink).  The pipeline is constructed but
        not started here.
        """
        self.window = Gtk.Window()
        self.window.connect("destroy", self.quit)
        self.window.set_default_size(1280, 720)

        self.box = Gtk.Box(spacing=6)

        # Video render target plus a placeholder label.
        self.drawingarea = Gtk.DrawingArea()
        self.drawingarea.set_size_request(640, 480)
        self.box.add(self.drawingarea)

        self.label = Gtk.Label("test")
        self.box.add(self.label)
        self.window.add(self.box)

        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message::eos", self.on_eos)
        self.bus.connect("message::error", self.on_error)
        # self.bus.connect('message', self.debug)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect("sync-message::element", self.on_sync_message)

        # RTP caps for the udpsrc -> depayloader link below; note they
        # are applied via link_filtered, not as a udpsrc property.
        self.caps = Gst.caps_from_string(
            "application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264"
        )

        # Create GStreamer elements
        self.udpsrc = Gst.ElementFactory.make("udpsrc", None)
        self.depay = Gst.ElementFactory.make("rtph264depay", None)
        self.decode = Gst.ElementFactory.make("avdec_h264", None)
        self.convert = Gst.ElementFactory.make("videoconvert", None)
        self.sink = Gst.ElementFactory.make("autovideosink", None)

        # Add playbin to the pipeline
        self.pipeline.add(self.udpsrc)
        self.pipeline.add(self.depay)
        self.pipeline.add(self.decode)
        self.pipeline.add(self.convert)
        self.pipeline.add(self.sink)

        # Link the chain; the first link carries the RTP caps filter.
        self.udpsrc.link_filtered(self.depay, self.caps)
        self.depay.link(self.decode)
        self.decode.link(self.convert)
        self.convert.link(self.sink)

        # Set properties
        self.udpsrc.set_property("port", 5008)
        # sync=False renders frames as soon as they are decoded.
        self.sink.set_property("sync", False)
Esempio n. 52
0
    def build_preview(self):
        """Assemble and start the preview pipeline, returning its bus.

        Topology: autovideosrc -> videorate -> videoscale ->
        capsfilter(CAPS) -> videoconvert -> ximagesink.  The capsfilter
        is kept on self.video_caps so callers can retune it later.
        """
        # Elements
        source = Gst.ElementFactory.make('autovideosrc', "video-source")
        rate = Gst.ElementFactory.make('videorate', None)
        scaler = Gst.ElementFactory.make("videoscale", None)
        scaler.set_property('add-borders', True)  # letterbox, don't distort
        caps_filter = Gst.ElementFactory.make("capsfilter", None)
        caps_filter.set_property("caps", Gst.caps_from_string(CAPS))
        converter = Gst.ElementFactory.make("videoconvert", None)
        sink = Gst.ElementFactory.make("ximagesink", "video-output")

        # Pipeline: add everything, then link in stream order.
        self.pipe = Gst.Pipeline()
        for element in (source, rate, scaler, caps_filter, converter, sink):
            self.pipe.add(element)

        source.link(rate)
        rate.link(scaler)
        scaler.link(caps_filter)
        caps_filter.link(converter)
        converter.link(sink)

        # Bus: async messages plus synchronous emission (needed to route
        # video output into a widget from the streaming thread).
        bus = self.pipe.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect('message', self.on_message)

        # Start immediately.
        self.pipe.set_state(Gst.State.PLAYING)

        # External reference for later caps adjustment.
        self.video_caps = caps_filter

        return self.pipe.get_bus()
Esempio n. 53
0
 def __init__(self, parent, name=None):
     """Recording bin: scale to 420x240, cap at 15 fps, H.264-encode
     (baseline profile) and deliver samples through an appsink whose
     "new-sample" signal calls self.on_new_sample.  A ghost 'sink' pad
     on the leading queue is the bin's single input."""
     super(RecordBin, self).__init__(parent, name)

     queue_in = Gst.ElementFactory.make('queue', None)
     scaler = Gst.ElementFactory.make('videoscale', None)
     size_filter = Gst.ElementFactory.make('capsfilter', 'filter1')
     size_filter.set_property(
         'caps', Gst.caps_from_string("video/x-raw, width=420, height=240"))
     rater = Gst.ElementFactory.make('videorate', None)
     fps_filter = Gst.ElementFactory.make('capsfilter', 'filter2')
     fps_filter.set_property(
         'caps', Gst.caps_from_string("video/x-raw, framerate=(fraction)15/1"))
     converter = Gst.ElementFactory.make('videoconvert', 'vidconv2')
     encoder = Gst.ElementFactory.make('x264enc', 'enc')
     encoder.set_property('tune', 0x00000004)  # x264enc tune flag 0x4 (zerolatency)
     profile_filter = Gst.ElementFactory.make('capsfilter', 'filter3')
     profile_filter.set_property(
         'caps', Gst.caps_from_string('video/x-h264, profile=baseline'))
     sample_sink = Gst.ElementFactory.make('appsink', 'recsink')
     sample_sink.set_property('emit-signals', True)
     sample_sink.set_property('async', False)
     sample_sink.connect('new-sample', self.on_new_sample, self.parent)

     # Add and link the whole chain in stream order.
     chain = (queue_in, scaler, size_filter, rater, fps_filter,
              converter, encoder, profile_filter, sample_sink)
     for element in chain:
         self.bin.add(element)
     for upstream, downstream in zip(chain, chain[1:]):
         upstream.link(downstream)

     # Expose the queue's sink pad as the bin's input.
     self.add_pad(Gst.GhostPad.new('sink', queue_in.get_static_pad('sink')))
Esempio n. 54
0
    def __init__(self):
        """Full-screen-ish Gtk video window fed by an RTP/H.264 receive
        pipeline (udpsrc:5004 -> rtph264depay -> avdec_h264 ->
        videoconvert -> xvimagesink).  Here the RTP caps are set as a
        udpsrc property (contrast: some siblings use link_filtered).
        The pipeline is constructed but not started.
        """
        super(Video, self).__init__()
        self.connect('destroy', self.quit)
        self.set_default_size(1366, 768)

        self.screen = self.get_screen()
        self.visual = self.screen.get_rgba_visual()
        self.set_decorated(True)
        self.image = Gtk.Image()

        #enable capture of keys
        self.add_events(Gdk.EventMask.KEY_PRESS_MASK)
        self.add_events(Gdk.EventMask.KEY_RELEASE_MASK)

        # Video render target fills the window.
        self.videowidget = Gtk.DrawingArea()
        self.add(self.videowidget)
        self.videowidget.set_size_request(*self.get_size())
        self.videowidget.show()

        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        # Create GStreamer elements
        self.src = Gst.ElementFactory.make('udpsrc', None)
        self.src.set_property("port", 5004)
        caps = Gst.caps_from_string('application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)H264')
        self.src.set_property("caps", caps)
        self.depay = Gst.ElementFactory.make("rtph264depay", None)
        # NOTE(review): udpsrc has a static src pad, so this "pad-added"
        # handler will likely never fire -- confirm whether it is needed.
        self.src.connect("pad-added", self.on_pad_added)
        self.decode = Gst.ElementFactory.make("avdec_h264", None)
        self.convert = Gst.ElementFactory.make("videoconvert",None)
        self.sink = Gst.ElementFactory.make('xvimagesink', None)
        # sync=False renders frames as soon as they are decoded.
        self.sink.set_property('sync', False)

        # Add elements to the pipeline
        self.pipeline.add(self.src)
        self.pipeline.add(self.depay)
        self.pipeline.add(self.decode)
        self.pipeline.add(self.convert)
        self.pipeline.add(self.sink)

        self.src.link(self.depay)
        self.depay.link(self.decode)
        self.decode.link(self.convert)
        self.convert.link(self.sink)
Esempio n. 55
0
    def _add_capture(self, bin, w=1920, h=1080):
        """Add the HDMI capture source (/dev/video1) and an I420
        w x h @ 30fps capsfilter to *bin*.

        Returns the capsfilter element for downstream linking.
        """
        capture = Gst.ElementFactory.make('v4l2src', 'hdmicapture')
        capture.set_property('device', '/dev/video1')
        bin.add(capture)

        caps_filter = Gst.ElementFactory.make('capsfilter', None)
        caps_filter.set_property('caps', Gst.caps_from_string('video/x-raw, format=(string)I420, framerate=(fraction)30/1, width=(int)%d, height=(int)%d' % (w, h)))
        bin.add(caps_filter)

        capture.link(caps_filter)
        return caps_filter
Esempio n. 56
0
    def add_output_audio_port_on(self, device, device_pad_name="src", port_name="out"):
        """Create an interaudiosink named '<bin-name>.<port_name>', register
        it as an audio output port, and link *device* into it with the
        default audio caps.  The element name doubles as the interaudio
        channel so the matching interaudiosrc side can find it."""
        channel = self.bin.get_name() + "." + port_name
        audio_sink = Gst.ElementFactory.make('interaudiosink', channel)
        self.bin.add(audio_sink)

        audio_sink.set_property('channel', channel)
        self.register_port(channel, port_name, 'audio', 'out')

        audio_sink_caps = Gst.caps_from_string(self.DEFAULT_AUDIO_CAPS)
        device.link_filtered(audio_sink, audio_sink_caps)
Esempio n. 57
0
    def add_input_audio_port_on(self, device, device_pad_name="sink", port_name="in"):
        """Create an interaudiosrc named '<bin-name>.<port_name>', register
        it as an audio input port, and link it into *device* with the
        default audio caps.

        NOTE(review): device_pad_name is unused here -- the link always
        targets the device's default pad; confirm whether explicit pad
        selection was intended.
        """
        element_name = self.bin.get_name() + "." + port_name
        interaudiosrc = Gst.ElementFactory.make('interaudiosrc', element_name)
        self.bin.add(interaudiosrc)

        # The interaudio channel shares the element's unique name so the
        # matching interaudiosink side can find it.
        channel = element_name
        interaudiosrc.set_property('channel', channel)
        self.register_port(channel, port_name, 'audio', 'in')

        caps = Gst.caps_from_string(self.DEFAULT_AUDIO_CAPS)
        interaudiosrc.link_filtered(device, caps)