def __init__(self, filename):
        Source.__init__(self)

        self.filename = filename
        filesrc = make_element('filesrc', {'location': self.filename})
        decodebin = make_element('decodebin')

        self.add(filesrc)
        self.add(decodebin)
        filesrc.link(decodebin)

        audiobin = Gst.parse_bin_from_description(FILE_AUDIO_BIN_STR, True)
        videobin = Gst.parse_bin_from_description(FILE_VIDEO_BIN_STR, True)

        self.add(audiobin)
        self.add(videobin)

        self.audio_srcpad = Gst.GhostPad.new('audio_src',
                                             audiobin.get_static_pad('src'))
        self.add_pad(self.audio_srcpad)

        self.video_srcpad = Gst.GhostPad.new('video_src',
                                             videobin.get_static_pad('src'))
        self.add_pad(self.video_srcpad)

        self.audiobin = audiobin
        self.videobin = videobin

        decodebin.connect('pad-added', self._new_decoded_pad)
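The _new_decoded_pad handler is not part of this example. A minimal sketch of what it typically does, assuming the parsed bins expose 'sink' ghost pads (mirroring the 'src' ghost pads used above):
    def _new_decoded_pad(self, decodebin, pad):
        # Sketch: route each decoded pad to the matching pre-built bin by caps.
        caps = pad.get_current_caps() or pad.query_caps(None)
        media = caps.get_structure(0).get_name()
        if media.startswith('audio/'):
            pad.link(self.audiobin.get_static_pad('sink'))
        elif media.startswith('video/'):
            pad.link(self.videobin.get_static_pad('sink'))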
Example #2
    def build_pipeline(self):
        # Create GStreamer elements
        self.videobin = Gst.parse_bin_from_description(
            'queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! '
            + self.video_sink, True)
        self.audiobin = Gst.parse_bin_from_description(
            'queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! '
            'audioconvert name=audiosink ! '
            + ('ladspa-sc4-1882-so-sc4 ratio=5 attack-time=5 release-time=120 threshold-level=-10 ! '
               'ladspa-fast-lookahead-limiter-1913-so-fastlookaheadlimiter input-gain=10 limit=-3 ! '
               if self.use_compressor
               else 'queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! ')
            + self.audio_sink, True)
        self.decodebin = Gst.ElementFactory.make('decodebin', 'dec')
        self.audioconvert_tee = Gst.ElementFactory.make(
            'audioconvert', 'audioconvert_tee')
        self.videoconvert_tee = Gst.ElementFactory.make(
            'videoconvert', 'videoconvert_tee')
        self.audiotee = Gst.ElementFactory.make('tee', 'audiotee')
        self.videotee = Gst.ElementFactory.make('tee', 'videotee')
        if self.add_sink:
            self.add_pipeline = Gst.parse_bin_from_description(
                self.add_sink, False)
            self.pipeline.add(self.add_pipeline)

        # Add everything to the pipeline
        self.pipeline.add(self.decodebin)
        self.pipeline.add(self.audioconvert_tee)
        self.pipeline.add(self.videoconvert_tee)
        self.pipeline.add(self.audiotee)
        self.pipeline.add(self.videotee)

        self.audioconvert_tee.link(self.audiotee)
        self.videoconvert_tee.link(self.videotee)

        self.decodebin.connect('pad-added', self.on_pad_added)
        self.decodebin.connect('no-more-pads', self.on_no_more_pads)
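The on_pad_added and on_no_more_pads handlers are not shown here. As a hedged sketch (element names follow the code above; the handler body is an assumption), the tees are usually wired to the parsed sink bins once decodebin has exposed all of its pads:
    def on_no_more_pads(self, decodebin):
        # Sketch: attach each parsed sink bin to its tee via a request pad.
        for tee, sinkbin in ((self.audiotee, self.audiobin),
                             (self.videotee, self.videobin)):
            self.pipeline.add(sinkbin)
            tee.get_request_pad('src_%u').link(sinkbin.get_static_pad('sink'))
            sinkbin.sync_state_with_parent()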
Example #3
    def create_video_capture_bin(self):

        # Running on RaspberryPi
        if ON_RPI:
            self.main_recordpipe = Gst.parse_bin_from_description(
                "queue name=filequeue ! deinterlace " +
                "! v4l2h264enc name=encoder " +
                "! h264parse config-interval=-1 !" +
                "mp4mux ! filesink name=filesink", True)

            self.temp_recordpipe = Gst.parse_bin_from_description(
                "queue name=filequeue ! deinterlace " +
                "! v4l2h264enc name=encoder " +
                "! h264parse config-interval=-1 !" +
                "mp4mux ! filesink name=filesink", True)
        # Running on PC
        else:
            self.main_recordpipe = Gst.parse_bin_from_description(
                "queue name=filequeue ! deinterlace " +
                "! x264enc tune=zerolatency bitrate=8000 name=encoder " +
                "! h264parse config-interval=-1 !" +
                "mp4mux ! filesink name=filesink", True)
            self.temp_recordpipe = Gst.parse_bin_from_description(
                "queue name=filequeue ! deinterlace " +
                "! x264enc tune=zerolatency bitrate=8000 name=encoder " +
                "! h264parse config-interval=-1 !" +
                "mp4mux ! filesink name=filesink", True)

        self.main_recordpipe.set_property("message_forward", "true")
        self.main_recordpipe.set_property("name", "recordbin1")

        self.temp_recordpipe.set_property("message_forward", "true")
        self.temp_recordpipe.set_property("name", "recordbin2")
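With message_forward enabled as above, EOS from inside a record bin reaches the application wrapped in a GstBinForwarded element message rather than arriving on the bus directly. A hedged sketch of the bus handler that usually accompanies this (the handler itself is an assumption, not part of the snippet):
    def on_bus_message(self, bus, msg):
        # Sketch: unwrap forwarded messages and react to the record bin's EOS.
        if msg.type == Gst.MessageType.ELEMENT:
            structure = msg.get_structure()
            if structure and structure.get_name() == 'GstBinForwarded':
                forwarded = structure.get_value('message')
                if forwarded.type == Gst.MessageType.EOS and \
                        forwarded.src == self.main_recordpipe:
                    # The recording branch has drained; safe to stop it now.
                    self.main_recordpipe.set_state(Gst.State.NULL)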
Example #4
    def build_pipeline(self):
        # Create GStreamer elements
        self.videobin = Gst.parse_bin_from_description('queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! '
            + self.video_sink, True)
        self.audiobin = Gst.parse_bin_from_description('queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! \
                audioconvert name=audiosink ! ' + \
                ('ladspa-sc4-1882-so-sc4 ratio=5 attack-time=5 release-time=120 threshold-level=-10 ! \
                ladspa-fast-lookahead-limiter-1913-so-fastlookaheadlimiter input-gain=10 limit=-3 ! ' if self.use_compressor
                else 'queue max-size-buffers=0 max-size-bytes=0 max-size-time=1000000000 ! ') \
                    + self.audio_sink, True)
        self.decodebin = Gst.ElementFactory.make('decodebin', 'dec')
        self.audioconvert_tee = Gst.ElementFactory.make('audioconvert', 'audioconvert_tee')
        self.videoconvert_tee = Gst.ElementFactory.make('videoconvert', 'videoconvert_tee')
        self.audiotee = Gst.ElementFactory.make('tee', 'audiotee')
        self.videotee = Gst.ElementFactory.make('tee', 'videotee')
        if self.add_sink:
            self.add_pipeline = Gst.parse_bin_from_description(self.add_sink, False)
            self.pipeline.add(self.add_pipeline)

        # Add everything to the pipeline
        self.pipeline.add(self.decodebin)
        self.pipeline.add(self.audioconvert_tee)
        self.pipeline.add(self.videoconvert_tee)
        self.pipeline.add(self.audiotee)
        self.pipeline.add(self.videotee)

        self.audioconvert_tee.link(self.audiotee)
        self.videoconvert_tee.link(self.videotee)

        self.decodebin.connect('pad-added', self.on_pad_added)
        self.decodebin.connect('no-more-pads', self.on_no_more_pads)
Example #5
    def src_pad_added(self, content, handle, stream, pad, codec):
        type = content.get_property("media-type")
        if type == Farstream.MediaType.AUDIO:
            sink = Gst.parse_bin_from_description(
                "audioconvert ! audioresample ! audioconvert ! autoaudiosink",
                True)
        elif type == Farstream.MediaType.VIDEO:
            sink = Gst.parse_bin_from_description(
                "videoconvert ! videoscale ! autovideosink", True)

        self.pipeline.add(sink)
        pad.link(sink.get_static_pad("sink"))
        sink.set_state(Gst.State.PLAYING)
Example #6
    def reinit_pipeline(self, uri):
        if self.pipeline.get_by_name('tee'):
            self.pipeline.remove(self.tee_queue)
        if self.pipeline.get_by_name('uri'):
            self.pipeline.remove(self.source)
        if self.pipeline.get_by_name('filesink'):
            self.pipeline.remove(self.filesink)

        if 'http://' in uri or 'https://' in uri:
            self.source = Gst.ElementFactory.make('souphttpsrc', 'uri')
            self.source.set_property(
                'user-agent', self.user_agent) if self.user_agent else None
            self.source.set_property('cookies',
                                     ['cf_clearance=' +
                                      self.cookie]) if self.cookie else None

            if self.file_save_dir and not os.path.isfile(
                    self.file_save_dir + '/' + os.path.basename(uri)):
                self.tee_queue = Gst.parse_bin_from_description(
                    'tee name=tee \
                                tee. ! queue name=filequeue \
                                tee. ! queue2 name=decodequeue use-buffering=true',
                    False)
                self.filesink = Gst.ElementFactory.make('filesink', 'filesink')
                self.filesink.set_property(
                    'location',
                    self.file_save_dir + '/' + os.path.basename(uri))
                self.filesink.set_property('async', False)
            else:
                self.tee_queue = Gst.parse_bin_from_description(
                    'tee name=tee ! queue2 name=decodequeue use-buffering=true',
                    False)
                self.filesink = None
        else:
            self.tee_queue = Gst.parse_bin_from_description(
                'tee name=tee ! queue2 name=decodequeue use-buffering=true',
                False)
            self.source = Gst.ElementFactory.make('filesrc', 'uri')
            self.filesink = None

        self.pipeline.add(self.tee_queue)
        self.pipeline.get_by_name('decodequeue').link(self.decodebin)
        self.pipeline.add(self.source)
        if self.filesink:
            self.pipeline.add(self.filesink)
            self.pipeline.get_by_name('filequeue').link(self.filesink)
        self.source.link(self.pipeline.get_by_name('tee'))
        self.source.set_property('location', uri)

        self.has_audio = False
        self.has_video = False
Example #7
    def __init__(self, options={}): 
        base.Base.__init__(self, options)
        Gst.Bin.__init__(self)

        # FIXME check route in conf/recorderui and define options
        if "background" not in self.options:
            background = path.join(path.dirname(path.abspath(galicaster.__file__)), "..", "resources", "bg.png")
        else:
            background = (path.join(path.dirname(path.abspath(galicaster.__file__)), "..", self.options["background"]))

        if self.options["drivertype"] == "v4l":
            driver_type = "v4lsrc"
        else:
            driver_type = "v4l2src"

        aux = (pipestr.replace("gc-epiphan-preview", "sink-" + self.options['name'])
                      .replace('gc-epiphan-enc', self.options['videoencoder'])
                      .replace('gc-epiphan-mux', self.options['muxer']))
        size = self.options['resolution']
        width, height = [int(a) for a in size.split(re.search('[,x:]', size).group())]
        bin_end = Gst.parse_bin_from_description(aux, True)
        logger.info("Setting background for Epiphan: %s", background)
        bin_start = Switcher("canguro", self.options['location'], background, 
                             driver_type, [width,height], self.options['framerate'])
        self.bin_start = bin_start
        self.add(bin_start, bin_end)
        bin_start.link(bin_end)

        sink = self.get_by_name("gc-epiphan-sink")
        sink.set_property('location', path.join(self.options['path'], self.options['file']))
Example #8
File: fs-gui.py  Project: pexip/farstream
 def make_source(self):
     AUDIOSOURCE = "audiotestsrc is-live=1 wave=1  ! volume name=v"
     #AUDIOSOURCE = "pulsesrc name=v"
     source = Gst.parse_bin_from_description(AUDIOSOURCE + " volume=" + str(INITIAL_VOLUME/100) + " ! level message=1 name=l", True)
     self.volume = source.get_by_name("v")
     self.level = source.get_by_name("l")
     return source
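The named elements are pulled out so the UI can drive them later; a hedged usage sketch (the callback name and the 0-100 slider range are assumptions, the 'volume' property is the element's standard one):
    def on_volume_changed(self, scale):
        # 'v' is an ordinary volume element, so just rescale the slider value.
        self.volume.set_property('volume', scale.get_value() / 100.0)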
Example #9
 def _setTestsrc(self):
     stringPipeline = "videotestsrc"
     self._bin = Gst.parse_bin_from_description(stringPipeline, True)
     self._pipeline.add(self._bin)
     self._pipeline.add(self._gtksink)
     # Link the pipeline to the sink that will display the video.
     self._bin.link(self._gtksink)
Example #10
File: uri.py  Project: streamuj/brave
 def create_audio_elements(self):
     bin = Gst.parse_bin_from_description(
         f'audiorate ! audioconvert ! audioresample ! {config.default_audio_caps()} ! '
         + 'queue ! interaudiosink name=interaudiosink', True)
     self.playsink.set_property('audio-sink', bin)
     self.interaudiosink = bin.get_by_name('interaudiosink')
     self.create_interaudiosrc_and_connections()
Example #11
File: player.py  Project: nesterow/semgtk
    def __init__(self, pipeline):
        super().__init__()
        # Only setup the widget after the window is shown.
        self.connect('realize', self._on_realize)

        # Parse a gstreamer pipeline and create it.
        self._bin = Gst.parse_bin_from_description(pipeline, True)
Example #12
    def __init__(self, options={}):
        raise Exception("Not implemented. Using gst 0.10")

        base.Base.__init__(self, options)
        Gst.Bin.__init__(self, self.options['name'])

        gcvideosink = get_videosink(videosink=self.options['videosink'], name='sink-'+self.options['name'])
        aux = pipestr.replace('gc-vsink', gcvideosink)
        bin = Gst.parse_bin_from_description(aux, True)
        # replace identity
        self.add(bin)

        element = self.get_by_name('gc-blackmagic-sink')
        element.set_property('location', path.join(self.options['path'], self.options['file']))
        
        element = self.get_by_name('gc-blackmagic-src')
        try:
            value = int(self.options['input'])
        except ValueError:
            value = self.options['input']                                
        element.set_property('input', value)

        element = self.get_by_name('gc-blackmagic-src')
        try:
            mode = int(self.options['input-mode'])
        except ValueError:
            mode = self.options['input-mode']                                
        element.set_property('input-mode', mode)

        for pos in ['right','left','top','bottom']:
            element = self.get_by_name('gc-blackmagic-crop')
            element.set_property(pos, int(self.options['videocrop-' + pos]))
Example #13
 def _consume_done(self, config, ghostPad, recv_rtp_socket,
                   recv_rtcp_socket):
     Gst.info('%s _consume_done %s' % (self.name, config))
     #
     desc = getConsumerPipelineDesc(config)
     Gst.debug('%s _produce_done desc=%s' % (self.name, desc))
     bin = Gst.parse_bin_from_description(desc, False)
     self.add(bin)
     rtpbin = bin.get_by_name('rtpbin')
     # setup sockets
     bin.get_by_name('rtp_udpsrc').set_property('socket', recv_rtp_socket)
     bin.get_by_name('rtcp_udpsrc').set_property('socket', recv_rtcp_socket)
     #
     # bin.set_state(Gst.State.PAUSED)
     # link ghost pad
     src_pad = bin.get_by_name('sink').get_static_pad('src')
     tmp_pad = ghostPad.get_target()
     ghostPad.set_target(src_pad)
     self.remove_pad(tmp_pad)
     #
     bin.set_state(Gst.State.PLAYING)
     #
     self.emit('consumer-added', config['consumerId'])
     #
     self.mediasoup.resumeConsumer(config['transportId'],
                                   config['consumerId'],
                                   self._resume_consumer_done,
                                   self._on_error, config)
Example #14
 def make_source(self):
     AUDIOSOURCE = "audiotestsrc is-live=1 wave=1  ! volume name=v"
     #AUDIOSOURCE = "pulsesrc name=v"
     source = Gst.parse_bin_from_description(AUDIOSOURCE + " volume=" + str(INITIAL_VOLUME/100) + " ! level message=1 name=l", True)
     self.volume = source.get_by_name("v")
     self.level = source.get_by_name("l")
     return source
Example #15
    def __init__(self, bin_desc):
        Gst.Bin.__init__(self)

        self.internal_bin = Gst.parse_bin_from_description(bin_desc, True)
        self.add(self.internal_bin)
        self.add_pad(Gst.GhostPad.new(None, self.internal_bin.sinkpads[0]))
        self.add_pad(Gst.GhostPad.new(None, self.internal_bin.srcpads[0]))
Example #16
    def __init__(self, options={}):
        raise Exception("Not implemented. Using gst 0.10")

        base.Base.__init__(self, options)
        Gst.Bin.__init__(self, self.options['name'])

        gcvideosink = get_videosink(videosink=self.options['videosink'],
                                    name='sink-' + self.options['name'])
        aux = pipestr.replace('gc-vsink', gcvideosink)
        bin = Gst.parse_bin_from_description(aux, True)
        # replace identity
        self.add(bin)

        element = self.get_by_name('gc-blackmagic-sink')
        element.set_property(
            'location', path.join(self.options['path'], self.options['file']))

        element = self.get_by_name('gc-blackmagic-src')
        try:
            value = int(self.options['input'])
        except ValueError:
            value = self.options['input']
        element.set_property('input', value)

        element = self.get_by_name('gc-blackmagic-src')
        try:
            mode = int(self.options['input-mode'])
        except ValueError:
            mode = self.options['input-mode']
        element.set_property('input-mode', mode)

        for pos in ['right', 'left', 'top', 'bottom']:
            element = self.get_by_name('gc-blackmagic-crop')
            element.set_property(pos, int(self.options['videocrop-' + pos]))
Example #17
    def startRecording(self):
        print("start recording : recording = %s" % self.Recording)
        if self.Recording:
            return

        fileName = "/tmp/spyCam.mp4"

        self.recPipe = Gst.parse_bin_from_description(
            """queue name=filequeue ! \
        h264parse ! mp4mux ! filesink async=false location=%s""" % fileName,
            True)

        self.pipe.add(self.recPipe)
        self.pipe.get_by_name("tee").link(self.recPipe)

        res = self.recPipe.set_state(Gst.State.PLAYING)
        print(res)

        if res == Gst.StateChangeReturn.SUCCESS or res == Gst.StateChangeReturn.ASYNC:
            self.Recording = True
            print "Recording"
        else:
            self.Recording = False
            print "NOT recording"

        return self.Recording
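A matching stopRecording is not included in this example. A hedged sketch of the usual teardown (names follow the snippet above; the two-step split is an assumption): push EOS into the branch so mp4mux can finalize the file, then drop the bin once that EOS has drained to the filesink.
    def stopRecording(self):
        if not self.Recording:
            return
        # Let mp4mux write its headers/index before the branch is torn down.
        filequeue = self.recPipe.get_by_name("filequeue")
        filequeue.get_static_pad("sink").send_event(Gst.Event.new_eos())
        self.Recording = False

    def dropRecordBin(self):
        # Call this once the EOS has reached the filesink (e.g. from a bus watch).
        self.pipe.get_by_name("tee").unlink(self.recPipe)
        self.pipe.remove(self.recPipe)
        self.recPipe.set_state(Gst.State.NULL)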
Example #18
    def get_kuscheduler(self, interval):
        if not gstreamer.element_factory_exists('keyunitsscheduler'):
            register()

        kubin = Gst.parse_bin_from_description('keyunitsscheduler interval=%s '
                'name=scheduler' % interval, True)
        self._kuscheduler = kubin.get_by_name('scheduler')
        return kubin
Example #19
    def __init__(self, bin_desc):
        Gst.Bin.__init__(self)
        Loggable.__init__(self)

        self.internal_bin = Gst.parse_bin_from_description(bin_desc, True)
        self.add(self.internal_bin)
        self.add_pad(Gst.GhostPad.new(None, self.internal_bin.sinkpads[0]))
        self.add_pad(Gst.GhostPad.new(None, self.internal_bin.srcpads[0]))
Example #20
    def create_image_capture_pipeline(self):
        self.capture_pipe = Gst.parse_bin_from_description(
            "queue name=capture_queue ! filesink name=jpegcapture " +
            "location=initial.jpg", True)

        # Create objects
        self.capture_queue = self.capture_pipe.get_by_name("capture_queue")
        self.capture_sink = self.capture_pipe.get_by_name("jpegcapture")
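filesink only allows its location to change while stopped, so writing the next snapshot somewhere other than initial.jpg needs a small helper along these lines (the method name is an assumption):
    def set_capture_location(self, path):
        # The location property is only writable in the NULL/READY states.
        self.capture_sink.set_state(Gst.State.NULL)
        self.capture_sink.set_property("location", path)
        self.capture_sink.sync_state_with_parent()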
Example #21
    def __init__(self, uri=None, name=None):
        ges_pipeline = Gst.ElementFactory.make("playbin", name)
        ges_pipeline.props.video_filter = Gst.parse_bin_from_description("videoflip method=automatic", True)
        SimplePipeline.__init__(self, ges_pipeline)

        self.__uri = None
        if uri:
            self.uri = uri
Example #22
def make_video_sink(pipeline, xid, name, sync=True):
    "Make a bin with a video sink in it, that will be displayed on xid."
    bin = Gst.parse_bin_from_description("videoconvert ! videoscale ! videoconvert ! xvimagesink", True)
    sink = bin.get_by_interface(GstVideo.VideoOverlay)
    assert sink
    bin.set_name("videosink_%d" % xid)
    sink.set_window_handle(xid)
    sink.props.sync = sync
    return bin
Example #23
File: fs-gui.py  Project: pexip/farstream
def make_video_sink(pipeline, xid, name, sync=True):
    "Make a bin with a video sink in it, that will be displayed on xid."
    bin = Gst.parse_bin_from_description("videoconvert ! videoscale ! videoconvert ! xvimagesink", True)
    sink = bin.get_by_interface(GstVideo.VideoOverlay)
    assert sink
    bin.set_name("videosink_%d" % xid)
    sink.set_window_handle(xid)
    sink.props.sync = sync
    return bin
Example #24
File: uri.py  Project: bevand10/brave
    def create_video_elements(self):
        bin_as_string = ('videoconvert ! videoscale ! capsfilter name=capsfilter ! '
                         'queue' + self.default_video_pipeline_string_end())
        bin = Gst.parse_bin_from_description(bin_as_string, True)

        self.capsfilter = bin.get_by_name('capsfilter')
        self.final_video_tee = bin.get_by_name('final_video_tee')
        self._update_video_filter_caps()
        self.playsink.set_property('video-sink', bin)
Example #25
    def _produce_done(self, config, ghostPad):
        Gst.info('%s _produce_done %s' % (self.name, config))
        #
        desc = getProducerPipelineDesc(config)
        Gst.info('%s _produce_done desc=%s' % (self.name, desc))
        bin = Gst.parse_bin_from_description(desc, False)
        self.add(bin)
        #
        # handle time display
        if self.clock_overlay:
            clock_overlay = bin.get_by_name('clock_overlay')

            def on_v_encoder_buffer(pad, info):
                clock_overlay.set_property(
                    'text',
                    datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f UTC'))
                return Gst.PadProbeReturn.OK

            clock_overlay.get_static_pad('video_sink').add_probe(
                Gst.PadProbeType.BUFFER, on_v_encoder_buffer)
        #
        bin.set_state(Gst.State.PAUSED)
        # create a udpsrc element with the same rtcp_udpsink socket
        rtcp_socket = bin.get_by_name('rtcp_udpsink').get_property(
            'used-socket')
        rtcp_udpsrc = Gst.ElementFactory.make('udpsrc')
        rtcp_udpsrc.set_property('name', 'rtcp_udpsrc')
        rtcp_udpsrc.set_property('socket', rtcp_socket)
        bin.add(rtcp_udpsrc)
        #recv_rtcp_sink_0 = rtpbin.get_request_pad('recv_rtcp_sink_0')
        #rtcp_udpsrc.get_static_pad('src').link(recv_rtcp_sink_0)
        #
        # rtcp_udpsrc -> tee --recv_rtcp_sink_0
        #                    `-recv_rtcp_sink_1
        #                    `-recv_rtcp_sink_2
        rtcp_udpsrc_tee = Gst.ElementFactory.make('tee')
        bin.add(rtcp_udpsrc_tee)
        rtcp_udpsrc.link(rtcp_udpsrc_tee)
        #
        rtpbin = bin.get_by_name('rtpbin')
        recv_rtcp_sink_0 = rtpbin.get_request_pad('recv_rtcp_sink_0')
        rtcp_udpsrc_tee.get_request_pad('src_0').link(recv_rtcp_sink_0)
        if config.get('simulcast', False):
            recv_rtcp_sink_1 = rtpbin.get_request_pad('recv_rtcp_sink_1')
            rtcp_udpsrc_tee.get_request_pad('src_1').link(recv_rtcp_sink_1)
            recv_rtcp_sink_2 = rtpbin.get_request_pad('recv_rtcp_sink_2')
            rtcp_udpsrc_tee.get_request_pad('src_2').link(recv_rtcp_sink_2)
        # link source ghost pad
        sink_pad = bin.get_by_name('src').get_static_pad('sink')
        tmp_pad = ghostPad.get_target()
        ghostPad.set_target(sink_pad)
        self.remove_pad(tmp_pad)
        #
        bin.set_state(Gst.State.PLAYING)
        #
        self.emit('producer-added', config['producerId'])
Example #26
    def __init__(self, bin_desc):
        Gst.Bin.__init__(self)
        Loggable.__init__(self)

        self.internal_bin = Gst.parse_bin_from_description(bin_desc, True)
        self.add(self.internal_bin)
        sinkpad, = [pad for pad in self.internal_bin.iterate_sink_pads()]
        self.add_pad(Gst.GhostPad.new(None, sinkpad))
        srcpad, = [pad for pad in self.internal_bin.iterate_src_pads()]
        self.add_pad(Gst.GhostPad.new(None, srcpad))
Example #27
 def start_record(self):
     # Filename (current time)
     filename = datetime.now().strftime("%Y-%m-%d_%H.%M.%S") + ".avi"
     print(filename)
     self.recordpipe = Gst.parse_bin_from_description(
         "queue name=filequeue ! jpegenc ! avimux ! filesink location=" +
         filename, True)
     self.pipeline.add(self.recordpipe)
     self.pipeline.get_by_name("tee").link(self.recordpipe)
     self.recordpipe.set_state(Gst.State.PLAYING)
Example #28
    def create_tcp_server_pipeline(self):
        # Create recording bin
        self.server_pipe = Gst.parse_bin_from_description(
            "queue name=server_queue ! multipartmux boundary=" +
            "\"--videoboundary\" ! tcpserversink " +
            "host={} port={}".format(IP_Address, PORT), True)
        self.server_pipe.set_property("message_forward", "true")
        self.server_pipe.set_property("name", "server")

        # Create objects
        self.server_queue = self.server_pipe.get_by_name("server_queue")
Example #29
    def __init__(self):
        Source.__init__(self)

        # self.video_caps = Gst.caps_from_string('application/x-rtp,media=video,encoding-name=VP8,payload=97,clock-rate=90000')
        # self.audio_caps = Gst.caps_from_string('application/x-rtp,media=audio,encoding-name=OPUS,payload=100,clock-rate=48000')
        # self.setup_bin()

        audiobin = Gst.parse_bin_from_description(TEST_AUDIO_BIN_STR, True)
        videobin = Gst.parse_bin_from_description(TEST_VIDEO_BIN_STR, True)

        self.add(audiobin)
        self.add(videobin)

        self.audio_srcpad = Gst.GhostPad.new('audio_src',
                                             audiobin.get_static_pad('src'))
        self.add_pad(self.audio_srcpad)

        self.video_srcpad = Gst.GhostPad.new('video_src',
                                             videobin.get_static_pad('src'))
        self.add_pad(self.video_srcpad)
Example #30
File: gstVid.py  Project: sumchege/Gtk-pi
    def start_recording():
        global recordpipeline

        filename = datetime.datetime.now().strftime(
            "%Y-%m-%d_%H.%M.%S") + ".avi"
        recordpipeline = Gst.parse_bin_from_description(
            "queue name=filequeue ! jpegenc ! avimux ! filesink location=" +
            filename, True)
        pipeline.add(recordpipeline)
        pipeline.get_by_name("tee").link(recordpipeline)
        recordpipeline.set_state(Gst.State.PLAYING)
Example #31
    def __init__(self, rtmpURL):
        Source.__init__(self)

        self.rtmpURL = rtmpURL
        rtmpsrc = make_element('rtmpsrc', {'location': self.rtmpURL})
        parsebin = make_element('parsebin')

        self.add(rtmpsrc)
        self.add(parsebin)

        self.video_srcpad = None
        self.audio_srcpad = None

        rtmpsrc.link(parsebin)
        parsebin.connect('pad-added', self._new_parsed_pad)

        self.audiobin = Gst.parse_bin_from_description(RTMP_AUDIO_BIN_STR,
                                                       True)
        self.videobin = Gst.parse_bin_from_description(RTMP_VIDEO_BIN_STR,
                                                       True)
Example #32
    def start_pipeline(self, video_formats, audio_only):
        pipeline = None
        video = None

        if not audio_only:
            for format_type in video_formats:
                pipeline = PIPELINES.get(format_type)
                if pipeline:
                    print('Video Format: ' + format_type)
                    break

            pipeline = pipeline or VP8_PIPELINE
            video = Gst.parse_bin_from_description(pipeline, True)

        audio = Gst.parse_bin_from_description(AUDIO_PIPELINE, True)

        webrtc = Gst.ElementFactory.make("webrtcbin", "sendonly")
        webrtc.set_property('bundle-policy', 'max-bundle')

        pipe = Gst.Pipeline.new('main')

        if video:
            pipe.add(video)
        pipe.add(audio)
        pipe.add(webrtc)

        if video:
            video.link(webrtc)

        audio.link(webrtc)

        self.pipe = pipe

        self.webrtc = self.pipe.get_by_name('sendonly')

        self.webrtc.connect('on-negotiation-needed',
                            self.on_negotiation_needed)
        self.webrtc.connect('on-ice-candidate',
                            self.send_ice_candidate_message)
        #self.webrtc.connect('notify::ice-connection-state', self.on_conn_changed)
        self.pipe.set_state(Gst.State.PLAYING)
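The negotiation callback wired above is not part of this snippet. A hedged sketch following the usual webrtcbin promise flow (sending the SDP over the signalling channel is left as a placeholder):
    def on_negotiation_needed(self, element):
        # Sketch: ask webrtcbin for an offer; the reply arrives via the promise.
        promise = Gst.Promise.new_with_change_func(self.on_offer_created, element, None)
        element.emit('create-offer', None, promise)

    def on_offer_created(self, promise, element, _user_data):
        promise.wait()
        offer = promise.get_reply().get_value('offer')
        promise = Gst.Promise.new()
        element.emit('set-local-description', offer, promise)
        promise.interrupt()
        # offer.sdp.as_text() is what goes out over the signalling channel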
Example #33
    def record_input(self, inputname):
        vals = self.get_input(inputname)
        vals['index'] = datetime.datetime.now().strftime('%a_%H:%M.%S')
        vals['file_prefix'] = self.file_prefix + vals['index']
        if vals['src']['type'] == 'decklinkvideosrc':
            if self.use_vaapi:
                enc = [
                  "vaapipostproc scale-method=hq",
                  "! video/x-raw, width={src[caps][width]}, height={src[caps][height]}",
                  "! vaapih264enc init-qp=23 keyframe-period=120",
                ]
            else:
                enc = [
                  "videoconvert",
                  "! videoscale",
                  "! video/x-raw, width={src[caps][width]}, height={src[caps][height]}",
                  "! x264enc pass=4 quantizer=23 speed-preset=ultrafast",
                ]
            capture_spec = [
                  ] + enc + [
                  "! mpegtsmux",
                  "! filesink location={file_prefix}_{name}.ts",
            ]
        elif vals['src']['type'] == 'decklinkaudiosrc':
            capture_spec = [
                "audioconvert",
                "! lamemp3enc quality=1 target=quality encoding-engine-quality=standard",
                "! id3v2mux ! filesink location={file_prefix}_{name}.mp3", 
            ]
        elif vals['src']['type'] == 'alsa':
            capture_spec = [
                "audioconvert",
                "! lamemp3enc quality=1 target=quality encoding-engine-quality=standard",
                "! id3v2mux ! filesink location={file_prefix}_{name}.mp3", 
            ]
        else:
            raise Exception('Unknown device type ' + vals['src']['type'])
        capture_spec = [l.format(**vals) for l in capture_spec]
        bin_spec = " ".join(capture_spec)
        logging.debug(bin_spec)
        self.bins[inputname] = Gst.parse_bin_from_description(bin_spec, True)

        queue = Gst.ElementFactory.make('queue')
        self.pipeline.add(queue)
        self.pipeline.add(self.bins[inputname])

        tee = self.pipeline.get_by_name('%s_rec_tee' % inputname)
        tee.link(queue)
        queue.link(self.bins[inputname])

        overlay = self.pipeline.get_by_name('%s_textoverlay' % inputname)
        overlay.set_property("text", inputname + " (REC)")
        overlay.set_property("color", 0xffff8060)
Example #34
File: uri.py  Project: streamuj/brave
    def create_video_elements(self):
        bin = Gst.parse_bin_from_description(
            f'videoconvert ! videoscale ! capsfilter name=capsfilter ! ' +
            'queue name=queue_into_intervideosink ! intervideosink name=intervideosink',
            True)

        self.capsfilter = bin.get_by_name('capsfilter')
        self._update_video_filter_caps()

        self.playsink.set_property('video-sink', bin)
        self.intervideosink = bin.get_by_name('intervideosink')
        self.create_intervideosrc_and_connections()
Example #35
File: jingle_rtp.py  Project: irl/gajim
 def make_bin_from_config(self, config_key, pipeline, text):
     pipeline = pipeline % gajim.config.get(config_key)
     try:
         bin = Gst.parse_bin_from_description(pipeline, True)
         return bin
     except GLib.GError as e:
         gajim.nec.push_incoming_event(InformationEvent(None,
             conn=self.session.connection, level='error',
             pri_txt=_('%s configuration error') % text.capitalize(),
             sec_txt=_("Couldn't setup %s. Check your configuration.\n\n"
             "Pipeline was:\n%s\n\nError was:\n%s") % (text, pipeline,
             str(e))))
         raise JingleContentSetupException
Example #36
    def reinit_pipeline(self, uri):
        if self.pipeline.get_by_name('tee'):
            self.pipeline.remove(self.tee_queue)
        if self.pipeline.get_by_name('uri'):
            self.pipeline.remove(self.source)
        if self.pipeline.get_by_name('filesink'):
            self.pipeline.remove(self.filesink)

        if 'http://' in uri or 'https://' in uri:
            self.source = Gst.ElementFactory.make('souphttpsrc' ,'uri')
            self.source.set_property('user-agent', self.user_agent) if self.user_agent else None
            self.source.set_property('cookies', ['cf_clearance=' + self.cookie]) if self.cookie else None

            if self.file_save_dir and not os.path.isfile(self.file_save_dir + '/' + os.path.basename(uri)):
                self.tee_queue = Gst.parse_bin_from_description('tee name=tee \
                                tee. ! queue name=filequeue \
                                tee. ! queue2 name=decodequeue use-buffering=true', False)
                self.filesink = Gst.ElementFactory.make('filesink' ,'filesink')
                self.filesink.set_property('location', self.file_save_dir + '/' + os.path.basename(uri))
                self.filesink.set_property('async', False)
            else:
                self.tee_queue = Gst.parse_bin_from_description('tee name=tee ! queue2 name=decodequeue use-buffering=true', False)
                self.filesink = None
        else:
            self.tee_queue = Gst.parse_bin_from_description('tee name=tee ! queue2 name=decodequeue use-buffering=true', False)
            self.source = Gst.ElementFactory.make('filesrc' ,'uri')
            self.filesink = None

        self.pipeline.add(self.tee_queue)
        self.pipeline.get_by_name('decodequeue').link(self.decodebin)
        self.pipeline.add(self.source)
        if self.filesink:
            self.pipeline.add(self.filesink)
            self.pipeline.get_by_name('filequeue').link(self.filesink)
        self.source.link(self.pipeline.get_by_name('tee'))
        self.source.set_property('location', uri)

        self.has_audio = False
        self.has_video = False
Example #37
 def start_recording(self):
     location = 'vidoutput' + str(self.num_recordings) + '.mkv'
     self.rec_pipe = Gst.parse_bin_from_description(
         "queue name=vidqueue ! "
         "h265parse ! "
         "matroskamux ! "
         "filesink name=vidsink location=" + location + " async=false",
         True)
     self.pipeline.add(self.rec_pipe)
     self.tee.link(self.rec_pipe)
     self.rec_pipe.set_state(Gst.State.PLAYING)
     print('Starting Recording...')
     self.num_recordings += 1
Example #38
    def __init__(self, src):
        super().__init__()
        self.connect('realize', self._on_realize)
        self.video_bin = Gst.parse_bin_from_description(src, True)
        self.pipeline = Gst.Pipeline()
        factory = self.pipeline.get_factory()
        self.gtksink = factory.make('gtksink')
        self.gtksink.set_property('sync', SYNC)

        self.pipeline.add(self.gtksink)
        self.pipeline.add(self.video_bin)
        self.video_bin.link(self.gtksink)
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
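The _on_realize handler is not shown. Assuming the class is a Gtk.Box (or a similar container), a hedged sketch would pack the gtksink's widget and start playback once the window exists:
    def _on_realize(self, widget):
        # gtksink exposes its drawing area as the 'widget' property.
        self.pack_start(self.gtksink.props.widget, True, True, 0)
        self.gtksink.props.widget.show()
        self.pipeline.set_state(Gst.State.PLAYING)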
Example #39
    def __init__(self, loop, pipeline_desc):
        desc = pipeline_desc.format(fd=1)
        logging.info('pipeline description: %s' % desc)
        self.pipeline = Gst.Pipeline()
        self.bin = Gst.parse_bin_from_description(desc, False)
        self.pipeline.add(self.bin)
        self.stream_sink = MultiFdSink(
            self.pipeline.get_by_name('stream_sink'), name='stream')
        self.tee = self.pipeline.get_by_name('t1')
        self.mjpeg_bin = None
        loop.create_task(watch_bus(self.pipeline.get_bus()))

        Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL,
                                  "graph.dot")
Example #40
 def create_elements(self):
     # The effects filters can mess with the alpha channel.
     # The best solution I've found is to allow it to move into RGBx, then force a detour via RGB
     # to remove the alpha channel, before moving back to our default RGBA.
     # This is done in a 'bin' so that the overlay can be manipulated as one thing.
     desc = (
         'videoconvert ! %s ! videoconvert ! capsfilter caps="video/x-raw,format=RGB" ! '
         'videoconvert ! capsfilter caps="video/x-raw,format=RGBA"'
     ) % self.props['effect_name']
     self.element = Gst.parse_bin_from_description(desc, True)
     self.element.set_name('%s_bin' % self.uid())
     place_to_add_elements = getattr(self.source, 'final_video_tee').parent
     if not place_to_add_elements.add(self.element):
         self.logger.warning(
             'Unable to add effect overlay bin to the source pipeline')
Example #41
    def link_audio_sink(self, pad, sinkcount):
        "Link the audio sink to the pad"
        # print >>sys.stderr, "LINKING AUDIO SINK"
        # if not self.adder:
        #     audiosink = Gst.ElementFactory.make("alsasink", None)
        #     audiosink.set_property("buffer-time", 50000)
        #     self.pipeline.add(audiosink)

        #     try:
        #         self.adder = Gst.ElementFactory.make("liveadder", None)
        #     except Gst.ElementNotFoundError:
        #         audiosink.set_state(Gst.State.PLAYING)
        #         pad.link(audiosink.get_static_pad("sink"))
        #         return
        #     self.pipeline.add(self.adder)
        #     audiosink.set_state(Gst.State.PLAYING)
        #     self.adder.link(audiosink)
        #     self.adder.set_state(Gst.State.PLAYING)
        # convert1 = Gst.ElementFactory.make("audioconvert", None)
        # self.pipeline.add(convert1)
        # resample = Gst.ElementFactory.make("audioresample", None)
        # self.pipeline.add(resample)
        # convert2 = Gst.ElementFactory.make("audioconvert", None)
        # self.pipeline.add(convert2)
        # convert1.link(resample)
        # resample.link(convert2)
        # convert2.link(self.adder)
        # pad.link(convert1.get_static_pad("sink"))
        # convert2.set_state(Gst.State.PLAYING)
        # resample.set_state(Gst.State.PLAYING)
        # convert1.set_state(Gst.State.PLAYING)

        sink = Gst.parse_bin_from_description("level name=l ! pulsesink name=v", True)
        sink.set_name("audiosink_" + str(sinkcount));
        print sink
        print sink.get_name()
        self.pipeline.add(sink)
        sink.set_state(Gst.State.PLAYING)
        if pad.link(sink.get_static_pad("sink")) != Gst.PadLinkReturn.OK:
            print("LINK FAILED")
        self.pipeline.post_message(Gst.Message.new_latency(sink))
        return sink
Example #42
    def start_recording(self):
        logging.debug("Start recording")
        # how to dynamically set a property
        inputname = "0"
        vals = self.get_input(inputname)
        vals['index'] = datetime.datetime.now().strftime('%a_%H:%M.%S')
        vals['fileprefix'] = 'monk_' + vals['index']
        vid_capture = [
              #"queue",
              #"! valve drop=false name=\"{name}_rec_valve\"",
              #"! vaapipostproc scale-method=hq",
              "videoconvert",
              "! videoscale",
              "! video/x-raw, width={src[caps][width]}, height={src[caps][height]}",
              #"! vaapih264enc init-qp=16 keyframe-period=120",
              "! x264enc speed-preset=ultrafast",
              "! mpegtsmux",
              "! filesink location={fileprefix}_{name}.ts",
        ]
        vid_capture = [l.format(**vals) for l in vid_capture]
        bin_spec = " ".join(vid_capture)
            #"{name}_rec_tee.",
        logging.debug(bin_spec)
        self.bins[inputname] = Gst.parse_bin_from_description(bin_spec, True)

        queue = Gst.ElementFactory.make('queue')
        self.pipeline.add(queue)
        self.pipeline.add(self.bins[inputname])

        tee = self.pipeline.get_by_name('%s_rec_tee' % inputname)
        self.pipeline.set_state(Gst.State.PAUSED)
        tee.link(queue)
        queue.link(self.bins[inputname])
        self.bins[inputname].set_state(Gst.State.PLAYING)
        #self.bins[inputname].sync_state_with_parent()


        overlay = self.pipeline.get_by_name('%s_textoverlay' % inputname)
        overlay.set_property("color", 0xffff8060)
Example #43
    def build_pipeline(self):
        video_format = ",".join([
            "video/x-raw",
            "framerate=30/1",
            "width={}".format(self.width),
            "height={}".format(self.height),
        ])

        audio_format = ",".join([
            "audio/x-raw",
            "channels=2"
        ])

        stream_bin = Gst.parse_bin_from_description("""
            videoconvert ! videoscale ! videorate ! {video_format} ! queue
            ! x264enc bitrate={self.vbitrate} speed-preset=ultrafast
            ! h264parse ! queue ! mux.

            jackaudiosrc client-name=tc ! {audio_format} ! audioconvert
            ! queue ! voaacenc bitrate={self.abitrate} ! queue ! mux.

            flvmux name=mux streamable=true ! queue
            ! rtmpsink location={self.destination}
        """.format(**locals()), True)

        self.pipeline = Gst.Pipeline()
        self.pipeline.add(self.source)
        self.pipeline.add(stream_bin)
        self.source.link(stream_bin)
        def link(*args):
            self.source.link(stream_bin)
        self.source.connect("pad-added", link)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_message)
Example #44
    def __init__(self, src_description):
        self.position = Gst.CLOCK_TIME_NONE
        self.duration = Gst.CLOCK_TIME_NONE
        self.pcr_configured = False
        self.is_recording = False
        self.changed_id = -1

        self.window = Gtk.Window()
        self.window.connect('destroy', self.quit)
        self.window.set_default_size(800, 450)

        box = Gtk.Box()
        box.set_spacing (5)
        box.set_orientation(Gtk.Orientation.VERTICAL)
        self.window.add(box)

        self.drawingarea = Gtk.DrawingArea()
        box.pack_start(self.drawingarea, True, True, 0)

        hbox = Gtk.Box()
        hbox.set_spacing (5)
        hbox.set_orientation(Gtk.Orientation.HORIZONTAL)

        self.stop_button = Gtk.Button(label='Stop')
        def stop_button_press_cb(widget, event):
            self.seek_end()
        self.stop_button.connect('button-press-event', stop_button_press_cb)

        hbox.pack_start(self.stop_button, False, False, 0)

        self.pause_button = Gtk.Button(label='Pause')
        def pause_button_press_cb(widget, event):
            self.pause()
        self.pause_button.connect('button-press-event', pause_button_press_cb)

        hbox.pack_start(self.pause_button, False, False, 0)

        self.adjustment = Gtk.Adjustment(0.0, 0.00, 100.0, 0.1, 1.0, 1.0)
        self.scale = Gtk.Scale(orientation=Gtk.Orientation.HORIZONTAL, adjustment=self.adjustment)
        self.scale.set_digits(0)
        self.scale.set_hexpand(True)
        self.scale.set_valign(Gtk.Align.START)
        self.scale.connect('button-press-event', self.scale_button_press_cb)
        self.scale.connect('button-release-event', self.scale_button_release_cb)
        self.scale.connect('format-value', self.scale_format_value_cb)

        hbox.pack_start(self.scale, False, True, 0)

        box.pack_start(hbox, False, False, 0)

        hbox2 = Gtk.Box()
        hbox2.set_spacing(5)
        hbox2.set_orientation(Gtk.Orientation.HORIZONTAL)

        self.label_duration = Gtk.Label('duration: --:--')
        hbox2.pack_start(self.label_duration, False, False, 0)

        self.label_buf_begin = Gtk.Label('buffer begin: --:--')
        hbox2.pack_start(self.label_buf_begin, False, False, 0)

        self.label_buf_end = Gtk.Label('buffer end: --:--')
        hbox2.pack_start(self.label_buf_end, False, False, 0)

        box.pack_start(hbox2, False, False, 0)

        # Create GStreamer pipeline
        self.pipeline = Gst.Pipeline()

        # Create bus to get events from GStreamer pipeline
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)

        # This is needed to make the video output in our DrawingArea:
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        ring_buffer_size = 512 * 1024 * 1024

        # Create GStreamer elements
        self.playbin = Gst.parse_bin_from_description(
            src_description + \
            ' ! queue ' \
            ' ! tsshifterbin name=timeshifterbin' \
                ' cache-size=%u' \
            ' ! decodebin ! autovideosink'
            % (ring_buffer_size),
            False)
        self.pipeline.add(self.playbin)

        ts = self.pipeline.get_by_name("timeshifter")
        if ts:
            def overrun_handler(obj):
                print('Received overrun signal')
                buf_begin, _ = self.query_buffering()
                seek_to = buf_begin + 2 * Gst.SECOND
                print('Will seek to %s' % self.format_time(seek_to))
                self.seek(seek_to)
                self.pipeline.set_state(Gst.State.PLAYING)

            ts.connect('overrun', overrun_handler)
            print('Registered overrun handler')

        self.update_id = GObject.timeout_add(1000, self.update_scale_cb)
Example #45
File: jingle_rtp.py  Project: irl/gajim
 def get_fallback_src(self):
     # TODO: Use avatar?
     pipeline = 'videotestsrc is-live=true ! video/x-raw,framerate=10/1 ! videoconvert'
     return Gst.parse_bin_from_description(pipeline, True)