Example #1
    def __init__(self, sessionid, audio_format):
        gst.Bin.__init__(self)

        capsfilter = gst.element_factory_make("capsfilter")
        volume = gst.element_factory_make("volume")
        volume.set_property("volume", settings.config["master_volume"])
        shout2send = gst.element_factory_make("shout2send")
        shout2send.set_property("username", "source")
        shout2send.set_property("password", "roundice")
        shout2send.set_property("mount", server.icecast_mount_point(sessionid, audio_format))

        self.add(capsfilter, volume, shout2send)
        capsfilter.link(volume)

        if audio_format.upper() == "MP3":
            capsfilter.set_property(
                "caps",
                gst.caps_from_string("audio/x-raw-int,rate=44100,channels=2,width=16,depth=16,signed=(boolean)true"),
            )
            lame = gst.element_factory_make("lame")
            self.add(lame)
            gst.element_link_many(volume, lame, shout2send)
        elif audio_format.upper() == "OGG":
            capsfilter.set_property("caps", gst.caps_from_string("audio/x-raw-float,rate=44100,channels=2,width=32"))
            vorbisenc = gst.element_factory_make("vorbisenc")
            oggmux = gst.element_factory_make("oggmux")
            self.add(vorbisenc, oggmux)
            gst.element_link_many(volume, vorbisenc, oggmux, shout2send)
        else:
            raise "Invalid format"

        pad = capsfilter.get_pad("sink")
        ghostpad = gst.GhostPad("sink", pad)
        self.add_pad(ghostpad)
Example #2
File: pip.py  Project: defendor/freeseer
 def load_inputs(self, player, mixer, inputs):
     # Load main source
     input1 = inputs[0]
     player.add(input1)
     
     mainsrc_capsfilter = gst.element_factory_make("capsfilter", "mainsrc_capsfilter")
     mainsrc_capsfilter.set_property('caps',
                     gst.caps_from_string('video/x-raw-rgb, width=640, height=480'))
     player.add(mainsrc_capsfilter)
     
     input1.link(mainsrc_capsfilter)
     srcpad = mainsrc_capsfilter.get_pad("src")
     sinkpad = mixer.get_pad("sink_main")
     srcpad.link(sinkpad)
 
     # Load the secondary source
     input2 = inputs[1]
     player.add(input2)
     
     pipsrc_capsfilter = gst.element_factory_make("capsfilter", "pipsrc_capsfilter")
     pipsrc_capsfilter.set_property('caps',
                     gst.caps_from_string('video/x-raw-rgb, width=200, height=150'))
     player.add(pipsrc_capsfilter)
     
     input2.link(pipsrc_capsfilter)
     srcpad = pipsrc_capsfilter.get_pad("src")
     sinkpad = mixer.get_pad("sink_pip")
     srcpad.link(sinkpad)
Example #3
 def create_pipeline(self):
    self.labelDebugData.set_text("start pipeline")
    src = gst.element_factory_make("v4l2camsrc", "src")
    src.set_property("device", "/dev/video0")
    #check dev for video call if this doesn't work
    #src.set_property ("always-copy", True)
    #src.set_property ("width", 640)
    #src.set_property ("height", 480)
    #src.set_property ("framerate", 30)
    #src = gst.element_factory_make("v4l2camsrc", "src")
    self.pipeline.add(src)
    
    screen_csp = gst.element_factory_make("ffmpegcolorspace", "screen_csp")
    self.pipeline.add(screen_csp)
    
    screen_caps = gst.element_factory_make("capsfilter", "screen_caps")
    # Alternate caps to run outside Internet Tablet (e.g. in a PC with webcam)
    screen_caps.set_property('caps', gst.caps_from_string("video/x-raw-yuv,width=640,height=480"))
    #screen_caps.set_property('caps', gst.caps_from_string("video/x-raw-yuv,width=640,height=480,bpp=24,depth=24,framerate=30/1"))
    
    self.pipeline.add(screen_caps)
    
    
    image_csp = gst.element_factory_make("ffmpegcolorspace", "image_csp")
    self.pipeline.add(image_csp)
    
    image_caps = gst.element_factory_make("capsfilter", "image_caps")
    # Alternate caps to run outside Internet Tablet (e.g. in a PC with webcam)
    image_caps.set_property('caps', gst.caps_from_string("video/x-raw-yuv,width=640,height=480"))
    self.pipeline.add(image_caps)
    
    
    tee = gst.element_factory_make("tee", "tee")
    self.pipeline.add(tee)
    
    screen_queue = gst.element_factory_make("queue", "screen_queue")
    self.pipeline.add(screen_queue)
    
    self.screen_sink = gst.element_factory_make("xvimagesink", "screen_sink")
    self.pipeline.add(self.screen_sink)
    
    image_queue = gst.element_factory_make("queue", "image_queue")
    self.pipeline.add(image_queue)
    
    image_sink = gst.element_factory_make("fakesink", "image_sink")
    self.pipeline.add(image_sink)
    
    self.pad = image_sink.get_pad('sink')
    self.pad.add_buffer_probe(self.buffer_cb)
    
    gst.element_link_many(src, tee, screen_caps, screen_csp, screen_queue, self.screen_sink)
    #gst.element_link_many(src, screen_caps, tee, screen_queue, sink)
    gst.element_link_many(tee, image_caps, image_csp, image_queue, image_sink)
    
    self.window.show_all()
    
    self.pipeline.set_state(gst.STATE_PLAYING)
    
    self.labelDebugData.set_text("end pipeline")
Example #4
File: spykee.py  Project: zaheerm/zspykee
 def configure_pipeline(self, pipeline, properties):
     self._vsource = self.pipeline.get_by_name("vsrc")
     self._vsource.set_property('caps',
         gst.caps_from_string("image/jpeg, width=320, height=240"))
     self._asource = self.pipeline.get_by_name("asrc")
     self._asource.set_property('caps',
         gst.caps_from_string("audio/x-raw-int,rate=16000,channels=1,width=16,depth=16,signed=true,endianness=1234"))
     self.debug("Configured pipeline")
Example #5
class AudioResyncer(gst.Element):
    '''
    I retimestamp incoming buffers, adding a fixed delay.
    '''

    __gproperties__ = {
        'delay': (float, 'delay (in ms)',
            'Resynchronisation delay in milliseconds',
            -1000000, 1000000, 0,
            gobject.PARAM_READWRITE)}

    _sinkpadtemplate = gst.PadTemplate("sink",
                                        gst.PAD_SINK,
                                        gst.PAD_ALWAYS,
                                        gst.caps_from_string(
                                            "audio/x-raw-float;"
                                            "audio/x-raw-int"))

    _srcpadtemplate = gst.PadTemplate("src",
                                        gst.PAD_SRC,
                                        gst.PAD_ALWAYS,
                                        gst.caps_from_string(
                                            "audio/x-raw-float;"
                                            "audio/x-raw-int"))

    def __init__(self, delay=0):
        gst.Element.__init__(self)

        self.sinkpad = gst.Pad(self._sinkpadtemplate, "sink")
        self.sinkpad.set_chain_function(self.chainfunc)
        self.add_pad(self.sinkpad)

        self.srcpad = gst.Pad(self._srcpadtemplate, "src")
        self.add_pad(self.srcpad)

        self._delay = long(delay * gst.MSECOND)
        print self._delay

    def do_get_property(self, property):
        if property.name == "delay":
            return self._delay
        else:
            raise AttributeError('unknown property %s' % property.name)

    def do_set_property(self, property, value):
        if property.name == "delay":
            self._delay = long(value * gst.MSECOND)
        else:
            raise AttributeError('unknown property %s' % property.name)

    def chainfunc(self, pad, buffer):
        if self._delay != 0:
            # make_metadata_writable() returns a buffer whose metadata
            # (e.g. timestamp) may safely be modified
            buffer = buffer.make_metadata_writable()
            buffer.timestamp = buffer.timestamp + self._delay
        self.srcpad.push(buffer)
        return gst.FLOW_OK
Example #6
def old():
    filepipe = gst.Pipeline("filepipe")

    # fromfile = gst.element_factory_make('audiotestsrc','testfile')
    # fromfile = gst.element_factory_make('filesrc','testfile')
    # fromfile.set_property('location','falling.wav')
    # fromfile.set_state(gst.STATE_PLAYING)
    # gtk.main()
    append_pipe(filepipe, 'testfile', 'filesrc', { 'location' : 'falling.wav' })
    append_pipe(filepipe, 'decodebin', 'decodebin2', {})
    append_pipe(filepipe, 'audioconvert', 'audioconvert', {})
    file_volume = append_pipe(filepipe, 'filevolume', 'volume', {})
    file_ident = gst.element_factory_make("identity")
    filepipe.add(file_ident) # needed?
    fileoutcaps = gst.caps_from_string("audio/x-raw-int,channels=2,rate=44100,depth=16")
    # isn't this just sequential 'link'?
    file_volume.link(file_ident, fileoutcaps)
    filepipe_srcpad = gst.GhostPad("src", file_ident.get_pad("src"))

    # append_pipe(filepipe, 'speaker', 'alsasink', {})
    # filepipe.set_state(gst.STATE_PLAYING)

    ## Mic pipe
    micpipe = gst.Pipeline("micpipe")
    append_pipe(micpipe, 'mic', 'alsasrc',{})
    mic_volume = append_pipe(micpipe, 'micvolume', 'volume', {})
    mic_ident = gst.element_factory_make("identity")
    micpipe.add(mic_ident) # needed?
    micoutcaps = gst.caps_from_string("audio/x-raw-int,channels=2,rate=44100,depth=16")
    # isn't this just sequential 'link'?
    mic_volume.link(mic_ident, micoutcaps)
    micpipe_srcpad = gst.GhostPad("src", mic_ident.get_pad("src"))

    # append_pipe(micpipe, 'speaker', 'alsasink', {})
    # micpipe.set_state(gst.STATE_PLAYING)

    # MIX
    mix = gst.Pipeline("mix")
    mix.add(filepipe)
    mix.add(micpipe)
    mixer = append_pipe(mix,"adder",'adder', {})

    ch1 = mixer.get_request_pad('sink%d')
    filepipe_srcpad.link(ch1)
    ch2 = mixer.get_request_pad('sink%d')
    micpipe_srcpad.link(ch2)

    append_pipe(mix, 'audioconvert', 'audioconvert', {})
    append_pipe(mix, 'mixspeaker', 'alsasink', {})

    mix.set_state(gst.STATE_PLAYING)



    gtk.main()
Example #7
def old():
    filepipe = gst.Pipeline("filepipe")

    # fromfile = gst.element_factory_make('audiotestsrc','testfile')
    # fromfile = gst.element_factory_make('filesrc','testfile')
    # fromfile.set_property('location','falling.wav')
    # fromfile.set_state(gst.STATE_PLAYING)
    # gtk.main()
    append_pipe(filepipe, 'testfile', 'filesrc', {'location': 'falling.wav'})
    append_pipe(filepipe, 'decodebin', 'decodebin2', {})
    append_pipe(filepipe, 'audioconvert', 'audioconvert', {})
    file_volume = append_pipe(filepipe, 'filevolume', 'volume', {})
    file_ident = gst.element_factory_make("identity")
    filepipe.add(file_ident)  # needed?
    fileoutcaps = gst.caps_from_string(
        "audio/x-raw-int,channels=2,rate=44100,depth=16")
    # isn't this just sequential 'link'?
    file_volume.link(file_ident, fileoutcaps)
    filepipe_srcpad = gst.GhostPad("src", file_ident.get_pad("src"))

    # append_pipe(filepipe, 'speaker', 'alsasink', {})
    # filepipe.set_state(gst.STATE_PLAYING)

    ## Mic pipe
    micpipe = gst.Pipeline("micpipe")
    append_pipe(micpipe, 'mic', 'alsasrc', {})
    mic_volume = append_pipe(micpipe, 'micvolume', 'volume', {})
    mic_ident = gst.element_factory_make("identity")
    micpipe.add(mic_ident)  # needed?
    micoutcaps = gst.caps_from_string(
        "audio/x-raw-int,channels=2,rate=44100,depth=16")
    # isn't this just sequential 'link'?
    mic_volume.link(mic_ident, micoutcaps)
    micpipe_srcpad = gst.GhostPad("src", mic_ident.get_pad("src"))

    # append_pipe(micpipe, 'speaker', 'alsasink', {})
    # micpipe.set_state(gst.STATE_PLAYING)

    # MIX
    mix = gst.Pipeline("mix")
    mix.add(filepipe)
    mix.add(micpipe)
    mixer = append_pipe(mix, "adder", 'adder', {})

    ch1 = mixer.get_request_pad('sink%d')
    filepipe_srcpad.link(ch1)
    ch2 = mixer.get_request_pad('sink%d')
    micpipe_srcpad.link(ch2)

    append_pipe(mix, 'audioconvert', 'audioconvert', {})
    append_pipe(mix, 'mixspeaker', 'alsasink', {})

    mix.set_state(gst.STATE_PLAYING)

    gtk.main()
Example #8
 def make_caps(self, width, height):
     caps = {}
     inside_width = width/3
     inside_height = height/3
     resolution = ",width=" + str(inside_width) + ",height=" + str(inside_height)
     caps_string_inside = "video/x-raw-yuv" + resolution
     resolution = ",width=" + str(width) + ",height=" + str(height)
     caps_string_outside = "video/x-raw-yuv" + resolution
     caps['B'] = gst.caps_from_string(caps_string_inside)
     caps['A'] = gst.caps_from_string(caps_string_outside)
     return caps
Example #10
    def load_inputs(self, player, mixer, inputs):
        # Load main source
        input1 = inputs[0]

        # Create videoscale element in order to scale to dimensions not supported by camera
        mainsrc_scale = gst.element_factory_make("videoscale", "mainsrc_scale")

        # Create ffmpegcolorspace element to convert from what camera supports to rgb
        mainsrc_colorspace = gst.element_factory_make("ffmpegcolorspace", "mainsrc_colorspace")

        # Create capsfilter for limiting to x-raw-rgb pixel video format and setting dimensions
        mainsrc_capsfilter = gst.element_factory_make("capsfilter", "mainsrc_capsfilter")
        mainsrc_capsfilter.set_property('caps',
                        gst.caps_from_string('video/x-raw-rgb, width=640, height=480'))

        mainsrc_elements = [input1, mainsrc_scale, mainsrc_capsfilter, mainsrc_colorspace]

        # Add elements to player in list order
        map(lambda element: player.add(element), mainsrc_elements)

        # Link elements in a specific order
        input1.link(mainsrc_scale)
        mainsrc_scale.link(mainsrc_capsfilter)
        mainsrc_capsfilter.link(mainsrc_colorspace)

        # Link colorspace element to sink pad for pixel format conversion
        srcpad = mainsrc_colorspace.get_pad("src")
        sinkpad = mixer.get_pad("sink_main")
        srcpad.link(sinkpad)

        # Load the secondary source
        input2 = inputs[1]

        # Create gst elements as above, but set smaller dimensions
        pipsrc_scale = gst.element_factory_make("videoscale", "pipsrc_scale")
        pipsrc_colorspace = gst.element_factory_make("ffmpegcolorspace", "pipsrc_colorspace")
        pipsrc_capsfilter = gst.element_factory_make("capsfilter", "pipsrc_capsfilter")
        pipsrc_capsfilter.set_property('caps',
                        gst.caps_from_string('video/x-raw-rgb, width=200, height=150'))

        pipsrc_elements = [input2, pipsrc_scale, pipsrc_capsfilter, pipsrc_colorspace]

        #Add elements to player in list order
        map(lambda element: player.add(element), pipsrc_elements)

        # Link elements in specific order
        input2.link(pipsrc_scale)
        pipsrc_scale.link(pipsrc_capsfilter)
        pipsrc_capsfilter.link(pipsrc_colorspace)

        # Link colorspace element to sink pad for pixel format conversion
        srcpad = pipsrc_colorspace.get_pad("src")
        sinkpad = mixer.get_pad("sink_pip")
        srcpad.link(sinkpad)
Example #11
    def get_videomixer_bin(self):
        bin = gst.Bin()

        # Video Rate
        videorate = gst.element_factory_make("videorate", "videorate")
        bin.add(videorate)
        videorate_cap = gst.element_factory_make("capsfilter",
                                                 "video_rate_cap")
        videorate_cap.set_property(
            "caps",
            gst.caps_from_string(
                "%s, framerate=%d/1" %
                (self.config.input_type, self.config.framerate)))
        bin.add(videorate_cap)
        # --- End Video Rate

        # Video Scaler (Resolution)
        videoscale = gst.element_factory_make("videoscale", "videoscale")
        bin.add(videoscale)
        videoscale_cap = gst.element_factory_make("capsfilter",
                                                  "videoscale_cap")

        # Change the resolution of the source video.
        log.debug("Record Resolution: %s", self.config.resolution)
        if self.config.resolution != "No Scaling":
            width, height = widget.resmap[self.config.resolution]
            videoscale_cap.set_property(
                'caps',
                gst.caps_from_string("{}, width={}, height={}".format(
                    self.config.input_type, width, height)))

        bin.add(videoscale_cap)
        # --- End Video Scaler

        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        bin.add(colorspace)

        # Link Elements
        videorate.link(videorate_cap)
        videorate_cap.link(videoscale)
        videoscale.link(videoscale_cap)
        videoscale_cap.link(colorspace)

        # Setup ghost pad
        sinkpad = videorate.get_pad("sink")
        sink_ghostpad = gst.GhostPad("sink", sinkpad)
        bin.add_pad(sink_ghostpad)

        srcpad = colorspace.get_pad("src")
        src_ghostpad = gst.GhostPad("src", srcpad)
        bin.add_pad(src_ghostpad)

        return bin
Example #12
    def __init__(self):

        pipe = gst.element_factory_make('pipeline', "cam2soma_pipeline")
        
        cam = gst.element_factory_make('dc1394src', "camera")
        cfiltVid = gst.element_factory_make('capsfilter', "camera_capsfilter")
        cfiltVid.set_property('caps', gst.caps_from_string("video/x-raw-gray, width=640, height=480, framerate=100/1"))
        qVid = gst.element_factory_make('queue', "q_video")
        qVid.set_property("leaky", 2)
        qVid.set_property("max-size-buffers", 1)

        color = gst.element_factory_make('ffmpegcolorspace', "color")

        pipe.add(cam)
        pipe.add(cfiltVid)
        pipe.add(qVid)
        pipe.add(color)    

        gst.element_link_many(cam, cfiltVid, qVid, color)

        events = gst.element_factory_make('somaeventsource', "ses")
        cfiltEv = gst.element_factory_make('capsfilter', "events_capsfilter")
        cfiltEv.set_property('caps', gst.caps_from_string("soma/event, src=74"))
        camEv = gst.element_factory_make('somacameraevent', 'sce')
        qEv = gst.element_factory_make('queue', "q_events")
        qEv.set_property("leaky", 2)
        qEv.set_property("max-size-buffers", 1)

        pipe.add(events)
        pipe.add(cfiltEv)
        pipe.add(camEv)
        pipe.add(qEv)        

        gst.element_link_many(events, cfiltEv, camEv, qEv)

        sync = gst.element_factory_make('somasynctracker')

        pipe.add(sync)

        qEv.link_pads('src', sync, "diode_sink")
        color.link_pads('src', sync, "video_sink")

        posPad = gst.GhostPad("pos_src", sync.get_pad('pos_src'))
        bwPad = gst.GhostPad("bw_src", sync.get_pad('bw_src'))
        videoPad = gst.GhostPad("video_src", sync.get_pad('video_src'))

        pipe.add_pad(posPad)
        pipe.add_pad(bwPad)
        pipe.add_pad(videoPad)
        
        self.pipeline = pipe 
Example #13
    def __init__(self, sessionid, audio_format, bitrate):
        gst.Bin.__init__(self)
        #self.taginjector = gst.element_factory_make("taginject")
        #self.taginjector.set_property("tags","title=\"asset_id=123\"")

        capsfilter = gst.element_factory_make("capsfilter")
        volume = gst.element_factory_make("volume")
        volume.set_property("volume", settings.config["master_volume"])
        shout2send = gst.element_factory_make("shout2send")
        shout2send.set_property("username",
                                settings.config["icecast_source_username"])
        shout2send.set_property("password",
                                settings.config["icecast_source_password"])
        #shout2send.set_property("username", "source")
        #shout2send.set_property("password", "roundice")
        shout2send.set_property("mount",
                                icecast_mount_point(sessionid, audio_format))
        #shout2send.set_property("streamname","initial name")
        #self.add(capsfilter, volume, self.taginjector, shout2send)
        self.add(capsfilter, volume, shout2send)
        capsfilter.link(volume)

        if audio_format.upper() == "MP3":
            capsfilter.set_property(
                "caps",
                gst.caps_from_string(
                    "audio/x-raw-int,rate=44100,channels=2,width=16,depth=16,signed=(boolean)true"
                ))
            lame = gst.element_factory_make("lame")
            lame.set_property("bitrate", int(bitrate))
            logging.debug("roundstreamsink: bitrate: " + str(int(bitrate)))
            self.add(lame)
            #gst.element_link_many(volume, lame, self.taginjector, shout2send)
            gst.element_link_many(volume, lame, shout2send)
        elif audio_format.upper() == "OGG":
            capsfilter.set_property(
                "caps",
                gst.caps_from_string(
                    "audio/x-raw-float,rate=44100,channels=2,width=32"))
            vorbisenc = gst.element_factory_make("vorbisenc")
            oggmux = gst.element_factory_make("oggmux")
            self.add(vorbisenc, oggmux)
            #gst.element_link_many(volume, vorbisenc, oggmux, self.taginjector, shout2send)
            gst.element_link_many(volume, vorbisenc, oggmux, shout2send)
        else:
            raise "Invalid format"

        pad = capsfilter.get_pad("sink")
        ghostpad = gst.GhostPad("sink", pad)
        self.add_pad(ghostpad)
Example #14
class AsxDecoder(BasePlaylistElement):
    __gstdetails__ = ('ASX Decoder', 'Decoder',
                      'Convert .asx to text/uri-list', 'Mopidy')

    sinkpad_template = gst.PadTemplate('sink', gst.PAD_SINK, gst.PAD_ALWAYS,
                                       gst.caps_from_string('audio/x-ms-asx'))

    srcpad_template = gst.PadTemplate('src', gst.PAD_SRC, gst.PAD_ALWAYS,
                                      gst.caps_from_string('text/uri-list'))

    __gsttemplates__ = (sinkpad_template, srcpad_template)

    def convert(self, data):
        return parse_asx(data)
Example #15
    def init_pipeline(self):
        """Function pipeline constructs a pipeline containing a stream
        from the camera.
        """
        # Create pipeline:
        #                                   /-> screen_queue -> csp2 -> screen_sink
        #   img_src (camera) -> csp -> tee -|
        #                                   \-> image_queue -> image_sink
        #
        self.pipeline = gst.Pipeline("camera-pipeline")
        self.img_src = gst.element_factory_make("v4l2camsrc", "img_src")
        self.img_src.set_property("device", "/dev/video1")
        self.csp = gst.element_factory_make("ffmpegcolorspace", "csp")
        self.caps1 = gst.element_factory_make("capsfilter", "caps1")
        self.caps1.set_property(
            "caps",
            gst.caps_from_string("video/x-raw-rgb,width=%i,height=%i,bpp=24,depth=24" % (self.width, self.height)),
        )
        self.csp2 = gst.element_factory_make("ffmpegcolorspace", "csp2")
        self.caps2 = gst.element_factory_make("capsfilter", "caps2")
        self.caps2.set_property("caps", gst.caps_from_string("video/x-raw-yuv"))
        self.tee = gst.element_factory_make("tee", "tee")
        self.screen_queue = gst.element_factory_make("queue", "screen_queue")
        self.image_queue = gst.element_factory_make("queue", "image_queue")
        self.screen_sink = gst.element_factory_make("xvimagesink", "screen_sink")
        self.image_sink = gst.element_factory_make("fakesink", "image_sink")
        self.pipeline.add(
            self.img_src,
            self.csp,
            self.caps1,
            self.csp2,
            self.caps2,
            self.tee,
            self.screen_queue,
            self.image_queue,
            self.screen_sink,
            self.image_sink,
        )

        # Link the pipeline
        gst.element_link_many(self.img_src, self.csp, self.caps1, self.tee)
        if self.overlay:
            gst.element_link_many(self.tee, self.screen_queue, self.csp2, self.caps2, self.screen_sink)
        gst.element_link_many(self.tee, self.image_queue, self.image_sink)

        # Tell image_sink to emit handoff signals
        self.image_sink.set_property("signal-handoffs", True)

        self.pipeline.set_state(gst.STATE_PLAYING)
Example #16
    def get_videomixer_bin(self):
        bin = gst.Bin()

        # Video Rate
        videorate = gst.element_factory_make("videorate", "videorate")
        bin.add(videorate)
        videorate_cap = gst.element_factory_make("capsfilter",
                                                 "video_rate_cap")
        videorate_cap.set_property("caps",
                        gst.caps_from_string("%s, framerate=%d/1" % (self.config.input_type, self.config.framerate)))
        bin.add(videorate_cap)
        # --- End Video Rate

        # Video Scaler (Resolution)
        videoscale = gst.element_factory_make("videoscale", "videoscale")
        bin.add(videoscale)
        videoscale_cap = gst.element_factory_make("capsfilter",
                                                  "videoscale_cap")

        # Change the resolution of the source video.
        log.debug("Record Resolution: %s", self.config.resolution)
        if self.config.resolution != "No Scaling":
            width, height = widget.resmap[self.config.resolution]
            videoscale_cap.set_property('caps',
                                        gst.caps_from_string("{}, width={}, height={}"
                                        .format(self.config.input_type, width, height)))

        bin.add(videoscale_cap)
        # --- End Video Scaler

        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        bin.add(colorspace)

        # Link Elements
        videorate.link(videorate_cap)
        videorate_cap.link(videoscale)
        videoscale.link(videoscale_cap)
        videoscale_cap.link(colorspace)

        # Setup ghost pad
        sinkpad = videorate.get_pad("sink")
        sink_ghostpad = gst.GhostPad("sink", sinkpad)
        bin.add_pad(sink_ghostpad)

        srcpad = colorspace.get_pad("src")
        src_ghostpad = gst.GhostPad("src", srcpad)
        bin.add_pad(src_ghostpad)

        return bin
Example #17
class XspfDecoder(BasePlaylistElement):
    __gstdetails__ = ('XSPF Decoder', 'Decoder',
                      'Convert .xspf to text/uri-list', 'Mopidy')

    sinkpad_template = gst.PadTemplate(
        'sink', gst.PAD_SINK, gst.PAD_ALWAYS,
        gst.caps_from_string('application/xspf+xml'))

    srcpad_template = gst.PadTemplate('src', gst.PAD_SRC, gst.PAD_ALWAYS,
                                      gst.caps_from_string('text/uri-list'))

    __gsttemplates__ = (sinkpad_template, srcpad_template)

    def convert(self, data):
        return parse_xspf(data)
Example #18
    def get_videomixer_bin(self):
        bin = gst.Bin()

        # Video Rate
        videorate = gst.element_factory_make("videorate", "videorate")
        bin.add(videorate)
        videorate_cap = gst.element_factory_make("capsfilter",
                                                 "video_rate_cap")
        videorate_cap.set_property(
            "caps",
            gst.caps_from_string(
                "%s, framerate=%d/1" %
                (self.config.input_type, self.config.framerate)))
        bin.add(videorate_cap)
        # --- End Video Rate

        # Video Scaler (Resolution)
        videoscale = gst.element_factory_make("videoscale", "videoscale")
        bin.add(videoscale)
        videoscale_cap = gst.element_factory_make("capsfilter",
                                                  "videoscale_cap")
        if self.config.resolution != "NOSCALE":
            videoscale_cap.set_property(
                'caps',
                gst.caps_from_string('%s, width=640, height=480' %
                                     (self.config.input_type)))
        bin.add(videoscale_cap)
        # --- End Video Scaler

        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        bin.add(colorspace)

        # Link Elements
        videorate.link(videorate_cap)
        videorate_cap.link(videoscale)
        videoscale.link(videoscale_cap)
        videoscale_cap.link(colorspace)

        # Setup ghost pad
        sinkpad = videorate.get_pad("sink")
        sink_ghostpad = gst.GhostPad("sink", sinkpad)
        bin.add_pad(sink_ghostpad)

        srcpad = colorspace.get_pad("src")
        src_ghostpad = gst.GhostPad("src", srcpad)
        bin.add_pad(src_ghostpad)

        return bin
Example #19
 def config(self, dict):
     self.video_src.set_property("pattern", int(dict["pattern"]))
     caps = gst.caps_from_string("video/x-raw-yuv, width=%d, height=%d;"
                                 "video/x-raw-rgb, width=%d, height=%d" %
                                 (int(dict["width"]), int(dict["height"]),
                                  int(dict["width"]), int(dict["height"])))
     self.capsfilter.set_property("caps", caps)
Example #20
    def __init__(self):
        # still need to look at this
        self.caps = gst.caps_from_string("audio/x-raw-int, channels=2, rate=44100, width=16, depth=16")

        self.playlist = Playlist()

        self.pipeline = gst.Pipeline("player")
        self.status = None
        self.now_playing = None

        #create adder and tee
        self.adder = gst.element_factory_make("adder", "adder")
        self.__adder_sink = None
        self.tee = gst.element_factory_make("tee", "tee")

        self.pipeline.add(self.adder, self.tee)

        adder_src = self.adder.get_pad("src")
        adder_src.link(self.tee.get_pad("sink"))

        outputbin = OutputBin()
        self.pipeline.add(outputbin)
        tee_src = self.tee.get_request_pad("src%d")
        tee_src.link(outputbin.get_pad("sink"))

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message", self.on_message)
Example #21
    def __init__(self, needed_caps):
        self._width = None
        self._height = None
        self._CurrentFrame = None
        gobject.threads_init()
        self._mutex = mutex()
        gst.Bin.__init__(self)
        self._capsfilter = gst.element_factory_make("capsfilter", "capsfilter")
        caps = gst.caps_from_string(needed_caps)
        self._capsfilter.set_property("caps", caps)
        self.add(self._capsfilter)

        fakesink = gst.element_factory_make('fakesink', 'fakesink')
        fakesink.set_property("sync", True)
        self.add(fakesink)
        self._capsfilter.link(fakesink)
        
        pad = self._capsfilter.get_pad("sink")
        ghostpad = gst.GhostPad("sink", pad)
        
        pad2probe = fakesink.get_pad("sink")
        pad2probe.add_buffer_probe(self.buffer_probe)

        self.add_pad(ghostpad)
        self.sink = self._capsfilter
Example #22
 def getAudioCaps(self):
     """ Returns the GstCaps corresponding to the audio settings """
     astr = "rate=%d,channels=%d" % (self.audiorate, self.audiochannels)
     astrcaps = gst.caps_from_string("audio/x-raw-int,%s;audio/x-raw-float,%s" % (astr, astr))
     if self.aencoder:
         return get_compatible_sink_caps(self.aencoder, astrcaps)
     return astrcaps
Example #23
    def setupGst(self):
        print "Setting up gstreamer pipeline"
        self.gstWindowId = self.video_container.winId()

        self.player = gst.Pipeline("player")
        self.tee = gst.element_factory_make("tee")
        sinkx = gst.element_factory_make("ximagesink", 'sinkx_overview')
        fcs = gst.element_factory_make('ffmpegcolorspace')
        caps = gst.caps_from_string('video/x-raw-yuv')
        self.capture_enc = gst.element_factory_make("jpegenc")
        self.capture_sink = gst.element_factory_make("capturesink")
        self.capture_sink_queue = gst.element_factory_make("queue")
        self.resizer = gst.element_factory_make("videoscale")

        # Video render stream
        self.player.add(self.source, self.tee)
        gst.element_link_many(self.source, self.tee)

        self.player.add(fcs, self.resizer, sinkx)
        gst.element_link_many(self.tee, fcs, self.resizer, sinkx)

        self.player.add(self.capture_sink_queue, self.capture_enc,
                        self.capture_sink)
        gst.element_link_many(self.tee, self.capture_sink_queue,
                              self.capture_enc, self.capture_sink)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
Example #24
    def __init__(self, uri, process=None, hopsize=512, caps=None):
        if uri.startswith('/'):
            from urllib import quote
            uri = 'file://' + quote(uri)
        src = gst.element_factory_make('uridecodebin')
        src.set_property('uri', uri)
        src.connect('pad-added', self.source_pad_added_cb)
        conv = gst.element_factory_make('audioconvert')
        self.conv = conv
        rsmpl = gst.element_factory_make('audioresample')
        capsfilter = gst.element_factory_make('capsfilter')
        if caps:
            capsfilter.set_property('caps', gst.caps_from_string(caps))
        sink = AubioSink("AubioSink", process=process)
        sink.set_property('hopsize', hopsize)  # * calcsize('f'))

        self.pipeline = gst.Pipeline()

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self.on_eos)

        self.apad = conv.get_pad('sink')

        self.pipeline.add(src, conv, rsmpl, capsfilter, sink)

        gst.element_link_many(conv, rsmpl, capsfilter, sink)

        self.mainloop = gobject.MainLoop()
        self.pipeline.set_state(gst.STATE_PLAYING)
Example #25
    def __init__(self, array, samplerate):
        self.appsrc = gst.element_factory_make("appsrc")
        self.pos = 0
        self.samplerate = samplerate
        if array.ndim == 1:
            array.resize((array.shape[0], 1))
        self.length, self.channels = array.shape
        self.array = array.astype("float32")
        self.per_sample = gst.SECOND // samplerate
        self.fac = self.channels * array.dtype.itemsize
        #self.appsrc.set_property("size", (self.length * self.channels *
        #                                  array.dtype.itemsize))
        self.appsrc.set_property("format", gst.FORMAT_TIME)
        capstr = """audio/x-raw-float,
                    width=%d,
                    depth=%d,
                    rate=%d,
                    channels=%d,
                    endianness=(int)1234,
                    signed=true""" % (self.array.dtype.itemsize * 8,
                                      self.array.dtype.itemsize * 8,
                                      self.samplerate, self.channels)
        self.appsrc.set_property("caps", gst.caps_from_string(capstr))
        self.appsrc.set_property("stream-type", 0)  # Seekable
        self.appsrc.set_property('block', True)

        self.appsrc.connect("need-data", self.need_data)
        self.appsrc.connect("seek-data", self.seek_data)
        self.appsrc.connect("enough-data", self.enough_data)
Example #26
class LedVideoSink(gst.BaseSink):

    __gsttemplates__ = (gst.PadTemplate(
        "sink", gst.PAD_SINK, gst.PAD_ALWAYS,
        gst.caps_from_string(
            "video/x-raw-rgb,width=16,height=15,bpp=24,framerate=40/1")), )

    sinkpad = property(lambda self: self.get_pad("sink"))

    def __init__(self, matrix):
        gst.BaseSink.__init__(self)
        self.matrix = matrix
        self.set_sync(True)

        gst.info('setting chain/event functions')
        # will break seeking
        #self.sinkpad.set_event_function(self.eventfunc)

    def do_render(self, buffer):
        self.matrix.send_raw_image(buffer)
        return gst.FLOW_OK

    def eventfunc(self, pad, event):
        self.info("%s event:%r" % (pad, event.type))
        return True
Example #27
    def SetProperties(self):
        """
		Sets basic Event properties like location, start, duration, etc.
		"""
        if self.file:
            if self.single_decode_bin:
                self.gnlsrc.remove(self.single_decode_bin)
                self.single_decode_bin.set_state(gst.STATE_NULL)

            Globals.debug("creating SingleDecodeBin")
            caps = gst.caps_from_string("audio/x-raw-int;audio/x-raw-float")
            f = PlatformUtils.pathname2url(self.GetAbsFile())
            Globals.debug("file uri is:", f)
            self.single_decode_bin = SingleDecodeBin(caps=caps, uri=f)
            self.gnlsrc.add(self.single_decode_bin)
            Globals.debug("setting event properties:")
            propsDict = {
                "caps": caps,
                "start": long(self.start * gst.SECOND),
                "duration": long(self.duration * gst.SECOND),
                "media-start": long(self.offset * gst.SECOND),
                "media-duration": long(self.duration * gst.SECOND),
                "priority": 2
            }

            for prop, value in propsDict.iteritems():
                self.gnlsrc.set_property(prop, value)
                Globals.debug("\t", prop, "=", value)
Example #28
 def do_set_property(self, property, value):
     if property.name == "audiorate":
         caps = gst.caps_from_string("audio/x-raw-int, rate=%d" % (value))
         self.capsfilter.set_property("caps", caps)
         self.audiorate_property = value
     else:
         Log.warning("audioresample unknown property %s" % property.name)
Example #29
    def __init__(self):
        pipe = gst.element_factory_make('pipeline', "vidpos2oggPipeline")
        
        pos2text = gst.element_factory_make('somapos2text', "pos2text")
        textFilt = gst.element_factory_make('capsfilter')
        textFilt.set_property('caps', gst.caps_from_string("text/x-cmml, encoded=False"))
        theora = gst.element_factory_make('theoraenc', "theora")
        oggmux = gst.element_factory_make('oggmux', "oggmux")
        
        pipe.add(pos2text)
        pipe.add(textFilt)
        pipe.add(theora)
        pipe.add(oggmux)
        
        gst.element_link_many(theora, oggmux)
        gst.element_link_many(pos2text, textFilt, oggmux) #<----- WHY IS THIS LINK FAILING!
    
        pos_sink = gst.GhostPad("pos_sink", pos2text.get_pad('sink'))
        vid_sink = gst.GhostPad('video_sink', theora.get_pad("sink"))

        ogg_src = gst.GhostPad("ogg_src", oggmux.get_pad('src'))

        pipe.add_pad(pos_sink)
        pipe.add_pad(vid_sink)
        pipe.add_pad(ogg_src)

        self.pipeline = pipe
Example #30
    def _gst_init(self):
        # self._videosink will receive the buffers so we can upload them to GPU
        if PY2:
            self._videosink = gst.element_factory_make('appsink', 'videosink')
            self._videosink.set_property('caps', gst.Caps(_VIDEO_CAPS))
        else:
            self._videosink = gst.ElementFactory.make('appsink', 'videosink')
            self._videosink.set_property('caps',
                 gst.caps_from_string(_VIDEO_CAPS))

        self._videosink.set_property('async', True)
        self._videosink.set_property('drop', True)
        self._videosink.set_property('qos', True)
        self._videosink.set_property('emit-signals', True)
        self._videosink.connect('new-' + BUF_SAMPLE, partial(
            _gst_new_buffer, ref(self)))

        # playbin, takes care of all, loading, playing, etc.
        # XXX playbin2 have some issue when playing some video or streaming :/
        #self._playbin = gst.element_factory_make('playbin2', 'playbin')
        if PY2:
            self._playbin = gst.element_factory_make('playbin', 'playbin')
        else:
            self._playbin = gst.ElementFactory.make('playbin', 'playbin')
        self._playbin.set_property('video-sink', self._videosink)

        # gstreamer bus, to attach and listen to gst messages
        self._bus = self._playbin.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect('message', _on_gst_message)
        self._bus.connect('message::eos', partial(
            _on_gst_eos, ref(self)))
Example #31
    def setupGst(self):
        print "Setting up gstreamer pipeline"
        self.gstWindowId = self.video_container.winId()

        self.player = gst.Pipeline("player")
        self.tee = gst.element_factory_make("tee")
        sinkx = gst.element_factory_make("ximagesink", 'sinkx_overview')
        fcs = gst.element_factory_make('ffmpegcolorspace')
        caps = gst.caps_from_string('video/x-raw-yuv')
        self.capture_enc = gst.element_factory_make("jpegenc")
        self.capture_sink = gst.element_factory_make("capturesink")
        self.capture_sink_queue = gst.element_factory_make("queue")
        self.resizer =  gst.element_factory_make("videoscale")

        # Video render stream
        self.player.add(      self.source, self.tee)
        gst.element_link_many(self.source, self.tee)

        self.player.add(fcs,                 self.resizer, sinkx)
        gst.element_link_many(self.tee, fcs, self.resizer, sinkx)

        self.player.add(                self.capture_sink_queue, self.capture_enc, self.capture_sink)
        gst.element_link_many(self.tee, self.capture_sink_queue, self.capture_enc, self.capture_sink)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
Example #32
File: settings.py  Project: qlf/Pitivi
 def getAudioCaps(self):
     """ Returns the GstCaps corresponding to the audio settings """
     astr = "rate=%d,channels=%d" % (self.audiorate, self.audiochannels)
     astrcaps = gst.caps_from_string("audio/x-raw-int,%s;audio/x-raw-float,%s" % (astr, astr))
     if self.aencoder:
         return get_compatible_sink_caps(self.aencoder, astrcaps)
     return astrcaps
Example #33
    def __init__(self, uri, process = None, hopsize = 512,
            caps = None):
        if uri.startswith('/'):
            from urllib import quote
            uri = 'file://'+quote(uri)
        src = gst.element_factory_make('uridecodebin')
        src.set_property('uri', uri)
        src.connect('pad-added', self.source_pad_added_cb)
        conv = gst.element_factory_make('audioconvert')
        self.conv = conv
        rsmpl = gst.element_factory_make('audioresample')
        capsfilter = gst.element_factory_make('capsfilter')
        if caps:
            capsfilter.set_property('caps', gst.caps_from_string(caps))
        sink = AubioSink("AubioSink", process = process)
        sink.set_property('hopsize', hopsize) # * calcsize('f'))

        self.pipeline = gst.Pipeline()

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self.on_eos)

        self.apad = conv.get_pad('sink')

        self.pipeline.add(src, conv, rsmpl, capsfilter, sink)

        gst.element_link_many(conv, rsmpl, capsfilter, sink)

        self.mainloop = gobject.MainLoop()
        self.pipeline.set_state(gst.STATE_PLAYING)
Example #34
class KinectDepthSrc(gst.BaseSrc):
    """ Depth """
    #here we register our plugin details
    __gstdetails__ = (
        "Kinect depth source",
        "kinectdepthsrc.py",
        "Source element for Kinect depth",
        "Oleksandr Lavrushchenko <*****@*****.**>")
 
    _src_template = gst.PadTemplate("src",
                                    gst.PAD_SRC,
                                    gst.PAD_ALWAYS,
                                    gst.caps_from_string("video/x-raw-gray,bpp=(int)16,depth=(int)16,width=[ 1, 2147483647 ],height=[ 1, 2147483647 ],framerate=[ 0/1, 2147483647/1 ]"))

    __gsttemplates__ = (_src_template,)

    def __init__(self, *args, **kwargs):
        gst.BaseSrc.__init__(self)
        gst.info('creating srcpad')
        self.src_pad = gst.Pad(self._src_template)
        self.src_pad.use_fixed_caps()

    def do_create(self, offset, length):
        depth, timestamp = freenect.sync_get_depth()
        databuf = numpy.getbuffer(depth)
        self.buf = gst.Buffer(databuf)
        self.buf.timestamp = 0
        self.buf.duration = pow(2, 63) -1
        return gst.FLOW_OK, self.buf
Example #35
 def do_set_property(self, property, value):
     if property.name == "audiorate":
         caps = gst.caps_from_string("audio/x-raw-int, rate=%d" % (value))
         self.capsfilter.set_property("caps", caps)
         self.audiorate_property = value
     else:
         Log.warning('audioresample unknown property %s' % property.name)
Example #36
    def __init__(self, array, samplerate):
        self.appsrc = gst.element_factory_make("appsrc")
        self.pos = 0
        self.samplerate = samplerate
        if array.ndim == 1:
            array.resize((array.shape[0], 1))
        self.length, self.channels = array.shape
        self.array = array.astype("float32")
        self.per_sample = gst.SECOND // samplerate
        self.fac = self.channels * array.dtype.itemsize
        #self.appsrc.set_property("size", (self.length * self.channels *
        #                                  array.dtype.itemsize))
        self.appsrc.set_property("format", gst.FORMAT_TIME)
        capstr = """audio/x-raw-float,
                    width=%d,
                    depth=%d,
                    rate=%d,
                    channels=%d,
                    endianness=(int)1234,
                    signed=true""" % (self.array.dtype.itemsize*8,
                                      self.array.dtype.itemsize*8,
                                      self.samplerate,
                                      self.channels)
        self.appsrc.set_property("caps", gst.caps_from_string(capstr))
        self.appsrc.set_property("stream-type", 0)  # Seekable
        self.appsrc.set_property('block', True)

        self.appsrc.connect("need-data", self.need_data)
        self.appsrc.connect("seek-data", self.seek_data)
        self.appsrc.connect("enough-data", self.enough_data)
Example #37
    def music_delivery(self, session, frames, frame_size, num_frames,
                       sample_type, sample_rate, channels):
        """Callback used by pyspotify"""
        # pylint: disable = R0913
        # Too many arguments (8/5)
        assert sample_type == 0, 'Expects 16-bit signed integer samples'
        capabilites = """
            audio/x-raw-int,
            endianness=(int)1234,
            channels=(int)%(channels)d,
            width=(int)16,
            depth=(int)16,
            signed=(boolean)true,
            rate=(int)%(sample_rate)d
        """ % {
            'sample_rate': sample_rate,
            'channels': channels,
        }
        buffer_ = gst.Buffer(bytes(frames))
        buffer_.set_caps(gst.caps_from_string(capabilites))

        if self.audio.emit_data(buffer_).get():
            return num_frames
        else:
            return 0
Example #38
File: Event.py  Project: Barbosabyte/bard
    def SetProperties(self):
        """
		Sets basic Event properties like location, start, duration, etc.
		"""
        if self.file:
            if self.single_decode_bin:
                self.gnlsrc.remove(self.single_decode_bin)
                self.single_decode_bin.set_state(gst.STATE_NULL)

            Globals.debug("creating SingleDecodeBin")
            caps = gst.caps_from_string("audio/x-raw-int;audio/x-raw-float")
            f = PlatformUtils.pathname2url(self.GetAbsFile())
            Globals.debug("file uri is:", f)
            self.single_decode_bin = SingleDecodeBin(caps=caps, uri=f)
            self.gnlsrc.add(self.single_decode_bin)
            Globals.debug("setting event properties:")
            propsDict = {
                "caps": caps,
                "start": long(self.start * gst.SECOND),
                "duration": long(self.duration * gst.SECOND),
                "media-start": long(self.offset * gst.SECOND),
                "media-duration": long(self.duration * gst.SECOND),
                "priority": 2,
            }

            for prop, value in propsDict.iteritems():
                self.gnlsrc.set_property(prop, value)
                Globals.debug("\t", prop, "=", value)
Example #39
    def _gst_init(self):
        # self._videosink will receive the buffers so we can upload them to GPU
        if PY2:
            self._videosink = gst.element_factory_make('appsink', 'videosink')
            self._videosink.set_property('caps', gst.Caps(_VIDEO_CAPS))
        else:
            self._videosink = gst.ElementFactory.make('appsink', 'videosink')
            self._videosink.set_property('caps',
                                         gst.caps_from_string(_VIDEO_CAPS))

        self._videosink.set_property('async', True)
        self._videosink.set_property('drop', True)
        self._videosink.set_property('qos', True)
        self._videosink.set_property('emit-signals', True)
        self._videosink.connect('new-' + BUF_SAMPLE,
                                partial(_gst_new_buffer, ref(self)))

        # playbin, takes care of all, loading, playing, etc.
        # XXX playbin2 have some issue when playing some video or streaming :/
        #self._playbin = gst.element_factory_make('playbin2', 'playbin')
        if PY2:
            self._playbin = gst.element_factory_make('playbin', 'playbin')
        else:
            self._playbin = gst.ElementFactory.make('playbin', 'playbin')
        self._playbin.set_property('video-sink', self._videosink)

        # gstreamer bus, to attach and listen to gst messages
        self._bus = self._playbin.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect('message', _on_gst_message)
        self._bus.connect('message::eos', partial(_on_gst_eos, ref(self)))
Example #40
 def config(self, dict):
     self.video_src.set_property("pattern", int(dict["pattern"]))
     caps = gst.caps_from_string(
         "video/x-raw-yuv, width=%d, height=%d;"
         "video/x-raw-rgb, width=%d, height=%d"
         % (int(dict["width"]), int(dict["height"]), int(dict["width"]), int(dict["height"]))
     )
     self.capsfilter.set_property("caps", caps)
Example #41
File: overlay.py  Project: flyapen/UgFlu
 def do_create(self, offset, length):
     self.debug("Pushing buffer")
     gstBuf = gst.Buffer(self.imgBuf)
     padcaps = gst.caps_from_string(self.capsStr)
     gstBuf.set_caps(padcaps)
     gstBuf.timestamp = 0
     gstBuf.duration = pow(2, 63) -1
     return gst.FLOW_OK, gstBuf
Example #42
 def __init__(self, *args, **kwargs):
     self.caps = gst.caps_from_string(
         'audio/x-raw-int, rate=7600, endianness=1234, channels=1, width=16, depth=16, signed=true'
     )
     gst.BaseSrc.__init__(self)
     gst.info("Creating Kaicong src pad")
     self.src_pad = gst.Pad(self._src_template)
     self.src_pad.use_fixed_caps()
Example #43
class lal_fixodc(gst.BaseTransform):
    __gstdetails__ = ("Fix ODC sample format", "Generic",
                      "Type-casts float to int", __author__)

    __gproperties__ = {}

    __gsttemplates__ = (
        gst.PadTemplate(
            "sink", gst.PAD_SINK, gst.PAD_ALWAYS,
            gst.caps_from_string("audio/x-raw-float, " +
                                 "rate = (int) [1, MAX], " +
                                 "channels = (int) 1, " +
                                 "endianness = (int) BYTE_ORDER, " +
                                 "width = (int) 32")),
        gst.PadTemplate(
            "src", gst.PAD_SRC, gst.PAD_ALWAYS,
            gst.caps_from_string("audio/x-raw-int, " +
                                 "rate = (int) [1, MAX], " +
                                 "channels = (int) 1, " +
                                 "endianness = (int) BYTE_ORDER, " +
                                 "width = (int) 32," + "depth = (int) 32," +
                                 "signed = (bool) false")))

    def __init__(self):
        super(lal_fixodc, self).__init__()
        self.set_gap_aware(True)

    def do_transform_caps(self, direction, caps):
        if direction == gst.PAD_SRC:
            tmpltcaps = self.get_pad("sink").get_pad_template_caps()
        elif direction == gst.PAD_SINK:
            tmpltcaps = self.get_pad("src").get_pad_template_caps()
        else:
            raise AssertionError
        rate, = [s["rate"] for s in caps]
        result = gst.Caps()
        for s in tmpltcaps:
            s = s.copy()
            s["rate"] = rate
            result.append_structure(s)
        return result

    def do_transform(self, ibuf, obuf):
        pipeio.array_from_audio_buffer(
            obuf)[:] = pipeio.array_from_audio_buffer(ibuf)
        return gst.FLOW_OK
Example #44
 def testCaps(self):
     caps = gst.caps_from_string(
         'video/x-raw-yuv,width=10,framerate=5.0;video/x-raw-rgb,'
         'width=15,framerate=10.0')
     self.assertEquals(
         gstreamer.caps_repr(caps), 'video/x-raw-yuv, width=(int)10, '
         'framerate=(double)5; video/x-raw-rgb, '
         'width=(int)15, framerate=(double)10')
Example #45
File: gstreamer.py  Project: bok/mopidy
 def deliver_data(self, caps_string, data):
     """Deliver audio data to be played"""
     app_src = self.gst_pipeline.get_by_name('appsrc')
     caps = gst.caps_from_string(caps_string)
     buffer_ = gst.Buffer(buffer(data))
     buffer_.set_caps(caps)
     app_src.set_property('caps', caps)
     app_src.emit('push-buffer', buffer_)
Example #46
 def config(self, dict):
     self.video_src.set_property("device", dict["v4l2_device"])
     caps = gst.caps_from_string(
         "video/x-raw-yuv, pixel-aspect-ratio=1/1, width=%d, height=%d;"
         "video/x-raw-rgb, pixel-aspect-ratio=1/1, width=%d, height=%d" %
         (int(dict["width"]), int(dict["height"]), int(
             dict["width"]), int(dict["height"])))
     self.capsfilter.set_property("caps", caps)
Example #47
 def config(self, dict):
     num, den = Fract.fromdecimal(dict["framerate"])
     caps = gst.caps_from_string(
             "video/x-raw-yuv, width=%d, height=%d, framerate=%d/%d" % (
                 int(dict["width"]), int(dict["height"]), num, den
             )
     )
     self.capsfilter.set_property("caps", caps)
Example #48
 def testCaps(self):
     caps = gst.caps_from_string(
         'video/x-raw-yuv,width=10,framerate=5.0;video/x-raw-rgb,'
         'width=15,framerate=10.0')
     self.assertEquals(gstreamer.caps_repr(caps),
         'video/x-raw-yuv, width=(int)10, '
                       'framerate=(double)5; video/x-raw-rgb, '
                       'width=(int)15, framerate=(double)10')
Example #49
 def do_create(self, offset, length):
     self.debug("Pushing buffer")
     gstBuf = gst.Buffer(self.imgBuf)
     padcaps = gst.caps_from_string(self.capsStr)
     gstBuf.set_caps(padcaps)
     gstBuf.timestamp = 0
     gstBuf.duration = self.duration * gst.SECOND
     return gst.FLOW_OK, gstBuf
Example #50
File: Media.py  Project: papachappa/sbc
    def __init__(self):
        self.logger = logger

        # initiate receive pipeline
        self.bands = 80
        # /home/papachappa/robot/back_sbc_tests/sipp_remote_library/scenario/scenario_SBC/Transcode/uac_init_invite_A-B/pcap/pcma
        self.receive_pipe = gst.Pipeline("receive_pipe")
        self.receive_bus = self.receive_pipe.get_bus()
        self.receive_bus.set_flushing(True)

        file_src = gst.element_factory_make("filesrc", "file_src")
        file_src.set_property("location", RECEIVED_PCMA_FILE)
        self.receive_pipe.add(file_src)
        


        #sourcepad = gst_element_get_static_pad(source, "src")
        #gst.Element.add_pad()
        #gst_pad_set_caps (sourcepad, "audio/x-alaw", "rate", 8000, "channels"1)


        #caps = gst.Caps("audio/x-alaw, rate=8000, channels=1")
        #capsFilter = gst.element_factory_make("capsfilter")
        #capsFilter.props.caps = caps
        #self.receive_pipe.add(caps) 
        
        #self.sinkpadtemplate  = gst.PadTemplate("sink", gst.PAD_SINK, gst.PAD_ALWAYS, gst.Caps("audio/x-alaw, rate=8000, channels=1"))
        #self.sinkpad = gst.Pad(self.sinkpadtemplate, "sink")
        #s = self.add_pad(self.sinkpad)
        #gst.Element.add_pad(s)

        receive_alaw = gst.element_factory_make("capsfilter", "receive_alaw")
        receive_alaw.set_property("caps", gst.caps_from_string("audio/x-alaw, rate=8000, channels=1"))
        self.receive_pipe.add(receive_alaw)

        
        receive_dec = gst.element_factory_make("alawdec", "receive_dec")
        self.receive_pipe.add(receive_dec)

        receive_conv = gst.element_factory_make("audioconvert", "receive_conv")
        self.receive_pipe.add(receive_conv)

        receive_level = gst.element_factory_make("level", "receive_level")
        self.receive_pipe.add(receive_level)

        receive_spectrum = gst.element_factory_make("spectrum", "receive_spectrum")
        receive_spectrum.set_property("bands", self.bands)
        receive_spectrum.set_property("message-phase", True)
        self.receive_pipe.add(receive_spectrum)
        
        receive_sink = gst.element_factory_make("fakesink", "receive_sink")
        self.receive_pipe.add(receive_sink)

        gst.element_link_many(file_src, receive_alaw,
                              receive_dec, receive_conv, receive_level,
                              receive_spectrum, receive_sink)
Example #51
    def __init__(self, sessionid, audio_format, bitrate):
        gst.Bin.__init__(self)
        # self.taginjector = gst.element_factory_make("taginject")
        # self.taginjector.set_property("tags","title=\"asset_id=123\"")

        capsfilter = gst.element_factory_make("capsfilter")
        volume = gst.element_factory_make("volume")
        volume.set_property("volume", settings.MASTER_VOLUME)
        shout2send = gst.element_factory_make("shout2send")
        shout2send.set_property("username", settings.ICECAST_SOURCE_USERNAME)
        shout2send.set_property("password", settings.ICECAST_SOURCE_PASSWORD)
        shout2send.set_property("mount",
                                icecast2.mount_point(sessionid, audio_format))
        # shout2send.set_property("streamname","initial name")
        # self.add(capsfilter, volume, self.taginjector, shout2send)
        self.add(capsfilter, volume, shout2send)
        capsfilter.link(volume)

        if audio_format.upper() == "MP3":
            capsfilter.set_property(
                "caps",
                gst.caps_from_string(
                    "audio/x-raw-int,rate=44100,channels=2,width=16,depth=16,signed=(boolean)true"))
            lame = gst.element_factory_make("lame")
            lame.set_property("bitrate", int(bitrate))
            logger.debug("roundstreamsink: bitrate: " + str(int(bitrate)))
            self.add(lame)
            #gst.element_link_many(volume, lame, self.taginjector, shout2send)
            gst.element_link_many(volume, lame, shout2send)
        elif audio_format.upper() == "OGG":
            capsfilter.set_property(
                "caps",
                gst.caps_from_string(
                    "audio/x-raw-float,rate=44100,channels=2,width=32"))
            vorbisenc = gst.element_factory_make("vorbisenc")
            oggmux = gst.element_factory_make("oggmux")
            self.add(vorbisenc, oggmux)
            #gst.element_link_many(volume, vorbisenc, oggmux, self.taginjector, shout2send)
            gst.element_link_many(volume, vorbisenc, oggmux, shout2send)
        else:
            raise "Invalid format"

        pad = capsfilter.get_pad("sink")
        ghostpad = gst.GhostPad("sink", pad)
        self.add_pad(ghostpad)
Example #52
 def set_sampling_rate(self, sr):
     """Sets the sampling rate of the capture device
     Sampling rate must be given as an integer for example 16000 for
     setting 16Khz sampling rate
     The sampling rate would be set in the device to the nearest available"""
     self.pause_grabbing()
     caps_str = "audio/x-raw-int,rate=%d,channels=1,depth=16" % (sr, )
     self.caps1.set_property("caps", gst.caps_from_string(caps_str))
     self.resume_grabbing()
Example #53
 def getAudioCaps(self):
     """ Returns the GstCaps corresponding to the audio settings """
     # TODO: Figure out why including 'depth' causes pipeline failures:
     astr = "rate=%d,channels=%d" % (self.audiorate, self.audiochannels)
     caps_str = "audio/x-raw-int,%s;audio/x-raw-float,%s" % (astr, astr)
     audio_caps = gst.caps_from_string(caps_str)
     if self.aencoder:
         return get_compatible_sink_caps(self.aencoder, audio_caps)
     return audio_caps
Example #55
 def do_get_caps(self):
     print "Get caps"
     if not self.caps:
         if self.of:
             caps_str = self.of.readline()
             self.caps = gst.caps_from_string(caps_str.rstrip('\n'))
         else:
             return None
     return self.caps
Example #56
def source_setup(pipeline, source, data):
    print ("Source has been created. Configuring")
    data.app_source = source
    #Configure appsrc
    audio_caps_text = AUDIO_CAPS.format(SAMPLE_RATE)
    audio_caps = gst.caps_from_string(audio_caps_text)
    source.set_property("caps", audio_caps)
    source.connect("need-data", start_feed, data)
    source.connect("enough-data", stop_feed, data)
Example #57
 def do_get_caps(self):
     print "Get caps"
     if not self.caps:
         if self.of:
             caps_str = self.of.readline()
             self.caps = gst.caps_from_string(caps_str.rstrip("\n"))
         else:
             return None
     return self.caps
Example #58
 def set_sampling_rate(self, sr):
     ''' Sets the sampling rate of the logging device. Sampling
     rate must be given as an integer for example 16000 for setting
     16Khz sampling rate The sampling rate would be set in the
     device to the nearest available. '''
     self.pause_grabbing()
     caps_str = 'audio/x-raw-int,rate=%d,channels=%d,depth=16' % (
         sr, self.channels)
     self.caps1.set_property('caps', gst.caps_from_string(caps_str))
     self.resume_grabbing()