    def _create_videobin(self):
        queue = Gst.ElementFactory.make("queue", "videoqueue")
        queue.set_property("max-size-time", 5000000000) # 5 seconds
        queue.set_property("max-size-bytes", 33554432) # 32mb
        queue.connect("overrun", self._log_queue_overrun)

        scale = Gst.ElementFactory.make("videoscale", "vbscale")

        scalecapsfilter = Gst.ElementFactory.make("capsfilter", "scalecaps")

        scalecaps = Gst.Caps('video/x-raw-yuv,width=160,height=120')
        scalecapsfilter.set_property("caps", scalecaps)

        colorspace = Gst.ElementFactory.make("ffmpegcolorspace", "vbcolorspace")

        enc = Gst.ElementFactory.make("theoraenc", "vbenc")
        enc.set_property("quality", 16)

        mux = Gst.ElementFactory.make("oggmux", "vbmux")

        sink = Gst.ElementFactory.make("filesink", "vbfile")
        sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))

        self._videobin = Gst.Bin("videobin")
        self._videobin.add(queue, scale, scalecapsfilter, colorspace, enc, mux, sink)

        queue.link(scale)
        scale.link_pads(None, scalecapsfilter, "sink")
        scalecapsfilter.link_pads("src", colorspace, None)
        Gst.element_link_many(colorspace, enc, mux, sink)

        pad = queue.get_static_pad("sink")
        self._videobin.add_pad(Gst.GhostPad("sink", pad))
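
The two convenience calls used above, Bin.add() with several elements and Gst.element_link_many(), come from the gst 0.10 Python bindings; they do not exist in the GStreamer 1.0 (gi.repository.Gst) bindings. A minimal sketch of a replacement helper, assuming the 1.0 bindings (the helper name add_and_link_many is hypothetical):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

def add_and_link_many(bin_, *elements):
    # Add the elements to the bin one at a time, then link them in order,
    # mirroring what 0.10's add(*elements) + element_link_many() did.
    for element in elements:
        bin_.add(element)
    for upstream, downstream in zip(elements, elements[1:]):
        if not upstream.link(downstream):
            raise RuntimeError("could not link %s -> %s"
                               % (upstream.get_name(), downstream.get_name()))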
Example 2
    def openfile(self, filename, aModel):
        self.images = list()
        self.errors = list()
        self.fileName = filename
        self.codec = ""
        if (self.player != None):
            self.player.set_state(gst.STATE_NULL)

        self.__isEndOfStream = False
        self.player = gst.element_factory_make("playbin", "player")
        videoBin = gst.Bin("video")
        videoFilter = gst.element_factory_make("capsfilter", "videofilter")
        videoBin.add(videoFilter)
        videoFilter.set_property("caps",
                                 gst.Caps("video/x-raw-rgb, depth=24, bpp=24"))
        ghostPad = gst.GhostPad("sink", videoFilter.get_pad("sink"))
        videoBin.add_pad(ghostPad)
        videoSink = gst.element_factory_make("fakesink", "videosink")
        videoBin.add(videoSink)
        pad = videoSink.get_pad("sink")
        pad.add_buffer_probe(self.__onBufferProbe)
        gst.element_link_many(videoFilter, videoSink)
        self.player.set_property("video-sink", videoBin)

        self.bus = self.player.get_bus()
        self.bus.add_signal_watch()
        self.watchID = self.bus.connect("message", self.__onMessage)
        self.player.set_property("uri", "file://" + filename)
        self.player.set_state(gst.STATE_PAUSED)
        self.model = aModel
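
The example installs a buffer probe on the fakesink pad but does not show the callback. A minimal sketch of what __onBufferProbe could look like, assuming the gst 0.10 bindings used above (probe callbacks receive the pad and the buffer and return True to let data through); the frame handling is illustrative, not the original project's logic:

    def __onBufferProbe(self, pad, buffer):
        # Illustrative only: record each decoded RGB frame for later use.
        caps = buffer.caps[0] if buffer.caps else None
        if caps is not None:
            width = caps["width"]
            height = caps["height"]
            self.images.append((width, height, buffer.data))
        # Returning True lets the buffer continue to the sink.
        return True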
Example 4
    def build_pipeline(self, video_src, video_sink, pipeline):
        self._visualiser = gst.element_factory_make(self._visualisation)
        self._color_space = gst.element_factory_make("ffmpegcolorspace")
        self._audioconvert = gst.element_factory_make("audioconvert")
        pipeline.add(video_src, self._audioconvert, self._visualiser,
                     self._color_space, video_sink)
        gst.element_link_many(video_src, self._audioconvert, self._visualiser,
                              self._color_space, video_sink)
    def _create_xbin(self):
        scale = Gst.ElementFactory.make("videoscale")
        cspace = Gst.ElementFactory.make("ffmpegcolorspace")
        xsink = Gst.ElementFactory.make("ximagesink", "xsink")
        xsink.set_property("force-aspect-ratio", True)

        # http://thread.gmane.org/gmane.comp.video.Gstreamer.devel/29644
        xsink.set_property("sync", False)

        self._xbin = Gst.Bin("xbin")
        self._xbin.add(scale, cspace, xsink)
        Gst.element_link_many(scale, cspace, xsink)

        pad = scale.get_static_pad("sink")
        self._xbin.add_pad(Gst.GhostPad("sink", pad))
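
The same ghost-pad pattern under the GStreamer 1.0 bindings looks slightly different: Gst.GhostPad is built with .new(), Bin.add() takes one element at a time, and ffmpegcolorspace was renamed videoconvert. A minimal sketch, assuming the gi.repository.Gst 1.0 bindings (element and bin names are illustrative):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)

scale = Gst.ElementFactory.make("videoscale", None)
convert = Gst.ElementFactory.make("videoconvert", None)
xsink = Gst.ElementFactory.make("ximagesink", "xsink")
xsink.set_property("force-aspect-ratio", True)

xbin = Gst.Bin.new("xbin")
for element in (scale, convert, xsink):
    xbin.add(element)
scale.link(convert)
convert.link(xsink)

# Expose the scaler's sink pad so the bin can be linked like a single element.
xbin.add_pad(Gst.GhostPad.new("sink", scale.get_static_pad("sink")))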
    def _create_photobin(self):
        queue = Gst.ElementFactory.make("queue", "pbqueue")
        queue.set_property("leaky", True)
        queue.set_property("max-size-buffers", 1)

        colorspace = Gst.ElementFactory.make("ffmpegcolorspace", "pbcolorspace")
        jpeg = Gst.ElementFactory.make("jpegenc", "pbjpeg")

        sink = Gst.ElementFactory.make("fakesink", "pbsink")
        sink.connect("handoff", self._photo_handoff)
        sink.set_property("signal-handoffs", True)

        self._photobin = Gst.Bin("photobin")
        self._photobin.add(queue, colorspace, jpeg, sink)

        Gst.element_link_many(queue, colorspace, jpeg, sink)

        pad = queue.get_static_pad("sink")
        self._photobin.add_pad(Gst.GhostPad("sink", pad))
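
The fakesink above emits "handoff" for every encoded buffer. A minimal sketch of the handler it connects, assuming the 0.10-style handoff signature (fakesink, buffer, pad); writing the JPEG bytes to a fixed path is illustrative, not the original behaviour:

    def _photo_handoff(self, fakesink, buffer, pad):
        # Illustrative only: dump the JPEG-encoded frame to disk.
        with open("/tmp/photo.jpg", "wb") as jpeg_file:
            jpeg_file.write(buffer.data)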
Example 7
def set_pipeline():
    # create the gstreamer elements; pulse source mp3 192kbps cbr
    faudiosrc = Gst.ElementFactory.make("pulsesrc", "pulsesrc")
    if device is None:
        faudiosrc.set_property("device", "{0}".format(default_device))
    else:
        faudiosrc.set_property("device", "{0}".format(device))
    faudioamp = Gst.ElementFactory.make('audioamplify', "audioamplify")
    faudioamp.set_property("amplification", 1)
    faudiocon = Gst.ElementFactory.make("audioconvert", "audioconvert")
    faudioenc = Gst.ElementFactory.make("lamemp3enc", "lamemp3enc")
    faudioenc.set_property("target", 1)
    faudioenc.set_property("bitrate", 192)
    faudioenc.set_property("cbr", True)
    faudiosink = Gst.ElementFactory.make("filesink", "filesink")
    faudiosink.set_property("location", "{0}".format(FAILOVER_FILE))
    # add elements to the pipeline
    pipe.add(faudiosrc, faudioamp, faudiocon, faudioenc, faudiosink)
    Gst.element_link_many(faudiosrc, faudioamp, faudiocon, faudioenc, faudiosink)
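
set_pipeline() only assembles the failover recorder. A minimal sketch of driving it, assuming pipe is a Gst.Pipeline and the GStreamer 1.0 state and message enums (run_pipeline is a hypothetical helper, not part of the original script):

def run_pipeline():
    pipe.set_state(Gst.State.PLAYING)
    bus = pipe.get_bus()
    # Block until an error or end-of-stream, then shut the pipeline down.
    msg = bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                                 Gst.MessageType.ERROR | Gst.MessageType.EOS)
    if msg is not None and msg.type == Gst.MessageType.ERROR:
        err, debug = msg.parse_error()
        print("GStreamer error: {0} ({1})".format(err, debug))
    pipe.set_state(Gst.State.NULL)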
Example 8
    def build_pipeline(self, video_src, video_sink, pipeline):
        # Create the pipeline elements
        self._decodebin = gst.element_factory_make("decodebin2")
        self._autoconvert = gst.element_factory_make("autoconvert")

        videocap = gst.Caps("video/x-raw-yuv")
        self._filter = gst.element_factory_make("capsfilter")
        self._filter.set_property("caps", videocap)

        # Converts the video from one colorspace to another
        self._color_space = gst.element_factory_make("ffmpegcolorspace")

        self._queue1 = gst.element_factory_make("queue")

        pipeline.add(video_src, self._decodebin, self._autoconvert,
                     self._queue1, self._filter, self._color_space, video_sink)

        # Link everything we can link now
        gst.element_link_many(video_src, self._decodebin)
        gst.element_link_many(self._queue1, self._autoconvert, self._filter,
                              self._color_space, video_sink)
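
decodebin2 creates its source pads only after the stream has been typefound, which is why the code above can only link video_src to the decodebin up front. A minimal sketch of the dynamic-pad handler that would complete the link into self._queue1, assuming the gst 0.10 bindings used here (the method name and connect call are illustrative):

    def _on_decodebin_pad_added(self, decodebin, pad):
        # Link the newly exposed decoded pad into the rest of the chain.
        sinkpad = self._queue1.get_pad("sink")
        if not sinkpad.is_linked():
            pad.link(sinkpad)

    # Usually hooked up right after creating the decodebin:
    #     self._decodebin.connect("pad-added", self._on_decodebin_pad_added)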
Example 9
    def _create_audiobin(self):
        ''' Assemble all the pieces we need. '''
        src = Gst.ElementFactory.make("alsasrc", "absrc")

        # attempt to use direct access to the 0,0 device, solving
        # some A/V sync issues
        src.set_property("device", "plughw:0,0")
        hwdev_available = src.set_state(Gst.State.PAUSED) != \
            Gst.StateChangeReturn.FAILURE
        src.set_state(Gst.State.NULL)
        if not hwdev_available:
            src.set_property("device", "default")

        srccaps = Gst.Caps(
            "audio/x-raw-int,rate=16000,channels=1,depth=16")

        # guarantee perfect stream, important for A/V sync
        rate = Gst.ElementFactory.make("audiorate")

        # without a buffer here, gstreamer struggles at the start
        # of the recording and then the A/V sync is bad for the
        # whole video (possibly a gstreamer/ALSA bug -- even if it
        # gets caught up, it should be able to resync without
        # problem)
        queue = Gst.ElementFactory.make("queue", "audioqueue")
        queue.set_property("leaky", True)  # prefer fresh data
        queue.set_property("max-size-time", 5000000000)  # 5 seconds
        queue.set_property("max-size-buffers", 500)

        enc = Gst.ElementFactory.make("wavenc", "abenc")

        sink = Gst.ElementFactory.make("filesink", "absink")
        sink.set_property("location", self.capture_file)

        self._audiobin = Gst.Bin("audiobin")
        self._audiobin.add(src, rate, queue, enc, sink)

        src.link(rate, srccaps)
        Gst.element_link_many(rate, queue, enc, sink)
    def _create_audiobin(self):
        src = Gst.ElementFactory.make("alsasrc", "absrc")

        # attempt to use direct access to the 0,0 device, solving some A/V
        # sync issues
        src.set_property("device", "plughw:0,0")
        hwdev_available = src.set_state(Gst.STATE_PAUSED) != Gst.STATE_CHANGE_FAILURE
        src.set_state(Gst.STATE_NULL)
        if not hwdev_available:
            src.set_property("device", "default")

        srccaps = Gst.Caps("audio/x-raw-int,rate=16000,channels=1,depth=16")

        # guarantee perfect stream, important for A/V sync
        rate = Gst.ElementFactory.make("audiorate")

        # without a buffer here, Gstreamer struggles at the start of the
        # recording and then the A/V sync is bad for the whole video
        # (possibly a Gstreamer/ALSA bug -- even if it gets caught up, it
        # should be able to resync without problem)
        queue = Gst.ElementFactory.make("queue", "audioqueue")
        queue.set_property("leaky", True) # prefer fresh data
        queue.set_property("max-size-time", 5000000000) # 5 seconds
        queue.set_property("max-size-buffers", 500)
        queue.connect("overrun", self._log_queue_overrun)

        enc = Gst.ElementFactory.make("wavenc", "abenc")

        sink = Gst.ElementFactory.make("filesink", "absink")
        sink.set_property("location", os.path.join(Instance.instancePath, "output.wav"))

        self._audiobin = Gst.Bin("audiobin")
        self._audiobin.add(src, rate, queue, enc, sink)

        src.link(rate, srccaps)
        Gst.element_link_many(rate, queue, enc, sink)
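
The video and audio queues both connect their "overrun" signal to self._log_queue_overrun, which is not shown. A minimal sketch of such a handler, assuming it only reports the queue's fill level ("overrun" passes just the queue element, and current-level-buffers / current-level-time are standard queue properties; the logging itself is illustrative):

    def _log_queue_overrun(self, queue):
        filled_buffers = queue.get_property("current-level-buffers")
        filled_time = queue.get_property("current-level-time")
        print("queue %s overrun: %d buffers, %d ns queued"
              % (queue.get_name(), filled_buffers, filled_time))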
Example 11
    def build_pipeline(self, video_src, video_sink, pipeline):
        self._decodebin = gst.element_factory_make("decodebin2")
        self._visualiser = gst.element_factory_make(self._visualisation)
        self._color_space = gst.element_factory_make("ffmpegcolorspace")
        self._audioconvert = gst.element_factory_make("audioconvert")
        self._audiosink = gst.element_factory_make("autoaudiosink")
        self._tee = gst.element_factory_make('tee', "tee")
        self._queue1 = gst.element_factory_make("queue")
        self._queue2 = gst.element_factory_make("queue")
        pipeline.add(video_src, self._decodebin, self._audioconvert, self._tee,
                     self._queue1, self._audiosink, self._queue2,
                     self._visualiser, self._color_space, video_sink)
        gst.element_link_many(video_src, self._decodebin)
        gst.element_link_many(self._audioconvert, self._tee)
        self._tee.link(self._queue1)
        self._queue1.link(self._audiosink)
        self._tee.link(self._queue2)
        gst.element_link_many(self._queue2, self._visualiser,
                              self._color_space, video_sink)
Example 12
    def __init__(self,
                 name,
                 device,
                 image=None,
                 driver_type="v4lsrc",
                 size=[1024, 768],
                 framerate="25/1"):

        Gst.Bin.__init__(self, name)
        self.eph_error = False
        self.let_pass = False
        self.removing = False

        self.devicepath = device
        self.driver_type = driver_type

        # Create elements
        self.image = Gst.ElementFactory.make('videotestsrc', 'image')
        #for sending eos
        self.src1 = self.image

        self.device = Gst.ElementFactory.make(self.driver_type, 'device')
        scale = Gst.ElementFactory.make('videoscale')
        rate = Gst.ElementFactory.make('videorate')
        self.selector = Gst.ElementFactory.make('input-selector', 'selector')
        q0 = Gst.ElementFactory.make('queue', 'q2branchimg')
        q1 = Gst.ElementFactory.make('queue', 'q2branchdev')
        q2 = Gst.ElementFactory.make('queue', 'q2device')
        qs = Gst.ElementFactory.make('queue', 'q2selector')
        self.identity = Gst.ElementFactory.make('identity', 'idprobe')

        caps_img = Gst.ElementFactory.make('capsfilter', 'capsimage')
        caps_dev = Gst.ElementFactory.make('capsfilter', 'capsdevice')
        caps_rate = Gst.ElementFactory.make('capsfilter', 'capsrate')
        caps_res = Gst.ElementFactory.make('capsfilter', 'capsres')
        text = Gst.ElementFactory.make('textoverlay', 'textimage')

        # Set properties
        self.device.set_property('device', device)
        self.image.set_property('is-live', True)
        self.image.set_property('pattern', "blue")
        text.set_property('text', "No VGA Signal")
        text.set_property('valignment', 1)
        text.set_property('font-desc', "arial,50px")

        q0.set_property('max-size-buffers', 1)
        q1.set_property('max-size-buffers', 1)
        q2.set_property('max-size-buffers', 1)
        qs.set_property('max-size-buffers', 1)

        rate.set_property('silent', True)
        scale.set_property('add-borders', True)

        # CAPS
        filtre_img = Gst.Caps.from_string(
            "video/x-raw-yuv,format=(fourcc)YUY2, width={0}, height={1}, framerate=(fraction){2}, pixel-aspect-ratio=(fraction)1/1"
            .format(size[0], size[1], framerate))
        filtre_dev = Gst.Caps.from_string(
            "video/x-raw-yuv,format=(fourcc)YUY2, framerate=(fraction){0}, pixel-aspect-ratio=(fraction)1/1"
            .format(framerate))
        filtre_rate = Gst.Caps.from_string(
            "video/x-raw-yuv,framerate={0}, pixel-aspect-ratio=(fraction)1/1".
            format(framerate))
        filtre_resolution = Gst.Caps.from_string(
            "video/x-raw-yuv, width={0}, height={1}, pixel-aspect-ratio=(fraction)1/1"
            .format(size[0], size[1]))

        caps_img.set_property('caps', filtre_img)  #device
        caps_dev.set_property('caps', filtre_dev)  #device
        caps_rate.set_property('caps', filtre_rate)
        caps_res.set_property('caps', filtre_resolution)

        # Add elements
        self.add(self.image, caps_img, text, q0, self.device, self.identity,
                 caps_dev, q2, scale, caps_res, rate, caps_rate, q1,
                 self.selector, qs)

        # Link elements and set ghostpad
        Gst.element_link_many(self.image, caps_img, text, q0)
        Gst.element_link_many(self.device, self.identity, caps_dev, q2, scale,
                              caps_res, rate, caps_rate, q1)

        q0.link(self.selector)
        q1.link(self.selector)
        self.selector.link(qs)
        self.add_pad(Gst.GhostPad.new('src', qs.get_pad('src')))

        # Set active pad
        if self.checking():
            self.selector.set_property('active-pad',
                                       self.selector.get_pad('sink1'))
        else:
            self.selector.set_property('active-pad',
                                       self.selector.get_pad('sink0'))
            self.eph_error = True
            self.thread_id = _thread.start_new_thread(self.polling_thread, ())
            self.device.set_state(Gst.State.NULL)
            self.remove(self.device)  #IDEA remove it when at NULL

        # Set probe
        pad = self.identity.get_static_pad("src")
        pad.add_event_probe(self.probe)
Example 13
equalizer.set_property('band2', -24.0)

# Add our elements to the pipeline.
'''
pipeline.add(audio_source, decode, convert, equalizer, audio_sink)
worked in gst 0.10 but is no longer available: in GStreamer >= 1.0 you
need to add the elements one by one, and ordering counts.
'''
pipeline.add(audio_source)
pipeline.add(decode)
pipeline.add(convert)
pipeline.add(equalizer)
pipeline.add(audio_sink)

# Link our elements together.
'''
gst.element_link_many(audio_source, decode, convert, equalizer, audio_sink)
is no longer available in GStreamer >= 1.0; you need to use link(),
which links two elements at a time.
'''
audio_source.link(decode)
decode.link(convert)
convert.link(equalizer)
equalizer.link(audio_sink)

# Set our pipelines state to Playing.
'''
gst.STATE_PLAYING => Gst.State.PLAYING

Just a hint:
check the following documentation whenever you get
Example 14
    def __init__(self, name, device, image=None, driver_type="v4lsrc", size=[1024,768], framerate="25/1"):

        Gst.Bin.__init__(self, name)
        self.eph_error = False
        self.let_pass = False
        self.removing = False

        self.devicepath = device
        self.driver_type = driver_type

        # Create elements
        self.image = Gst.ElementFactory.make('videotestsrc', 'image')
        #for sending eos
        self.src1 = self.image


        self.device = Gst.ElementFactory.make(self.driver_type, 'device')
        scale = Gst.ElementFactory.make('videoscale')
        rate  = Gst.ElementFactory.make('videorate')
        self.selector = Gst.ElementFactory.make('input-selector', 'selector')
        q0 = Gst.ElementFactory.make('queue', 'q2branchimg')
        q1 = Gst.ElementFactory.make('queue', 'q2branchdev')
        q2 = Gst.ElementFactory.make('queue', 'q2device')
        qs = Gst.ElementFactory.make('queue', 'q2selector')
        self.identity = Gst.ElementFactory.make('identity', 'idprobe')

        caps_img = Gst.ElementFactory.make('capsfilter', 'capsimage')
        caps_dev = Gst.ElementFactory.make('capsfilter', 'capsdevice')
        caps_rate = Gst.ElementFactory.make('capsfilter', 'capsrate')
        caps_res = Gst.ElementFactory.make('capsfilter', 'capsres')
        text = Gst.ElementFactory.make('textoverlay', 'textimage')

        # Set properties
        self.device.set_property('device', device)
        self.image.set_property('is-live', True)
        self.image.set_property('pattern', "blue")
        text.set_property('text', "No VGA Signal")
        text.set_property('valignment', 1)
        text.set_property('font-desc', "arial,50px")

        q0.set_property('max-size-buffers', 1)
        q1.set_property('max-size-buffers', 1)
        q2.set_property('max-size-buffers', 1)
        qs.set_property('max-size-buffers', 1)

        rate.set_property('silent',True)
        scale.set_property('add-borders',True)


        # CAPS
        filtre_img = Gst.Caps.from_string(
            "video/x-raw-yuv,format=(fourcc)YUY2, width={0}, height={1}, framerate=(fraction){2}, pixel-aspect-ratio=(fraction)1/1".format(size[0],size[1], framerate))
        filtre_dev = Gst.Caps.from_string(
            "video/x-raw-yuv,format=(fourcc)YUY2, framerate=(fraction){0}, pixel-aspect-ratio=(fraction)1/1".format(framerate))
        filtre_rate = Gst.Caps.from_string("video/x-raw-yuv,framerate={0}, pixel-aspect-ratio=(fraction)1/1".format(framerate))
        filtre_resolution =Gst.Caps.from_string("video/x-raw-yuv, width={0}, height={1}, pixel-aspect-ratio=(fraction)1/1".format(size[0],size[1]))

        caps_img.set_property('caps', filtre_img) #device
        caps_dev.set_property('caps', filtre_dev) #device
        caps_rate.set_property('caps', filtre_rate)
        caps_res.set_property('caps', filtre_resolution)

        # Add elements
        self.add(self.image, caps_img, text, q0,
                 self.device, self.identity, caps_dev, q2, scale, caps_res, rate, caps_rate, q1,
                 self.selector, qs)

        # Link elements and set ghostpad
        Gst.element_link_many(self.image, caps_img, text, q0)
        Gst.element_link_many(self.device, self.identity, caps_dev, q2, scale, caps_res, rate, caps_rate, q1)

        q0.link(self.selector)
        q1.link(self.selector)
        self.selector.link(qs)
        self.add_pad(Gst.GhostPad.new('src', qs.get_pad('src')))

        # Set active pad
        if self.checking():
            self.selector.set_property('active-pad',
                                       self.selector.get_pad('sink1'))
        else:
            self.selector.set_property('active-pad',
                                       self.selector.get_pad('sink0'))
            self.eph_error = True
            self.thread_id=thread.start_new_thread(self.polling_thread, ())
            self.device.set_state(Gst.State.NULL)
            self.remove(self.device)  #IDEA remove it when at NULL

        # Set probe
        pad = self.identity.get_static_pad("src")
        pad.add_event_probe(self.probe)