def _pipelineInit(self, factory, sbin):
    """Assemble the paused video pipeline used to render thumbnails.

    Chain: source bin -> colorspace conversion -> scaling -> caps filter
    (forcing the thumbnail dimensions) -> cairo thumbnail sink, which
    emits "thumbnail" signals handled by self._thumbnailCb.
    """
    thumbnail_sink = CairoSurfaceThumbnailSink()
    colorspace = gst.element_factory_make("ffmpegcolorspace")
    scaler = gst.element_factory_make("videoscale")
    # Method 0 is the lowest-numbered scaling algorithm — presumably
    # chosen for speed over quality on tiny thumbnails (TODO confirm).
    scaler.props.method = 0
    # NOTE(review): width is padded by 2 pixels here — looks deliberate,
    # verify against how the thumbnail surface is consumed.
    size_caps = "video/x-raw-rgb,height=(int) %d,width=(int) %d" % (self.theight, self.twidth + 2)
    capsfilter = utils.filter_(size_caps)
    element_graph = {
        sbin: colorspace,
        colorspace: scaler,
        scaler: capsfilter,
        capsfilter: thumbnail_sink,
        thumbnail_sink: None,
    }
    self.videopipeline = utils.pipeline(element_graph)
    thumbnail_sink.connect("thumbnail", self._thumbnailCb)
    self.videopipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Build the paused audio pipeline feeding samples into an ArraySink.

    A signal watch is added to the pipeline bus so segment-done and
    error messages are dispatched to the corresponding callbacks.
    """
    self.spacing = 0
    self._audio_cur = None
    self.audioSink = ArraySink()
    converter = gst.element_factory_make("audioconvert")
    element_graph = {
        sbin: converter,
        converter: self.audioSink,
        self.audioSink: None,
    }
    self.audioPipeline = utils.pipeline(element_graph)
    message_bus = self.audioPipeline.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect("message::segment-done", self._busMessageSegmentDoneCb)
    message_bus.connect("message::error", self._busMessageErrorCb)
    self.audioPipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Create the paused audio pipeline that collects samples in an ArraySink.

    Bus messages are routed synchronously through self._bus_message via
    a sync handler rather than a main-loop signal watch.
    """
    self.spacing = 0
    self._audio_cur = None
    self.audioSink = ArraySink()
    audioconvert = gst.element_factory_make("audioconvert")
    self.audioPipeline = utils.pipeline(
        {sbin: audioconvert, audioconvert: self.audioSink, self.audioSink: None}
    )
    self.audioPipeline.get_bus().set_sync_handler(self._bus_message)
    self.audioPipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Set up the audio sampling pipeline and leave it paused.

    The source bin is converted with audioconvert and terminated in an
    ArraySink; bus messages are handled synchronously by
    self._bus_message installed as the bus sync handler.
    """
    self.spacing = 0
    self.audioSink = ArraySink()
    format_converter = gst.element_factory_make("audioconvert")
    link_table = {
        sbin: format_converter,
        format_converter: self.audioSink,
        self.audioSink: None,
    }
    self.audioPipeline = utils.pipeline(link_table)
    pipeline_bus = self.audioPipeline.get_bus()
    pipeline_bus.set_sync_handler(self._bus_message)
    self._audio_cur = None
    self.audioPipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Wire up the paused audio pipeline terminating in an ArraySink.

    segment-done and error bus messages are delivered asynchronously
    via add_signal_watch to the matching _busMessage* callbacks.
    """
    self.spacing = 0
    self.audioSink = ArraySink()
    sample_converter = gst.element_factory_make("audioconvert")
    self.audioPipeline = utils.pipeline(
        {sbin: sample_converter, sample_converter: self.audioSink, self.audioSink: None}
    )
    bus = self.audioPipeline.get_bus()
    bus.add_signal_watch()
    for signal_name, callback in (
        ("message::segment-done", self._busMessageSegmentDoneCb),
        ("message::error", self._busMessageErrorCb),
    ):
        bus.connect(signal_name, callback)
    self._audio_cur = None
    self.audioPipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Construct the thumbnail-rendering video pipeline and pause it.

    Elements are linked source bin -> ffmpegcolorspace -> videoscale ->
    caps filter -> CairoSurfaceThumbnailSink; the sink's "thumbnail"
    signal is connected to self._thumbnailCb.
    """
    converter = gst.element_factory_make("ffmpegcolorspace")
    resizer = gst.element_factory_make("videoscale")
    # method 0: lowest-numbered videoscale algorithm; presumably the
    # cheapest option, adequate for small thumbnails (TODO confirm).
    resizer.props.method = 0
    surface_sink = CairoSurfaceThumbnailSink()
    # NOTE(review): the caps request twidth + 2 pixels of width — check
    # how the resulting surface is cropped/used downstream.
    caps_string = ("video/x-raw-rgb,height=(int) %d,width=(int) %d"
                   % (self.theight, self.twidth + 2))
    size_filter = utils.filter_(caps_string)
    self.videopipeline = utils.pipeline({
        sbin: converter,
        converter: resizer,
        resizer: size_filter,
        size_filter: surface_sink,
        surface_sink: None,
    })
    surface_sink.connect('thumbnail', self._thumbnailCb)
    self.videopipeline.set_state(gst.STATE_PAUSED)
def _pipelineInit(self, factory, sbin):
    """Build the paused audio-extraction pipeline ending in an ExtractionSink.

    An audiorate element is inserted so that the extracted raw-data
    timeline matches the timestamps used for seeking, even if the audio
    source has gaps or other timestamp abnormalities. Error messages go
    to self._busMessageErrorCb; the async-done handler id is kept in
    self._donecb_id — presumably so it can be disconnected later.
    """
    self.spacing = 0
    self.audioSink = ExtractionSink()
    self.audioSink.set_stopped_cb(self._finishSegment)
    rate_fixer = gst.element_factory_make("audiorate")
    format_converter = gst.element_factory_make("audioconvert")
    buffer_queue = gst.element_factory_make("queue")
    element_graph = {
        sbin: rate_fixer,
        rate_fixer: format_converter,
        format_converter: buffer_queue,
        buffer_queue: self.audioSink,
        self.audioSink: None,
    }
    self.audioPipeline = pipeline(element_graph)
    message_bus = self.audioPipeline.get_bus()
    message_bus.add_signal_watch()
    message_bus.connect("message::error", self._busMessageErrorCb)
    self._donecb_id = message_bus.connect("message::async-done",
                                          self._busMessageAsyncDoneCb)
    self.audioPipeline.set_state(gst.STATE_PAUSED)