def _add_source_bin(self, pipeline):
    """Build the overlay source bin (source ! alphacolor) and add it to *pipeline*.

    Prefers an appsrc-based source when gst-plugins-base is recent enough
    (>= 0.10.22); otherwise registers and uses the python fluoverlaysrc
    fallback element.  The resulting bin exposes a single 'src' ghost pad
    and is left in a locked state.
    """
    use_appsrc = (gstreamer.element_factory_exists("appsrc") and
                  gstreamer.get_plugin_version("app") >= (0, 10, 22, 0))
    if use_appsrc:
        self.source = gst.element_factory_make('appsrc', 'source')
        self.source.set_property('do-timestamp', True)
        self.source.connect('need-data', self.push_buffer)
    else:
        # FIXME: fluoverlaysrc only needed on gst-plugins-base < 0.10.22
        gobject.type_register(OverlayImageSource)
        gst.element_register(OverlayImageSource, "fluoverlaysrc",
                             gst.RANK_MARGINAL)
        self.source = gst.element_factory_make('fluoverlaysrc', 'source')

    # Wrap the source together with an alphacolor converter in its own bin.
    self.sourceBin = gst.Bin()
    alpha = gst.element_factory_make('alphacolor')
    self.sourceBin.add_many(self.source, alpha)
    self.source.link(alpha)
    pipeline.add(self.sourceBin)
    # Expose the converter's src pad as the bin's own 'src' ghost pad.
    self.sourceBin.add_pad(gst.GhostPad('src', alpha.get_pad('src')))
    # Keep the bin locked until the first caps change tells us the width
    # and height of the input stream.
    self.sourceBin.set_locked_state(True)
def register():
    """Register the custom sink elements in this process' GStreamer registry."""
    for element_class, factory_name in ((ResizeSink, 'myresize'),
                                        (CaptureSink, 'capturesink')):
        gobject.type_register(element_class)
        gst.element_register(element_class, factory_name, gst.RANK_MARGINAL)
def setup(self, registry):
    """Register this extension's components with the application *registry*.

    Template method: a real extension typically implements only one of the
    sections below and deletes the rest.
    """
    # You will typically only implement one of the following things
    # in a single extension.

    # Frontend component.  TODO: Edit or remove entirely
    from .frontend import FoobarFrontend
    registry.add('frontend', FoobarFrontend)

    # Backend component.  TODO: Edit or remove entirely
    from .backend import FoobarBackend
    registry.add('backend', FoobarBackend)

    # Mixer component: registered directly as a GStreamer element rather
    # than through the registry.  TODO: Edit or remove entirely
    from .mixer import FoobarMixer
    gobject.type_register(FoobarMixer)
    gst.element_register(FoobarMixer, 'foobarmixer', gst.RANK_MARGINAL)
def setup(self, registry):
    """Register this extension's components with the application *registry*.

    Template method: a real extension typically implements only one of the
    sections below and deletes the rest.
    """
    # You will typically only implement one of the following things
    # in a single extension.

    # Frontend component.  TODO: Edit or remove entirely
    from .frontend import FoobarFrontend
    registry.add('frontend', FoobarFrontend)

    # Backend component.  TODO: Edit or remove entirely
    from .backend import FoobarBackend
    registry.add('backend', FoobarBackend)

    # Mixer component: registered directly as a GStreamer element rather
    # than through the registry.  TODO: Edit or remove entirely
    from .mixer import FoobarMixer
    gobject.type_register(FoobarMixer)
    gst.element_register(FoobarMixer, 'foobarmixer', gst.RANK_MARGINAL)

    # Static web assets served from this package's 'static' directory.
    # TODO: Edit or remove entirely
    registry.add('http:static', {
        'name': self.ext_name,
        'path': os.path.join(os.path.dirname(__file__), 'static'),
    })
def get_pipeline_string(self, properties):
    """Return the gst-launch description for the overlay component's pipeline.

    Picks an appsrc-based overlay source when gst-plugins-base >= 0.10.22
    is available, falling back to the python fluoverlaysrc element
    otherwise (registering it on first use).

    Fixes over the previous revision: removed the dead initial assignment
    to source_element (both branches assign it) and the unused `ret`
    binding of gst.element_register()'s return value.
    """
    # The order here is important; to have our eater be the reference
    # stream for videomixer it needs to be specified last.
    if gstreamer.element_factory_exists("appsrc") and \
            gstreamer.get_plugin_version("app") >= (0, 10, 22, 0):
        source_element = "appsrc name=source do-timestamp=true"
    else:
        # FIXME: fluoverlaysrc only needed on gst-plugins-base < 0.10.22
        gobject.type_register(OverlayImageSource)
        gst.element_register(OverlayImageSource, "fluoverlaysrc",
                             gst.RANK_MARGINAL)
        source_element = "fluoverlaysrc name=source "
    pipeline = (
        '%s ! alphacolor ! '
        'videomixer name=mix ! @feeder:default@ '
        '@eater:default@ ! ffmpegcolorspace ! mix.' % source_element)
    return pipeline
if not self.options["vumeter"]: level = self.get_by_name("gc-audiotest-level") level.set_property("message", False) def changeValve(self, value): valve1=self.get_by_name('gc-audiotest-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-audiotest-preview") def getSource(self): return self.get_by_name("gc-audiotest-src") def getAudioSink(self): return self.get_by_name("gc-audiotest-preview") def send_event_to_src(self, event): src1 = self.get_by_name("gc-audiotest-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-audiotest-volume") element.set_property("mute", value) gobject.type_register(GCaudiotest) gst.element_register(GCaudiotest, "gc-audiotest-bin") module_register(GCaudiotest, 'audiotest')
def do_change_state(self, transition):
    # NOTE(review): method of the SliceBuffer element class defined elsewhere
    # in this file.  An earlier revision reset internal state on the
    # READY<->PAUSED transitions; kept below for reference.
    #if transition in [gst.STATE_CHANGE_READY_TO_PAUSED, gst.STATE_CHANGE_PAUSED_TO_READY]:
    #    self._reset()
    return gst.Element.do_change_state(self, transition)

def do_set_property(self, key, value):
    # GObject property setter: map the element's properties onto plain
    # instance attributes; unknown names are logged, not raised.
    if key.name == 'slicewidth':
        self.slicewidth = value
    elif key.name == 'offset':
        self.offset = value
    else:
        logger.error("No property %s" % key.name)

# Register the element with GStreamer under the 'slicebuffer' factory name.
gst.element_register(SliceBuffer, 'slicebuffer')

if __name__ == '__main__':
    # Demo entry point: "name=value" arguments become parameters, bare
    # arguments are treated as input files.
    logging.basicConfig(level=logging.DEBUG)
    mainloop = gobject.MainLoop()
    files = [a for a in sys.argv[1:] if not '=' in a]
    params = {}
    for p in [a for a in sys.argv[1:] if '=' in a]:
        name, value = p.split('=')
        params[name] = value
    # Possible parameters:
    # width=pixel_width
    # slicewidth=NNN
    # offset=column_number
self.emit('result', response) return False def _google_api_transaction(self, filename): url = u'https://www.google.com/speech-api/v1/recognize?client=chromium&lang=en-QA&maxresults=10' headers = { 'Content-Type': 'audio/x-flac; rate=8000;' } fd = open(filename, 'r') files = { 'file': (filename, fd) } try: r = requests.post(url, files=files, headers=headers) text = r.text except: logger.error('Failed to post request:', sys.exc_info()) try: resp = json.loads(text) if ('status' in resp.keys() and resp['status'] == 0): if ('hypotheses' in resp.keys() and len(resp['hypotheses']) > 0): return [resp['hypotheses'][i]['utterance'].upper() for i in range(0,len(resp['hypotheses']))] except: logger.error('Was not able to process API response:', sys.exc_info()[0]) logger.error('Raw text for debug:', text) return None gobject.type_register(GoogleSpeechToTextSink) gst.element_register (GoogleSpeechToTextSink, 'google-speech-to-text', gst.RANK_MARGINAL)
ampli = self.get_by_name("gc-autoaudio-amplify") ampli.set_property("amplification", float(self.options["amplification"])) def changeValve(self, value): valve1=self.get_by_name('gc-autoaudio-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-autoaudio-preview") def getAudioSink(self): return self.get_by_name("gc-autoaudio-preview") def getSource(self): return self.get_by_name("gc-autoaudio-src") def send_event_to_src(self, event): src1 = self.get_by_name("gc-autoaudio-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-autoaudio-volume") element.set_property("mute", value) gobject.type_register(GCautoaudio) gst.element_register(GCautoaudio, "gc-alsa-bin") module_register(GCautoaudio, 'alsa')
# NOTE(review): the following are methods of the GCblackmagic bin class,
# whose header is defined elsewhere in this file.

def getVideoSink(self):
    # Video preview sink element.
    return self.get_by_name('gc-blackmagic-preview')

def getSource(self):
    return self.get_by_name('gc-blackmagic-src')

def getAudioSink(self):
    return self.get_by_name('gc-blackmagic-audio-preview')

def mute_preview(self, value):
    # Only toggles the preview volume when the bin is not flagged as muted.
    if not self.mute:
        element = self.get_by_name("gc-blackmagic-volume")
        element.set_property("mute", value)

def send_event_to_src(self, event):
    # Shut the capture source down first, then deliver the event through
    # the identity elements of the video (and, if present, audio) branch.
    src = self.get_by_name('gc-blackmagic-src')
    src.set_state(gst.STATE_NULL)
    src.get_state()  # blocks until the state change settles
    src_video = self.get_by_name('gc-blackmagic-idvideo')
    if self.has_audio:
        src_audio = self.get_by_name('gc-blackmagic-idaudio')
        src_audio.send_event(event)
    src_video.send_event(event)

# Register the bin as a GStreamer element and with the application's
# module registry.
gobject.type_register(GCblackmagic)
gst.element_register(GCblackmagic, 'gc-blackmagic-bin')
module_register(GCblackmagic, 'blackmagic')
buf, cairo.FORMAT_ARGB32, width, height, 4 * width) ctx = cairo.Context(surface) except: print "Failed to create cairo surface for buffer" import traceback traceback.print_exc() return dim = self.svg.get_dimension_data() scale = cairo.Matrix(1.0 * width / dim[0], 0, 0, 1.0 * height / dim[1], 0, 0) ctx.set_matrix(scale) self.svg.render_cairo(ctx) gst.element_register(SVGOverlay, 'pysvgoverlay') if __name__ == '__main__': mainloop = gobject.MainLoop() if sys.argv[1:]: player = gst.element_factory_make('playbin') player.props.uri = 'file://' + sys.argv[1] bin = gst.Bin() elements = [ gst.element_factory_make('textoverlay'), gst.element_factory_make('queue'), gst.element_factory_make('ffmpegcolorspace'), gst.element_factory_make('videoscale'), gst.element_factory_make('pysvgoverlay', 'overlay'),
elif templ.direction == gst.PAD_SRC: self.add_pad(gst.GhostPad(templ.name_template, self.decoder.get_pad(templ.name_template))) gobject.type_register(DecoderWrapper) class H264DecWrapper(DecoderWrapper): """ Wrapper for ffdec_h264 Element """ __gstdetails__ = ( "ffdec_h264wrapper plugin", "Codec/Decoder/Video", "Wrapper for ffdec_h264, that deletes all timestamps except for keyframes", "Jan Schole <*****@*****.**>") # The decoder to wrap: __decoder_factory__ = gst.element_factory_find('ffdec_h264') # Copy the pad-templates from the decoder: __gsttemplates__ = tuple([templ.get() for templ in __decoder_factory__.get_static_pad_templates()]) def __init__(self, *args, **kwargs): DecoderWrapper.__init__(self, *args, **kwargs) # print "ffdec_h264wrapper initialized" gobject.type_register(H264DecWrapper) gst.element_register(H264DecWrapper, 'ffdec_h264wrapper', gst.RANK_PRIMARY + 1)
return gst.FLOW_OK, buf if __name__ == "__main__": import sys import gobject gobject.threads_init() if len(sys.argv) != 4: print "Usage: %s <ip_address> <user> <pass>" % sys.argv[0] sys.exit(-1) pipeline = gst.Pipeline("pipe") gobject.type_register(KaicongAudioSource) gst.element_register(KaicongAudioSource, 'kaicongaudiosrc', gst.RANK_MARGINAL) src = gst.element_factory_make("kaicongaudiosrc", "audiosrc") src.set_property("ip", sys.argv[1]) src.set_property("user", sys.argv[2]) src.set_property("pwd", sys.argv[3]) src.set_property("on", True) conv = gst.element_factory_make("audioconvert", "audioconv") amp = gst.element_factory_make("audioamplify", "audioamp") amp.set_property("amplification", 20) res = gst.element_factory_make("audioresample", "audioresamp") sink = gst.element_factory_make("autoaudiosink", "audiosink") pipeline.add(src, conv, amp, res, sink) gst.element_link_many(src, conv, amp, res, sink) pipeline.set_state(gst.STATE_PLAYING)
ampli = self.get_by_name("gc-autoaudio-amplify") ampli.set_property("amplification", float(self.options["amplification"])) def changeValve(self, value): valve1 = self.get_by_name('gc-autoaudio-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-autoaudio-preview") def getAudioSink(self): return self.get_by_name("gc-autoaudio-preview") def getSource(self): return self.get_by_name("gc-autoaudio-src") def send_event_to_src(self, event): src1 = self.get_by_name("gc-autoaudio-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-autoaudio-volume") element.set_property("mute", value) gobject.type_register(GCautoaudio) gst.element_register(GCautoaudio, "gc-alsa-bin") module_register(GCautoaudio, 'alsa')
self.set_option_in_pipeline('caps', 'gc-v4l2-filter', 'caps', gst.Caps) fr = re.findall("framerate *= *[0-9]+/[0-9]+", self.options['caps']) if fr: newcaps = 'video/x-raw-yuv,' + fr[0] self.set_value_in_pipeline(newcaps, 'gc-v4l2-vrate', 'caps', gst.Caps) for pos in ['right', 'left', 'top', 'bottom']: self.set_option_in_pipeline('videocrop-' + pos, 'gc-v4l2-crop', pos, int) def changeValve(self, value): valve1 = self.get_by_name('gc-v4l2-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name('gc-v4l2-preview') def getSource(self): return self.get_by_name('gc-v4l2-src') def send_event_to_src(self, event): src1 = self.get_by_name('gc-v4l2-src') src1.send_event(event) gobject.type_register(GCv4l2) gst.element_register(GCv4l2, 'gc-v4l2-bin') module_register(GCv4l2, 'v4l2')
def register_gstreamer_elements(self):
    """Register the RXV mixer as the 'rxv' GStreamer element."""
    from .mixer import RXVMixer
    mixer_class = RXVMixer
    gobject.type_register(mixer_class)
    gst.element_register(mixer_class, 'rxv', gst.RANK_MARGINAL)
pipe_config[self.options['cameratype']]['dec']).replace( 'gc-rtpvideo-mux', self.options['videomux'])) bin = gst.parse_bin_from_description(aux, False) self.add(bin) self.set_option_in_pipeline('location', 'gc-rtpvideo-src', 'location') self.set_value_in_pipeline( path.join(self.options['path'], self.options['file']), 'gc-rtpvideo-sink', 'location') def changeValve(self, value): valve1 = self.get_by_name('gc-rtpvideo-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name('gc-rtpvideo-preview') def getSource(self): return self.get_by_name('gc-rtpvideo-src') def send_event_to_src(self, event): src1 = self.get_by_name('gc-rtpvideo-src') src1.send_event(event) gobject.type_register(GCrtpvideo) gst.element_register(GCrtpvideo, 'gc-rtpvideo-bin') module_register(GCrtpvideo, 'rtpvideo')
aux = self.options['pipestr'].replace('gc-custom-preview', 'sink-' + self.options['name']) #bin = gst.parse_bin_from_description(aux, False) bin = gst.parse_launch("( {} )".format(aux)) self.add(bin) self.set_value_in_pipeline( path.join(self.options['path'], self.options['file']), 'gc-custom-sink', 'location') def changeValve(self, value): valve1 = self.get_by_name('gc-custom-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name('gc-custom-preview') def getSource(self): return self.get_by_name('gc-custom-src') def send_event_to_src(self, event): src1 = self.get_by_name('gc-custom-src') src1.send_event(event) gobject.type_register(GCcustom) gst.element_register(GCcustom, 'gc-custom-bin') module_register(GCcustom, 'custom')
def register_mixer(mixer_class):
    """Register *mixer_class* as a GStreamer element.

    The element factory name is the lower-cased class name.
    """
    factory_name = mixer_class.__name__.lower()
    gobject.type_register(mixer_class)
    gst.element_register(mixer_class, factory_name, gst.RANK_MARGINAL)
self.set_value_in_pipeline(path.join(self.options["path"], self.options["file"]), "gc-v4l2-sink", "location") self.set_option_in_pipeline("caps", "gc-v4l2-filter", "caps", gst.Caps) fr = re.findall("framerate *= *[0-9]+/[0-9]+", self.options["caps"]) if fr: newcaps = "video/x-raw-yuv," + fr[0] self.set_value_in_pipeline(newcaps, "gc-v4l2-vrate", "caps", gst.Caps) for pos in ["right", "left", "top", "bottom"]: self.set_option_in_pipeline("videocrop-" + pos, "gc-v4l2-crop", pos, int) def changeValve(self, value): valve1 = self.get_by_name("gc-v4l2-valve") valve1.set_property("drop", value) def getVideoSink(self): return self.get_by_name("gc-v4l2-preview") def getSource(self): return self.get_by_name("gc-v4l2-src") def send_event_to_src(self, event): src1 = self.get_by_name("gc-v4l2-src") src1.send_event(event) gobject.type_register(GCv4l2) gst.element_register(GCv4l2, "gc-v4l2-bin") module_register(GCv4l2, "v4l2")
ampli = self.get_by_name("gc-audio-amplify") ampli.set_property("amplification", float(self.options["amplification"])) def changeValve(self, value): valve1 = self.get_by_name('gc-audio-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-audio-preview") def getAudioSink(self): return self.get_by_name("gc-audio-preview") def getSource(self): return self.get_by_name("gc-audio-src") def send_event_to_src(self, event): src1 = self.get_by_name("gc-audio-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-audio-volume") element.set_property("mute", value) gobject.type_register(GCpulse) gst.element_register(GCpulse, "gc-pulse-bin") module_register(GCpulse, 'pulse')
if not self.options["vumeter"]: level = self.get_by_name("gc-audiotest-level") level.set_property("message", False) def changeValve(self, value): valve1 = self.get_by_name('gc-audiotest-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-audiotest-preview") def getSource(self): return self.get_by_name("gc-audiotest-src") def getAudioSink(self): return self.get_by_name("gc-audiotest-preview") def send_event_to_src(self, event): src1 = self.get_by_name("gc-audiotest-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-audiotest-volume") element.set_property("mute", value) gobject.type_register(GCaudiotest) gst.element_register(GCaudiotest, "gc-audiotest-bin") module_register(GCaudiotest, 'audiotest')
offset = (nstart - start) * self.bitrate # size nsize = nduration * self.bitrate b2 = inbuf.create_sub(offset, nsize) b2.timestamp = nstart b2.duration = nstop - nstart self.debug("buffer clipped") return self.srcpad.push(b2) self.debug("buffer untouched, just pushing forward") return self.srcpad.push(inbuf) self.debug("buffer dropped") return gst.FLOW_OK gobject.type_register(AudioClipper) gst.element_register(AudioClipper, 'audio-clipper') class ClipperProbe(object): def __init__(self, pad): self._pad = pad self._pad.add_buffer_probe(self._bufferprobe) self._pad.add_event_probe(self._eventprobe) self.segment = gst.Segment() self.segment.init(gst.FORMAT_UNDEFINED) self._pad.connect("notify::caps", self._capsChangedCb) def _capsChangedCb(self, pad, unk): c = pad.get_negotiated_caps() if c is None: return
# NOTE(review): the following are methods of the GCfirewireavi bin class,
# whose header is defined elsewhere in this file.

def changeValve(self, value):
    # Toggle 'drop' on both the video and the audio valve together.
    valve1 = self.get_by_name('gc-firewireavi-video-valve')
    valve2 = self.get_by_name('gc-firewireavi-audio-valve')
    valve1.set_property('drop', value)
    valve2.set_property('drop', value)

def getVideoSink(self):
    return self.get_by_name("gc-firewireavi-preview")

def getSource(self):
    return self.get_by_name("gc-firewireavi-src")

def send_event_to_src(self, event):
    src1 = self.get_by_name("gc-firewireavi-src")
    src1.send_event(event)

def mute_preview(self, value):
    # Only toggles the preview volume when the bin is not flagged as muted.
    if not self.mute:
        element = self.get_by_name("gc-firewireavi-volume")
        element.set_property("mute", value)

def configure(self):
    # Intentionally a no-op; device setup would shell out to v4l2-ctl:
    ##
    # v4l2-ctl -d self.options["location"] -s self.options["standard"]
    # v4l2-ctl -d self.options["location"] -i self.options["input"]
    pass

# Register the bin as a GStreamer element.
gobject.type_register(GCfirewireavi)
gst.element_register(GCfirewireavi, "gc-firewireavi-bin")
u, r, f, start, s, position = event.parse_new_segment() self._update_sync_point(start, position) if gstreamer.event_is_flumotion_reset(event): self._resetReceived = True self._send_new_segment = True finally: self._lock.release() # forward all the events except the new segment events if event.type != gst.EVENT_NEWSEGMENT: return srcpad.push_event(event) return True gobject.type_register(SyncKeeper) gst.element_register(SyncKeeper, "synckeeper", gst.RANK_MARGINAL) class GenericDecoder(dc.DecoderComponent): """ Generic decoder component using decodebin2. It listen to the custom gstreamer event flumotion-reset, and reset the decoding pipeline by removing the old one and creating a new one. Sub-classes must override _get_feeders_info() and return a list of FeederInfo instances that describe the decoder output. When reset, if the new decoded pads do not match the
new_buf.stamp(buf) return self.srcpad.push(new_buf) def do_change_state(self, state_change): if state_change == gst.STATE_CHANGE_NULL_TO_READY: self._finder = self._create_finder() elif state_change == gst.STATE_CHANGE_READY_TO_NULL: self._finder = None self._previous_img = None self._previous_blob = None return gst.Element.do_change_state(self, state_change) def _create_finder(self): if self.algorithm == self.LUCAS_KANADE: finder = LucasKanadeFinder(self.corner_count, self.corner_quality_level, self.corner_min_distance, self.win_size, self.pyramid_level, self.max_iterations, self.epsilon) elif self.algorithm == self.SURF: finder = SURFFinder() else: raise ValueError("Unknown algorithm") return finder gobject.type_register(OpticalFlowFinder) ret = gst.element_register(OpticalFlowFinder, 'opticalflowfinder')
return None def _test_mixer(self, factory): element = factory.create() if not element: return False try: result = element.set_state(gst.STATE_READY) if result != gst.STATE_CHANGE_SUCCESS: return False # Trust that the default device is sane and just check tracks. return self._test_tracks(element) finally: element.set_state(gst.STATE_NULL) def _test_tracks(self, element): # Only allow elements that have a least one output track. flags = gst.interfaces.MIXER_TRACK_OUTPUT for track in element.list_tracks(): if track.flags & flags: return True return False gobject.type_register(AutoAudioMixer) gst.element_register(AutoAudioMixer, 'autoaudiomixer', gst.RANK_MARGINAL)
ampli.set_property("amplification", float(self.options["amplification"])) def changeValve(self, value): valve1=self.get_by_name('gc-audio-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-audio-preview") def getAudioSink(self): return self.get_by_name("gc-audio-preview") def getSource(self): return self.get_by_name("gc-audio-src") def send_event_to_src(self, event): src1 = self.get_by_name("gc-audio-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-audio-volume") element.set_property("mute", value) gobject.type_register(GCpulse) gst.element_register(GCpulse, "gc-pulse-bin") module_register(GCpulse, 'pulse')
# NOTE(review): the following are methods of the GCfirewireavi bin class,
# whose header is defined elsewhere in this file.

def changeValve(self, value):
    # Toggle 'drop' on both the video and the audio valve together.
    valve1=self.get_by_name('gc-firewireavi-video-valve')
    valve2=self.get_by_name('gc-firewireavi-audio-valve')
    valve1.set_property('drop', value)
    valve2.set_property('drop', value)

def getVideoSink(self):
    return self.get_by_name("gc-firewireavi-preview")

def getSource(self):
    return self.get_by_name("gc-firewireavi-src")

def send_event_to_src(self,event):
    src1 = self.get_by_name("gc-firewireavi-src")
    src1.send_event(event)

def mute_preview(self, value):
    # Only toggles the preview volume when the bin is not flagged as muted.
    if not self.mute:
        element = self.get_by_name("gc-firewireavi-volume")
        element.set_property("mute", value)

def configure(self):
    # Intentionally a no-op; device setup would shell out to v4l2-ctl:
    ##
    # v4l2-ctl -d self.options["location"] -s self.options["standard"]
    # v4l2-ctl -d self.options["location"] -i self.options["input"]
    pass

# Register the bin as a GStreamer element.
gobject.type_register(GCfirewireavi)
gst.element_register(GCfirewireavi, "gc-firewireavi-bin")
duration = self._receiver.last_frame if not duration: return True duration = long(duration.hrs * 3600 + duration.mins * 60 + duration.secs) * gst.SECOND query.set_duration(gst.FORMAT_TIME, duration) #debug("Returning %s %d" % (query.parse_duration())) return True gobject.type_register(CCNSrc) gst.element_register(CCNSrc, 'CCNSrc') if __name__ == '__main__': gobject.threads_init() def bus_call(bus, message, loop): t = message.type if t == gst.MESSAGE_EOS: print("End-of-stream") loop.quit() elif t == gst.MESSAGE_ERROR: err, debug = message.parse_error() print("Error: %s: %s" % (err, debug)) loop.quit() return True
self.mute = False def changeValve(self, value): valve1=self.get_by_name('gc-firewire-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name("gc-firewire-preview") def getSource(self): return self.get_by_name("gc-firewire-src") def send_event_to_src(self,event): src1 = self.get_by_name("gc-firewire-src") src1.send_event(event) def mute_preview(self, value): if not self.mute: element = self.get_by_name("gc-firewire-volume") element.set_property("mute", value) def configure(self): ## # v4l2-ctl -d self.options["location"] -s self.options["standard"] # v4l2-ctl -d self.options["location"] -i self.options["input"] pass gobject.type_register(GCfirewire) gst.element_register(GCfirewire, "gc-firewire-bin")
aux = self.options['pipestr'].replace('gc-custom-preview', 'sink-' + self.options['name']) #bin = gst.parse_bin_from_description(aux, False) bin = gst.parse_launch("( {} )".format(aux)) self.add(bin) self.set_value_in_pipeline(path.join(self.options['path'], self.options['file']), 'gc-custom-sink', 'location') def changeValve(self, value): if value: print "changeValve TRUE " else: print "changeValve FALSE " valve1=self.get_by_name('gc-custom-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name('gc-custom-preview') def getSource(self): return self.get_by_name('gc-custom-src') def send_event_to_src(self, event): src1 = self.get_by_name('gc-custom-src') src1.send_event(event) gobject.type_register(GCcustom) gst.element_register(GCcustom, 'gc-custom-bin') module_register(GCcustom, 'custom')
_src_template = gst.PadTemplate ("src", gst.PAD_SRC, gst.PAD_ALWAYS, gst.caps_from_string ("video/x-raw-gray,bpp=(int)16,depth=(int)16,width=[ 1, 2147483647 ],height=[ 1, 2147483647 ],framerate=[ 0/1, 2147483647/1 ]")) __gsttemplates__ = (_src_template,) def __init__ (self, *args, **kwargs): gst.BaseSrc.__init__(self) gst.info('creating srcpad') self.src_pad = gst.Pad (self._src_template) self.src_pad.use_fixed_caps() def do_create(self, offset, length): depth, timestamp = freenect.sync_get_depth() databuf = numpy.getbuffer(depth) self.buf = gst.Buffer(databuf) self.buf.timestamp = 0 self.buf.duration = pow(2, 63) -1 return gst.FLOW_OK, self.buf # Register element class gobject.type_register(KinectDepthSrc) gst.element_register(KinectDepthSrc, 'kinectdepthsrc', gst.RANK_MARGINAL)
# NOTE(review): the following are methods of the GChauppauge bin class,
# whose header is defined elsewhere in this file.

def getVideoSink(self):
    return self.get_by_name("gc-hauppauge-preview")

def getSource(self):
    return self.get_by_name("gc-hauppauge-file-src")

def send_event_to_src(self, event):
    # Delivers the event to all three sources of this bin.
    # IDEA made a common for all our bins
    src1 = self.get_by_name("gc-hauppauge-device-src")
    src2 = self.get_by_name("gc-hauppauge-file-src")
    src3 = self.get_by_name("gc-hauppauge-audio-src")
    src1.send_event(event)
    src2.send_event(event)
    src3.send_event(event)

def mute_preview(self, value):
    # Only toggles the preview volume when the bin is not flagged as muted.
    if not self.mute:
        element = self.get_by_name("gc-hauppauge-volume")
        element.set_property("mute", value)

def configure(self):
    # Intentionally a no-op; device setup would shell out to v4l2-ctl:
    ##
    # v4l2-ctl -d self.options["location"] -s self.options["standard"]
    # v4l2-ctl -d self.options["location"] -i self.options["input"]
    pass

# Register the bin as a GStreamer element and with the application's
# module registry.
gobject.type_register(GChauppauge)
gst.element_register(GChauppauge, "gc-hauppauge-bin")
module_register(GChauppauge, 'hauppauge')
fr = re.findall("framerate *= *[0-9]+/[0-9]+", self.options['caps']) if fr: newcaps = 'video/x-raw-yuv,' + fr[0] self.set_value_in_pipeline(newcaps, 'gc-v4l2-vrate', 'caps', gst.Caps) #element2 = self.get_by_name('gc-v4l2-vrate') #element2.set_property('caps', gst.Caps(newcaps)) for pos in ['right','left','top','bottom']: self.set_option_in_pipeline('videocrop-'+pos, 'gc-v4l2-crop', pos, int) #element = self.get_by_name('gc-v4l2-crop') #element.set_property(pos, int(self.options['videocrop-' + pos])) def changeValve(self, value): valve1=self.get_by_name('gc-v4l2-valve') valve1.set_property('drop', value) def getVideoSink(self): return self.get_by_name('gc-v4l2-preview') def getSource(self): return self.get_by_name('gc-v4l2-src') def send_event_to_src(self, event): src1 = self.get_by_name('gc-v4l2-src') src1.send_event(event) gobject.type_register(GCv4l2) gst.element_register(GCv4l2, 'gc-v4l2-bin') module_register(GCv4l2, 'v4l2')
pad.set_active(True) self.add_pad(pad) self.inputs[name] = (pad, aconv, aresample, clipper, adderpad) self.pad_count += 1 return pad def do_release_pad(self, pad): self.debug("pad:%r" % pad) name = pad.get_name() if name in self.inputs.keys(): sinkpad, aconv, aresample, clipper, adderpad = self.inputs.pop( name) # we deactivate this pad to make sure that if ever the streaming # thread was doing something downstream (like getting caps) it will # return with GST_FLOW_WRONG_STATE and not GST_FLOW_NOT_LINKED (which is # a fatal return flow). aresample.get_pad("src").set_active(False) self.adder.release_request_pad(adderpad) aresample.get_pad("src").unlink(adderpad) aconv.unlink(aresample) aconv.set_state(gst.STATE_NULL) aresample.set_state(gst.STATE_NULL) self.remove(aconv, aresample) self.remove_pad(sinkpad) self.debug("done") gobject.type_register(SmartAdderBin) gst.element_register(SmartAdderBin, 'smart-adder-bin')
# NOTE(review): the following are methods of the VideoSink element class,
# whose header is defined elsewhere in this file.

def do_render(self, buffer):
    # Write a packed header (length, timestamp, duration, flags) followed
    # by the raw buffer payload to the output file self.of.
    print "Buffer timestamp %d %d %d" % (buffer.timestamp, buffer.duration, buffer.flags)
    hdr = struct.pack(hdr_fmt, len(buffer), buffer.timestamp, buffer.duration, buffer.flags)
    self.of.write(hdr)
    self.of.write(buffer.data)
    return gst.FLOW_OK

def do_preroll(self, buf):
    # Preroll buffers are only logged, never written.
    print "Preroll"
    return gst.FLOW_OK

def do_event(self, ev):
    # NOTE(review): gst 0.10 event handlers conventionally return a bool;
    # returning gst.FLOW_OK here may not be intended — confirm.
    print "Got event of type %s" % ev.type
    return gst.FLOW_OK

gst.element_register(VideoSink, 'VideoSink')

if __name__ == '__main__':
    # Demo: capture, scale and H.264-encode video into the sink above.
    gobject.threads_init()
    pipeline = gst.parse_launch("autovideosrc ! videorate ! videoscale ! video/x-raw-yuv,width=480,height=360 ! timeoverlay shaded-background=true ! x264enc byte-stream=true bitrate=256 speed-preset=veryfast ! VideoSink")
    loop = gobject.MainLoop()
    pipeline.set_state(gst.STATE_PLAYING)
    try:
        loop.run()
    except KeyboardInterrupt:
        print "Ctrl+C pressed, exitting"
        pass
def chainfunc(self, pad, buffer): #print 'Capture sink buffer in' try: self.img_cb(buffer) except: traceback.print_exc() os._exit(1) return gst.FLOW_OK def eventfunc(self, pad, event): return True gobject.type_register(CaptureSink) # Register the element into this process' registry. gst.element_register (CaptureSink, 'capturesink', gst.RANK_MARGINAL) class ImageProcessor(QThread): n_frames = pyqtSignal(int) # Number of images processed = pyqtSignal() def __init__(self): QThread.__init__(self) self.running = threading.Event() self.image_requested = threading.Event() self.q = Queue.Queue() self._n_frames = 0 def run(self):
mode = 'pull' __gstdetails__ = ('FGDPsrc', 'Source', 'Flumotion GStreamer data protocol source', 'Flumotion DevTeam') def __init__(self): FGDPBase.__init__(self) # Create elements self.fdelement = gst.element_factory_make('fdsrc') gdpdepay = gst.element_factory_make('gdpdepay') # Add elements to the bin and link them self.add(self.fdelement, gdpdepay) self.fdelement.link(gdpdepay) # Create fd handler proxy FDSrc.__init__(self, self.fdelement) # Create sink pads self._src_pad = gst.GhostPad('src', gdpdepay.get_pad('src')) self.add_pad(self._src_pad) def prepare(self): # Lock the state until we get the first connection and we can pass it # a valid fd, otherwhise it will be using stdin. self.fdelement.set_locked_state(True) gobject.type_register(FGDPSink) gst.element_register(FGDPSink, "fgdpsink", gst.RANK_MARGINAL) gobject.type_register(FGDPSrc) gst.element_register(FGDPSrc, "fgdpsrc", gst.RANK_MARGINAL)
def register():
    """Register the HLS sink element in this process' GStreamer registry."""
    element_class = HLSSink
    gobject.type_register(element_class)
    gst.element_register(element_class, 'hlssink', gst.RANK_MARGINAL)
if event.type == gst.EVENT_NEWSEGMENT: u, r, f, start, s, position = event.parse_new_segment() self._update_sync_point(start, position) if gstreamer.event_is_flumotion_reset(event): self._resetReceived = True self._send_new_segment = True finally: self._lock.release() # forward all the events except the new segment events if event.type != gst.EVENT_NEWSEGMENT: return srcpad.push_event(event) return True gobject.type_register(SyncKeeper) gst.element_register(SyncKeeper, "synckeeper", gst.RANK_MARGINAL) class GenericDecoder(dc.DecoderComponent): """ Generic decoder component using decodebin2. It listen to the custom gstreamer event flumotion-reset, and reset the decoding pipeline by removing the old one and creating a new one. Sub-classes must override _get_feeders_info() and return a list of FeederInfo instances that describe the decoder output. When reset, if the new decoded pads do not match the
def __init__(self): self.__gobject_init__() self.pad = self.get_pad("src") self.pad.use_fixed_caps() phase = numpy.random.random(2049) * 2.0 * numpy.pi self.spectrum = numpy.exp(phase * 1j) def do_create(self, offset, size): b = gst.Buffer(self.spectrum) b.set_caps(self.pad.get_caps()) return gst.FLOW_OK, b gobject.type_register(Noise) gst.element_register(Noise, "spectrum_noise") class FFT(gst.Element): _sinkpadtemplate = gst.PadTemplate( "sink", gst.PAD_SINK, gst.PAD_ALWAYS, gst.Caps("audio/x-raw-float, rate=44100, channels=1, width=64, endianness=1234"), ) _srcpadtemplate = gst.PadTemplate("src", gst.PAD_SRC, gst.PAD_ALWAYS, gst.Caps("audio/x-raw-spectrum")) __gstdetails__ = ("fft", "Audio/Filter", "fft element", "Leberwurscht") # __gproperties__ = {"":(gobject.TYPE_INT, "mode", "editing mode", 0, MODES_NUM-1, MODE_DEFAULT, gobject.PARAM_READWRITE)} def __init__(self):
    def do_set_property(self, property, value):
        # GObject property setter; dispatches on the property name.
        # 'location' must be set first: it (re)creates the depacketizer
        # that the other properties are forwarded to.
        if property.name == 'location':
            # NOTE(review): the second argument (18) is an unexplained
            # constant -- possibly the default window/pipeline size;
            # confirm against CCNVideoDepacketizer.
            self.depacketizer = CCNVideoDepacketizer(value, 18)
        elif property.name == 'publisher':
            # Publisher id arrives base64-encoded.
            self.depacketizer.publisher_id = base64.b64decode(value)
        elif property.name == 'interest-retry':
            self.depacketizer.interest_retries = value
        elif property.name == 'pipeline-size':
            self.depacketizer.window = value
        else:
            raise AttributeError, 'unknown property %s' % property.name

    def do_set_state(self, state):
        # Debug trace only; nothing else is done on state changes here.
        print "CHANGING STATE %s" % state


gst.element_register(VideoSrc, 'VideoSrc')

if __name__ == '__main__':
    import sys

    gobject.threads_init()
    if len(sys.argv) != 2:
        print "Usage: %s <uri>" % sys.argv[0]
        exit(1)
    uri = sys.argv[1]
    pipeline = gst.parse_launch('VideoSrc location=%s ! decodebin ! xvimagesink' % uri)
    # NOTE(review): truncated in the original -- the code that runs this
    # main loop is outside this view.
    loop = gobject.MainLoop()
# NOTE(review): chunk starts mid-method -- the def line of the method that
# configures the videotest source is outside this view.
        source = self.get_by_name('gc-videotest-src')
        source.set_property('pattern', int(self.options['pattern']))
        # Probe whether this videotestsrc exposes 'foreground-color'
        # (presumably not all versions do -- hence the check).
        coloured = False
        for properties in gobject.list_properties(source):
            if properties.name == 'foreground-color':
                coloured = True
        if self.options["color1"] and coloured:
            source.set_property('foreground-color', int(self.options['color1']))
        #if self.options["color2"]:
        #    source.set_property('background-color', int(self.options['color2']))

    def changeValve(self, value):
        # Drop (True) or pass (False) buffers at the valve element.
        valve1 = self.get_by_name('gc-videotest-valve')
        valve1.set_property('drop', value)

    def getVideoSink(self):
        return self.get_by_name('gc-videotest-preview')

    def getSource(self):
        return self.get_by_name('gc-videotest-src')

    def send_event_to_src(self, event):
        src1 = self.get_by_name('gc-videotest-src')
        src1.send_event(event)


gobject.type_register(GCvideotest)
gst.element_register(GCvideotest, 'gc-videotest-bin')
module_register(GCvideotest, 'videotest')
# NOTE(review): chunk starts mid-method -- the def line (a preview getter,
# judging by the element name) is outside this view.
        return self.get_by_name("gc-hauppauge-preview")

    def getSource(self):
        return self.get_by_name("gc-hauppauge-file-src")

    def send_event_to_src(self, event):
        # IDEA made a common for all our bins
        # Fan the event out to every source element in the bin.
        src1 = self.get_by_name("gc-hauppauge-device-src")
        src2 = self.get_by_name("gc-hauppauge-file-src")
        src3 = self.get_by_name("gc-hauppauge-audio-src")
        src1.send_event(event)
        src2.send_event(event)
        src3.send_event(event)

    def mute_preview(self, value):
        # Only touch the preview volume when the bin is not already muted.
        if not self.mute:
            element = self.get_by_name("gc-hauppauge-volume")
            element.set_property("mute", value)

    def configure(self):
        ##
        # v4l2-ctl -d self.options["location"] -s self.options["standard"]
        # v4l2-ctl -d self.options["location"] -i self.options["input"]
        # Intentionally a no-op: device setup is done externally with the
        # v4l2-ctl commands sketched above.
        pass


gobject.type_register(GChauppauge)
gst.element_register(GChauppauge, "gc-hauppauge-bin")
module_register(GChauppauge, 'hauppauge')
# clip the buffer offset = (nstart - start) * self.bitrate # size nsize = nduration * self.bitrate b2 = inbuf.create_sub(offset, nsize) b2.timestamp = nstart b2.duration = nstop - nstart self.debug("buffer clipped") return self.srcpad.push(b2) self.debug("buffer untouched, just pushing forward") return self.srcpad.push(inbuf) self.debug("buffer dropped") return gst.FLOW_OK gobject.type_register(AudioClipper) gst.element_register(AudioClipper, 'audio-clipper') class ClipperProbe(object): def __init__(self, pad): self._pad = pad self._pad.add_buffer_probe(self._bufferprobe) self._pad.add_event_probe(self._eventprobe) self.segment = gst.Segment() self.segment.init(gst.FORMAT_UNDEFINED) self._pad.connect("notify::caps", self._capsChangedCb) def _capsChangedCb(self, pad, unk): c = pad.get_negotiated_caps() if c is None: return
# NOTE(review): chunk starts mid-class -- the FakeMixer class statement and
# its earlier track_* properties are outside this view.

    # Defaults for the single fake track: stereo, flagged as the master
    # output track.
    track_num_channels = gobject.property(type=int, default=2)
    track_flags = gobject.property(
        type=int,
        default=(gst.interfaces.MIXER_TRACK_MASTER |
                 gst.interfaces.MIXER_TRACK_OUTPUT))

    def __init__(self):
        gst.Element.__init__(self)

    def list_tracks(self):
        # Build the one fake track from the track_* gobject properties.
        track = create_track(
            self.track_label,
            self.track_initial_volume,
            self.track_min_volume,
            self.track_max_volume,
            self.track_num_channels,
            self.track_flags)
        return [track]

    def get_volume(self, track):
        return track.volumes

    def set_volume(self, track, volumes):
        track.volumes = volumes

    def set_record(self, track, record):
        # Recording is not modelled by the fake mixer; no-op.
        pass


gobject.type_register(FakeMixer)
gst.element_register(FakeMixer, 'fakemixer', gst.RANK_MARGINAL)
# NOTE(review): chunk starts mid-class -- the Noise source class statement
# is outside this view (near-duplicate of another fragment in this file).

    def __init__(self):
        self.__gobject_init__()
        self.pad = self.get_pad("src")
        self.pad.use_fixed_caps()
        # Random-phase, unit-magnitude complex spectrum of 2049 bins
        # (presumably the one-sided half of a 4096-point FFT -- confirm).
        phase = numpy.random.random(2049) * 2. * numpy.pi
        self.spectrum = numpy.exp(phase * 1j)

    def do_create(self, offset, size):
        # Hand out the same precomputed spectrum on every pull.
        b = gst.Buffer(self.spectrum)
        b.set_caps(self.pad.get_caps())
        return gst.FLOW_OK, b


gobject.type_register(Noise)
gst.element_register(Noise, "spectrum_noise")


class FFT(gst.Element):

    # Fixed caps: mono 64-bit little-endian float audio at 44100 Hz in,
    # an ad-hoc "audio/x-raw-spectrum" format out.
    _sinkpadtemplate = gst.PadTemplate(
        "sink", gst.PAD_SINK, gst.PAD_ALWAYS,
        gst.Caps(
            "audio/x-raw-float, rate=44100, channels=1, width=64, endianness=1234"
        ))
    _srcpadtemplate = gst.PadTemplate(
        "src", gst.PAD_SRC, gst.PAD_ALWAYS,
        gst.Caps("audio/x-raw-spectrum"))

    __gstdetails__ = ("fft", "Audio/Filter", "fft element", "Leberwurscht")

    # __gproperties__ = {"":(gobject.TYPE_INT, "mode", "editing mode", 0, MODES_NUM-1, MODE_DEFAULT, gobject.PARAM_READWRITE)}

    # NOTE(review): truncated in the original -- the body of __init__ is
    # outside this view.
    def __init__(self):
# NOTE(review): chunk starts mid-method -- the def line and the lookup that
# bound `source` are outside this view (near-duplicate of another fragment
# in this file).
        source.set_property('pattern', int(self.options['pattern']))
        # Probe whether this videotestsrc exposes 'foreground-color'
        # (presumably not all versions do -- hence the check).
        coloured = False
        for properties in gobject.list_properties(source):
            if properties.name == 'foreground-color':
                coloured = True
        if self.options["color1"] and coloured:
            source.set_property('foreground-color', int(self.options['color1']))
        #if self.options["color2"]:
        #    source.set_property('background-color', int(self.options['color2']))

    def changeValve(self, value):
        # Drop (True) or pass (False) buffers at the valve element.
        valve1 = self.get_by_name('gc-videotest-valve')
        valve1.set_property('drop', value)

    def getVideoSink(self):
        return self.get_by_name('gc-videotest-preview')

    def getSource(self):
        return self.get_by_name('gc-videotest-src')

    def send_event_to_src(self, event):
        src1 = self.get_by_name('gc-videotest-src')
        src1.send_event(event)


gobject.type_register(GCvideotest)
gst.element_register(GCvideotest, 'gc-videotest-bin')
module_register(GCvideotest, 'videotest')
# NOTE(review): chunk starts mid-class, inside the __gproperties__ dict --
# the dict's opening and the property key are outside this view.
        'CCNx location',
        'location of the stream in CCNx network',
        '',
        gobject.PARAM_READWRITE)
    }

    def do_set_property(self, property, value):
        # GObject property setter; only 'location' is supported here.
        if property.name == 'location':
            # NOTE(review): the second argument (18) is an unexplained
            # constant -- possibly a window size; confirm against
            # CCNVideoDepacketizer.
            self.depacketizer = CCNVideoDepacketizer(value, 18)
        else:
            raise AttributeError, 'unknown property %s' % property.name

    def do_set_state(self, state):
        # Debug trace only; nothing else is done on state changes here.
        print "CHANGING STATE %s" % state


gst.element_register(VideoSrc, 'VideoSrc')

if __name__ == '__main__':
    import sys

    gobject.threads_init()
    if len(sys.argv) != 2:
        print "Usage: %s <uri>" % sys.argv[0]
        exit(1)
    uri = sys.argv[1]
    pipeline = gst.parse_launch('VideoSrc location=%s ! decodebin ! xvimagesink' % uri)
    # NOTE(review): truncated in the original -- the code that runs this
    # main loop is outside this view.
    loop = gobject.MainLoop()
def register_element(element_class):
    """Register *element_class* with GObject and GStreamer.

    The factory name is the lowercased class name; the element is
    registered at gst.RANK_MARGINAL.
    """
    factory_name = element_class.__name__.lower()
    gobject.type_register(element_class)
    gst.element_register(element_class, factory_name, gst.RANK_MARGINAL)
# NOTE(review): chunk starts mid-class -- the enclosing class statement
# (presumably the FGDPSrc bin registered below) is outside this view.
# Near-duplicate of another fragment in this file, but using add_many().

    # Pull mode: this end reads from the fd (see the FDSrc proxy below).
    mode = 'pull'

    # GStreamer element metadata: (long-name, klass, description, author).
    __gstdetails__ = ('FGDPsrc', 'Source',
                      'Flumotion GStreamer data protocol source',
                      'Flumotion DevTeam')

    def __init__(self):
        """Build the internal fdsrc ! gdpdepay chain and ghost its src pad."""
        FGDPBase.__init__(self)
        # Create elements
        self.fdelement = gst.element_factory_make('fdsrc')
        gdpdepay = gst.element_factory_make('gdpdepay')
        # Add elements to the bin and link them
        self.add_many(self.fdelement, gdpdepay)
        self.fdelement.link(gdpdepay)
        # Create fd handler proxy around the fdsrc element.
        FDSrc.__init__(self, self.fdelement)
        # Ghost gdpdepay's src pad so this bin exposes a 'src' pad.
        self._src_pad = gst.GhostPad('src', gdpdepay.get_pad('src'))
        self.add_pad(self._src_pad)

    def prepare(self):
        # Lock the state until we get the first connection and we can pass it
        # a valid fd, otherwise it would be reading from stdin.
        self.fdelement.set_locked_state(True)


# Module-level registration of both FGDP elements at marginal rank.
gobject.type_register(FGDPSink)
gst.element_register(FGDPSink, "fgdpsink", gst.RANK_MARGINAL)
gobject.type_register(FGDPSrc)
gst.element_register(FGDPSrc, "fgdpsrc", gst.RANK_MARGINAL)
# return with GST_FLOW_WRONG_STATE and not GST_FLOW_NOT_LINKED (which is # a fatal return flow). aresample.get_pad("src").set_active(False) self.adder.release_request_pad(adderpad) aresample.get_pad("src").unlink(adderpad) aconv.unlink(aresample) aconv.set_state(gst.STATE_NULL) aresample.set_state(gst.STATE_NULL) self.remove(aconv, aresample) self.remove_pad(sinkpad) self.debug("done") gobject.type_register(SmartAdderBin) gst.element_register(SmartAdderBin, 'smart-adder-bin') class SmartVideomixerBin(gst.Bin): __gstdetails__ = ( "Smart Videomixer", "Generic/Video", "Convenience wrapper around videomixer, accepts anything", "Edward Hervey <*****@*****.**>" ) __gsttemplates__ = ( gst.PadTemplate("src", gst.PAD_SRC, gst.PAD_ALWAYS, gst.Caps("video/x-raw-yuv;video/x-raw-rgb")), gst.PadTemplate("sink_%u", gst.PAD_SINK, gst.PAD_REQUEST, gst.Caps("video/x-raw-yuv;video/x-raw-rgb"))
# NOTE(review): chunk starts mid-class -- the VideoSink class statement and
# earlier vmethods are outside this view.

    def do_preroll(self, buf):
        # Debug trace only; accept the preroll buffer.
        print "Preroll"
        return gst.FLOW_OK

    def do_unlock_stop(self):
        print "Stop Unlock!"
        return False

    def do_render_list(self, buffer_list):
        # NOTE(review): this chains up to gst.BaseSrc, but do_query below
        # chains to gst.BaseSink and the element is registered as a sink --
        # BaseSrc looks like a typo for BaseSink; confirm before changing.
        return gst.BaseSrc.do_render_list(self, buffer_list)

    def do_query(self, query):
        # Trace the query type, then defer to the base sink implementation.
        print "Query: %s" % query.type
        return gst.BaseSink.do_query(self, query)


gst.element_register(VideoSink, 'VideoSink')

if __name__ == '__main__':
    gobject.threads_init()

    def usage():
        print("Usage: %s <uri>" % sys.argv[0])
        sys.exit(1)

    if (len(sys.argv) != 2):
        usage()
    uri = sys.argv[1]
    # NOTE(review): the original chunk is truncated inside this
    # triple-quoted pipeline description.
    pipeline = gst.parse_launch("""
 videotestsrc pattern=18 ! video/x-raw-yuv,width=704,height=480 ! videorate !