Beispiel #1
0
 def _create_pipeline(self, source, target, start, end):
     '''
     Create the slicing pipeline:

     gnlcomposition
     -----------------
     | gnlfilesource | --> audioconvert --> lame --> filesink
     -----------------

     source -- path of the input audio file
     target -- path the MP3 slice is written to
     start  -- slice start, in seconds
     end    -- slice end, in seconds

     The gnlcomposition exposes its source pad dynamically; it is hooked
     to the rest of the chain in self._on_pad() (see the 'pad-added'
     connection below).
     '''
     self.pipeline = gst.Pipeline('slicer')
     comp = gst.element_factory_make('gnlcomposition', 'mycomposition')
     # The composition's pad only appears once playback starts.
     comp.connect('pad-added', self._on_pad)
     self.convert = gst.element_factory_make('audioconvert', 'convert')
     self.pipeline.add(self.convert)
     mp3_encoder = gst.element_factory_make('lame', 'mp3-encoder')
     self.pipeline.add(mp3_encoder)
     self.convert.link(mp3_encoder)
     out = gst.element_factory_make('filesink', 'out')
     out.set_property('location', target)
     self.pipeline.add(out)
     mp3_encoder.link(out)

     # Source clip: play [start, end) of the input file (times in ns).
     audio1 = gst.element_factory_make('gnlfilesource', 'audio1')
     audio1.set_property('location', source)
     audio1.set_property('start', long(start) * gst.SECOND)
     audio1.set_property('duration', long(end-start) * gst.SECOND)
     self.pipeline.add(comp)
     comp.add(audio1)
Beispiel #2
0
    def _gst_init(self):
        """Build the appsink + playbin pipeline and attach bus callbacks."""
        # self._videosink will receive the buffers so we can upload them to GPU
        if PY2:
            self._videosink = gst.element_factory_make('appsink', 'videosink')
            self._videosink.set_property('caps', gst.Caps(_VIDEO_CAPS))
        else:
            self._videosink = gst.ElementFactory.make('appsink', 'videosink')
            self._videosink.set_property('caps',
                 gst.caps_from_string(_VIDEO_CAPS))

        # drop=True discards late buffers; emit-signals makes appsink fire
        # the 'new-buffer'/'new-sample' signal for every frame.
        self._videosink.set_property('async', True)
        self._videosink.set_property('drop', True)
        self._videosink.set_property('qos', True)
        self._videosink.set_property('emit-signals', True)
        # ref(self) is presumably a weakref so the callback does not keep
        # this object alive -- TODO confirm against the ref import.
        self._videosink.connect('new-' + BUF_SAMPLE, partial(
            _gst_new_buffer, ref(self)))

        # playbin, takes care of all, loading, playing, etc.
        # XXX playbin2 have some issue when playing some video or streaming :/
        #self._playbin = gst.element_factory_make('playbin2', 'playbin')
        if PY2:
            self._playbin = gst.element_factory_make('playbin', 'playbin')
        else:
            self._playbin = gst.ElementFactory.make('playbin', 'playbin')
        self._playbin.set_property('video-sink', self._videosink)

        # gstreamer bus, to attach and listen to gst messages
        self._bus = self._playbin.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect('message', _on_gst_message)
        self._bus.connect('message::eos', partial(
            _on_gst_eos, ref(self)))
Beispiel #3
0
    def do_request_new_pad(self, template, name=None):
        """Create a new ghosted sink pad feeding the internal videomixer.

        Builds a ffmpegcolorspace -> capsfilter chain for the new input,
        links it to a fresh request pad on the videomixer, and exposes the
        chain's sink pad as a ghost pad on this element.

        template -- pad template for the request (logged only)
        name     -- desired pad name; autogenerated from pad_count if None

        Returns the new ghost pad, or None if `name` is already taken.
        """
        self.debug("template:%r, name:%r" % (template, name))
        if name is None:
            name = "sink_%u" % self.pad_count
        if name in self.inputs:
            # Refuse duplicate pad names.
            return None

        csp = gst.element_factory_make("ffmpegcolorspace", "csp-%d" % self.pad_count)
        capsfilter = gst.element_factory_make("capsfilter", "capsfilter-%d" % self.pad_count)
        # Configure the capsfilter caps: force AYUV when any input carries
        # alpha so the mixer sees a consistent format, plain YUV otherwise.
        if self.alpha_helper.alpha_count != 0:
            capsfilter.props.caps = gst.Caps('video/x-raw-yuv,format=(fourcc)AYUV')
        else:
            capsfilter.props.caps = gst.Caps('video/x-raw-yuv')

        self.add(csp, capsfilter)

        # PAD_LINK_CHECK_NOTHING skips caps checks; the elements are
        # known-compatible by construction.
        csp.link_pads_full("src", capsfilter, "sink", gst.PAD_LINK_CHECK_NOTHING)
        csp.sync_state_with_parent()
        capsfilter.sync_state_with_parent()

        videomixerpad = self.videomixer.get_request_pad("sink_%d" % self.pad_count)

        capsfilter.get_pad("src").link_full(videomixerpad, gst.PAD_LINK_CHECK_NOTHING)

        pad = gst.GhostPad(name, csp.get_pad("sink"))
        pad.set_active(True)
        self.add_pad(pad)
        self.inputs[name] = (pad, csp, capsfilter, videomixerpad)
        self.pad_count += 1
        return pad
Beispiel #4
0
	def __init__(self, location, caps):
		"""Decode bin for one audio file.

		Chain: filesrc -> decodebin ~> audioconvert -> audioresample -> identity.
		The decodebin -> audioconvert link is made dynamically in
		self.__on_new_decoded_pad() once decodebin exposes a pad.

		location -- path of the file to decode
		caps     -- caps applied to the audioresample -> identity link
		"""
		gst.Bin.__init__(self)

		# Create elements
		src = gst.element_factory_make('filesrc')
		dec = gst.element_factory_make('decodebin')
		conv = gst.element_factory_make('audioconvert')
		rsmpl = gst.element_factory_make('audioresample')
		ident = gst.element_factory_make('identity')

		# Set 'location' property on filesrc
		src.set_property('location', location)

		# Connect handler for 'new-decoded-pad' signal
		dec.connect('new-decoded-pad', self.__on_new_decoded_pad)

		# Add elements to bin
		self.add(src, dec, conv, rsmpl, ident)

		# Link *some* elements
		# This is completed in self.__on_new_decoded_pad()
		src.link(dec)
		conv.link(rsmpl)
		# Constrain the resample output to the requested caps.
		rsmpl.link(ident, caps)

		# Reference used in self.__on_new_decoded_pad()
		self.__apad = conv.get_pad('sink')

		# Add ghost pad exposing the end of the chain as this bin's output
		self.add_pad(gst.GhostPad('src', ident.get_pad('src')))
Beispiel #5
0
    def __init__(
            self, uris, data_callback, error_callback=None, scan_timeout=1000):
        """Build a metadata-scanning pipeline: uridecodebin -> fakesink.

        uris           -- iterable of URIs to scan (consumed lazily)
        data_callback  -- called with the data collected for each URI
        error_callback -- optional callback invoked on scan errors
        scan_timeout   -- per-URI timeout, in milliseconds
        """
        self.data = {}
        self.uris = iter(uris)
        self.data_callback = data_callback
        self.error_callback = error_callback
        self.scan_timeout = scan_timeout
        self.loop = gobject.MainLoop()
        self.timeout_id = None

        # The fakesink's handoff signal fires for every buffer, letting us
        # inspect the stream without actually playing it.
        self.fakesink = gst.element_factory_make('fakesink')
        self.fakesink.set_property('signal-handoffs', True)
        self.fakesink.connect('handoff', self.process_handoff)

        # Restrict decoding to raw audio; other streams are ignored.
        self.uribin = gst.element_factory_make('uridecodebin')
        self.uribin.set_property(
            'caps', gst.Caps(b'audio/x-raw-int; audio/x-raw-float'))
        self.uribin.connect('pad-added', self.process_new_pad)

        self.pipe = gst.element_factory_make('pipeline')
        self.pipe.add(self.uribin)
        self.pipe.add(self.fakesink)

        # Listen for the bus messages the scan logic is driven by.
        bus = self.pipe.get_bus()
        bus.add_signal_watch()
        bus.connect('message::application', self.process_application)
        bus.connect('message::tag', self.process_tags)
        bus.connect('message::error', self.process_error)
Beispiel #6
0
 def __init__(self):
     """Audio-only player: playbin2 with its video output discarded."""
     self.player = gst.element_factory_make("playbin2", "player")
     # Route video into a fakesink so only the audio is rendered.
     video_sink = gst.element_factory_make("fakesink", "fakesink")
     self.player.set_property("video-sink", video_sink)
     # Watch the player's bus so on_message receives state/error events.
     message_bus = self.player.get_bus()
     message_bus.add_signal_watch()
     message_bus.connect("message", self.on_message)
Beispiel #7
0
    def __init__(self, location):
        """Encoding bin: raw audio in -> vorbisenc -> oggmux -> filesink.

        location -- path the resulting Ogg/Vorbis file is written to
        """
        gst.Bin.__init__(self)

        self.set_name('ogg_bin')

        encoder = gst.element_factory_make(
            "vorbisenc", "vorbisenc")
        muxer = gst.element_factory_make(
            "oggmux", "oggmux")
        writer = gst.element_factory_make(
            "filesink", "filesinkogg")
        writer.set_property(
            'location', location)

        for element in (encoder, muxer, writer):
            self.add(element)

        encoder.link(muxer)
        muxer.link(writer)

        # Expose the encoder's sink pad so upstream elements can link here.
        self.add_pad(gst.GhostPad("sink", encoder.get_static_pad("sink")))
Beispiel #8
0
    def __init__(self):
        """Build the GTK window (play/stop/quit buttons + drawing area) and
        a videotestsrc -> xvimagesink preview pipeline."""

        # Create GUI objects
        self.window = gtk.Window()
        self.vbox = gtk.VBox()
        self.da = gtk.DrawingArea()
        self.bb = gtk.HButtonBox()
        self.da.set_size_request(300, 150)
        # GTK stock IDs contain no spaces; the original strings
        # ("gtk - media - play" etc.) would not resolve to stock items.
        self.playButton = gtk.Button(stock="gtk-media-play")
        self.playButton.connect("clicked", self.OnPlay)
        self.stopButton = gtk.Button(stock="gtk-media-stop")
        self.stopButton.connect("clicked", self.OnStop)
        self.quitButton = gtk.Button(stock="gtk-quit")
        self.quitButton.connect("clicked", self.OnQuit)
        self.vbox.pack_start(self.da)
        self.bb.add(self.playButton)
        self.bb.add(self.stopButton)
        self.bb.add(self.quitButton)
        self.vbox.pack_start(self.bb)
        self.window.add(self.vbox)

        # Create GStreamer pipeline
        self.pipeline = gst.Pipeline("mypipeline")
        # Set up our video test source
        self.videotestsrc = gst.element_factory_make("videotestsrc", "video")
        # Add it to the pipeline
        self.pipeline.add(self.videotestsrc)
        # Now we need somewhere to send the video
        self.sink = gst.element_factory_make("xvimagesink", "sink")
        # Add it to the pipeline
        self.pipeline.add(self.sink)
        # Link the video source to the sink - xv
        self.videotestsrc.link(self.sink)
        self.window.show_all()
    def __init__(self):
        """Set up the dual-stream player: adder-based pipeline with fixed caps."""
        _base.ExailePlayer.__init__(self)
        # Index of the stream slot currently in use (0 or 1).
        self._current_stream = 1
        self._timer_id = 0

        # have to fix the caps because gst cant deal with having them change.
        # TODO: make this a preference and/or autodetect optimal based on the
        #   output device - if its a 48000hz-native chip we dont want to send it
        #   44100hz audio all the time.
        #   Or better yet, fix gst to handle changing caps :D
        self.caps = gst.Caps(
                "audio/x-raw-int, "
                "endianness=(int)1234, "
                "signed=(boolean)true, "
                "width=(int)16, "
                "depth=(int)16, "
                "rate=(int)44100, "
                "channels=(int)2")
        self.pipe = gst.Pipeline()
        # adder mixes the two streams; the queue decouples it from the sink.
        self.adder = gst.element_factory_make("adder")
        self.audio_queue = gst.element_factory_make("queue")

        # Two slots so one track can fade/transition into the next.
        self.streams = [None, None]

        self._load_queue_values()
        self._setup_pipeline()
        self.setup_bus()
Beispiel #10
0
    def __init__(self, filename, threshold=-9.0):
        """Level-analysis pipeline: AudioSource -> level -> fakesink.

        filename  -- audio file to analyse
        threshold -- level threshold in dB used for mix-in/out detection
        """
        gst.Pipeline.__init__(self)

        self._filename = filename

        self._thresholddB = threshold
        # Convert the dB threshold to a linear power ratio.
        self._threshold = math.pow(10, self._thresholddB / 10.0)

        self._source = sources.AudioSource(filename)
        self._source.connect('done', self._done_cb)

        self._level = gst.element_factory_make("level")

        self._fakesink = gst.element_factory_make("fakesink")

        self.add(self._source, self._level, self._fakesink)
        # The source's pad appears dynamically; it is linked to the level
        # element in _sourcePadAddedCb.
        self._source.connect("pad-added", self._sourcePadAddedCb)
        self._level.link(self._fakesink)

        # temporary values for each timepoint
        self._rmsdB = {} # hash of channel, rmsdB value
        self._peakdB = 0.0 # highest value over all channels for this time

        # results over the whole file
        self._meansquaresums = [] # list of time -> mean square sum value
        self._peaksdB = [] # list of time -> peak value

        self._lasttime = 0

        # will be set when done
        self.mixin = 0
        self.mixout = 0
        self.length = 0
        self.rms = 0.0
        self.rmsdB = 0.0
 def __init__(self, name, audiosrc, partial_cb, final_cb, lm_path=None, dict_path=None):
   """ Sets up the gstreamer pipeline and registers callbacks.

       Chain: audiosrc -> audioconvert -> audioresample -> vader ->
       pocketsphinx -> fakesink.

       Partial and full callbacks must take arguments (name, uttid, text).
       lm_path/dict_path optionally point pocketsphinx at a custom language
       model and pronunciation dictionary (both must be given together).
   """
   #rospy.Subscriber("chatter", String, callback)

   self.name = name
   self.partial_cb = partial_cb
   self.final_cb = final_cb
   self.pipe = gst.Pipeline()

   conv = gst.element_factory_make("audioconvert", "audioconv")
   res = gst.element_factory_make("audioresample", "audioresamp")

   # Vader controls when sphinx listens for spoken text
   vader = gst.element_factory_make("vader", "vad")
   vader.set_property("auto-threshold", True)

   asr = gst.element_factory_make("pocketsphinx", "asr")
   asr.connect('partial_result', self.asr_partial_result)
   asr.connect('result', self.asr_result)
   if lm_path and dict_path:
     asr.set_property('lm', lm_path)
     asr.set_property('dict', dict_path)
   # NOTE(review): 'configured' is set after lm/dict, presumably so the
   # model is loaded with those paths -- confirm against pocketsphinx docs.
   asr.set_property('configured', True)

   # we don't do anything with the actual audio data after transcription,
   # but you could e.g. write the audio to a file here instead.
   sink = gst.element_factory_make("fakesink", "fs")

   self.pipe.add(audiosrc, conv, res, vader, asr, sink)
   gst.element_link_many(audiosrc, conv, res, vader, asr, sink)
   # Start transcribing immediately.
   self.pipe.set_state(gst.STATE_PLAYING)
Beispiel #12
0
def on_audio_decoded(element, pad, bin, muxer):
    """Pad-added handler: AAC-encode the first raw audio stream and mux it.

    On the first raw-audio pad, builds
    audioconvert -> queue -> faac -> queue -> muxer, brings the new
    elements to PLAYING, then links the demuxer pad in.  Later audio pads
    are ignored (only one audio stream is muxed).  Always returns None.
    """
    name = pad.get_caps()[0].get_name()

    # Only one audio stream will be muxed: if the convert element already
    # exists, an earlier pad has claimed the audio branch.
    created = bin.get_by_name("sbs-audio-convert")
    pending = created is None

    if name.startswith("audio/x-raw") and pending:
        # audioconvert ! queue ! faac bitrate=128000 ! queue ! mux.
        convert = gst.element_factory_make("audioconvert", "sbs-audio-convert")
        q1 = gst.element_factory_make("queue", "sbs-audio-queue1")
        f = gst.element_factory_make("faac", "sbs-audio-encoder")
        f.set_property("bitrate", 128000)
        q2 = gst.element_factory_make("queue", "sbs-audio-queue2")

        # Add and link the chain before activating it.
        bin.add(convert)
        bin.add(q1)
        bin.add(f)
        bin.add(q2)
        convert.link(q1)
        q1.link(f)
        f.link(q2)
        q2.link(muxer)
        # Bring every new element to PLAYING before linking the demuxer
        # pad so data can flow immediately.
        convert.set_state(gst.STATE_PLAYING)
        q1.set_state(gst.STATE_PLAYING)
        f.set_state(gst.STATE_PLAYING)
        q2.set_state(gst.STATE_PLAYING)
        pad.link(convert.get_pad("sink"))

    return None
Beispiel #13
0
	def __init__(self):
		"""Build a playbin whose goom visualisation is teed to two video sinks.

		The tee ('vidtee') duplicates the video into two
		queue -> ffmpegcolorspace -> autovideosink branches; audio goes to
		an autoaudiosink attached to the playbin.
		"""
		self.pipeline = gst.Pipeline()
		self.player = gst.element_factory_make("playbin", "player")
		# goom renders audio as a visualisation stream for the playbin.
		self.vis = gst.element_factory_make("goom", "vis")
		self.videosink = gst.element_factory_make("tee", 'vidtee')
		self.audiosink = gst.element_factory_make("autoaudiosink", 'audiosink')
		self.aqueue = gst.element_factory_make("queue", 'aqueue')
		self.bqueue = gst.element_factory_make("queue", 'bqueue')
		self.avidsink = gst.element_factory_make('autovideosink', 'avidsink')
		self.bvidsink = gst.element_factory_make('autovideosink', 'bvidsink')
		self.acolorspace = gst.element_factory_make("ffmpegcolorspace","acolor")
		self.bcolorspace = gst.element_factory_make("ffmpegcolorspace","bcolor")
		#self.pipeline.add(self.acolorspace, self.bcolorspace, self.player, self.vis, self.videosink, self.audiosink, self.aqueue, self.bqueue, self.avidsink, self.bvidsink)
		self.pipeline.add(self.videosink, self.aqueue, self.bqueue, self.acolorspace, self.bcolorspace, self.avidsink, self.bvidsink)
		# Each link below requests a new src pad from the tee.
		self.videosink.link(self.aqueue)
		self.videosink.link(self.bqueue)
		gst.element_link_many(self.aqueue, self.acolorspace, self.avidsink)
		gst.element_link_many(self.bqueue, self.bcolorspace, self.bvidsink)
		self.player.set_property("vis-plugin", self.vis)
		#self.player.set_property("video-sink", self.pipeline)
		self.player.set_property("audio-sink", self.audiosink)
		self.bus = self.player.get_bus()
		#self.bus.add_signal_watch()
		#self.bus.enable_sync_message_emission()
		#self.bus.connect("message", self.on_message)
		print self.player
Beispiel #14
0
    def __init__(self, config, plugman, window_id=None, audio_feedback=None, cli=False):
        """Set up the recording pipeline skeleton (audio/video tee entry points).

        config         -- application configuration object
        plugman        -- plugin manager (used elsewhere to load plugins)
        window_id      -- window id for video preview, if any
        audio_feedback -- event/callback used for audio feedback
        cli            -- True when running without a GUI
        """
        self.config = config
        self.plugman = plugman
        self.window_id = window_id
        self.audio_feedback_event = audio_feedback
        self.cli = cli

        self.record_audio = False
        self.record_video = False

        self.current_state = Multimedia.NULL

        # Initialize Player
        self.player = gst.Pipeline('player')
        bus = self.player.get_bus()
        bus.add_signal_watch()
        # Sync messages are needed so video overlay setup can happen in the
        # streaming thread (see on_sync_message).
        bus.enable_sync_message_emission()
        bus.connect('message', self.on_message)
        bus.connect('sync-message::element', self.on_sync_message)

        # Initialize Entry Points: tees let several consumers attach to the
        # same audio/video streams later.
        self.audio_tee = gst.element_factory_make('tee', 'audio_tee')
        self.video_tee = gst.element_factory_make('tee', 'video_tee')
        self.player.add(self.audio_tee)
        self.player.add(self.video_tee)

        log.debug("Gstreamer initialized.")
Beispiel #15
0
    def createPipeline(self, w):
        """Given a window, creates a pipeline and connects it to the window.

        Returns (pipeline, cspace); cspace is the upstream element callers
        should link their source into:  ... ! cspace ! scale ! sink.
        """

        # code will make the ximagesink output in the specified window
        def set_xid(window):
            # The sync handler runs in a streaming thread, so guard the
            # GTK calls with the GDK lock.
            gtk.gdk.threads_enter()
            sink.set_xwindow_id(window.window.xid)
            sink.expose()
            gtk.gdk.threads_leave()

        # this code receives the messages from the pipeline. if we
        # need to set X11 id, then we call set_xid
        def bus_handler(unused_bus, message):
            if message.type == gst.MESSAGE_ELEMENT:
                if message.structure.get_name() == 'prepare-xwindow-id':
                    set_xid(w)
            return gst.BUS_PASS

        # create our pipeline, and connect our bus_handler
        self.pipeline = gst.Pipeline()
        bus = self.pipeline.get_bus()
        bus.set_sync_handler(bus_handler)

        sink = gst.element_factory_make("ximagesink", "sink")
        sink.set_property("force-aspect-ratio", True)
        sink.set_property("handle-expose", True)
        scale = gst.element_factory_make("videoscale", "scale")
        cspace = gst.element_factory_make("ffmpegcolorspace", "cspace")

        # our pipeline looks like this: ... ! cspace ! scale ! sink
        self.pipeline.add(cspace, scale, sink)
        scale.link(sink)
        cspace.link(scale)
        return (self.pipeline, cspace)
Beispiel #16
0
    def __init__(self):
        """Encoding bin: queue -> audioconvert -> ffenc_mp2.

        Exposes the queue's sink and the encoder's src as ghost pads so
        the bin behaves like a single element.
        """
        gst.Bin.__init__(self)

        self.set_name('mp2_bin')

        buffer_queue = gst.element_factory_make('queue', "queue")
        # Disable every size cap so the queue never blocks on its own limits.
        for limit in ("max-size-buffers", "max-size-bytes", "max-size-time"):
            buffer_queue.set_property(limit, 0)

        converter = gst.element_factory_make('audioconvert', "audioconvert")
        encoder = gst.element_factory_make('ffenc_mp2', 'ffenc_mp2')

        for element in (buffer_queue, converter, encoder):
            self.add(element)

        buffer_queue.link(converter)
        converter.link(encoder)

        self.add_pad(gst.GhostPad("sink",
            buffer_queue.get_static_pad("sink")))
        self.add_pad(gst.GhostPad("src",
            encoder.get_static_pad("src")))
Beispiel #17
0
    def __init__(self):
        """Picture-in-picture bin built around a videomixer.

        Input branches (managed via the A/B pad lists) feed the videomixer;
        its output runs through an ffmpegcolorspace whose src pad is
        ghosted as this bin's output.
        """
        gst.Bin.__init__(self)

        # Geometry of the inset picture.
        self.width = 320
        self.height = 240
        self.x_position = 0
        self.y_position = 0
        self.enabled = True
        self.position = 0

        self.videomixer = gst.element_factory_make("videomixer", "videomixer")
        self.add(self.videomixer)
        self.caps = self.make_caps(self.width, self.height)

        self.csp = gst.element_factory_make("ffmpegcolorspace", "pip_csp")
        self.add(self.csp)

        # Per-branch capsfilters and pads for the A and B inputs.
        self.A_capsfilter = []
        self.B_capsfilter = []
        self.A_pads = []
        self.B_pads = []

        self.videomixer.link(self.csp)

        # Python 2 iterator: take the converter's first (only) src pad.
        src_pad = gst.GhostPad("src", self.csp.src_pads().next())
        self.add_pad(src_pad)

        self.A_number = 0
        self.B_number = 0

        self.a_active = 0
        self._set_active_a(self.a_active)
        self.b_active = 0
        self._set_active_b(self.b_active)
Beispiel #18
0
    def __init__(self):
        """Capture bin: autoaudiosrc -> audiorate -> capsfilter -> audioconvert.

        The capsfilter pins the stream to 16 kHz stereo 16-bit integer
        audio; the converter's src pad is ghosted as the bin output.
        """
        gst.Bin.__init__(self)

        self.set_name("Audio_Bin")

        source = gst.element_factory_make('autoaudiosrc', "autoaudiosrc")
        rate = gst.element_factory_make('audiorate', "audiorate")

        # Fix the capture format.
        audio_filter = gst.element_factory_make("capsfilter", "filtroaudio")
        audio_filter.set_property(
            "caps",
            gst.Caps("audio/x-raw-int,rate=16000,channels=2,depth=16"))

        converter = gst.element_factory_make('audioconvert', "audioconvert")

        for element in (source, rate, audio_filter, converter):
            self.add(element)

        source.link(rate)
        rate.link(audio_filter)
        audio_filter.link(converter)

        self.add_pad(gst.GhostPad(
            "src", converter.get_static_pad("src")))
Beispiel #19
0
    def __init__(self):
        """Encoding bin: queue -> audioconvert -> vorbisenc.

        Exposes the queue's sink and the encoder's src as ghost pads so
        the bin behaves like a single element.
        """
        gst.Bin.__init__(self)

        self.set_name('Vorbis_bin')

        in_queue = gst.element_factory_make('queue', "queue")
        # Disable every size cap so the queue never blocks on its own limits.
        for limit in ("max-size-buffers", "max-size-bytes", "max-size-time"):
            in_queue.set_property(limit, 0)

        converter = gst.element_factory_make('audioconvert', "audioconvert")
        encoder = gst.element_factory_make('vorbisenc', 'vorbisenc')

        for element in (in_queue, converter, encoder):
            self.add(element)

        in_queue.link(converter)
        converter.link(encoder)

        self.add_pad(gst.GhostPad("sink",
            in_queue.get_static_pad("sink")))
        self.add_pad(gst.GhostPad("src",
            encoder.get_static_pad("src")))
Beispiel #20
0
 def gstreamer_player(self):
     """Create and return a playbin2-based player.

     When the equalizer is enabled in config (FC().is_eq_enable), the
     audio sink is a bin routing audio through equalizer-10bands before
     the autoaudiosink.
     """
     playbin = gst.element_factory_make("playbin2", "player")

     if FC().is_eq_enable:
         self.audiobin = gst.Bin('audiobin')
         audiosink = gst.element_factory_make('autoaudiosink', 'audiosink')

         self.audiobin.add(audiosink)
         # Ghost pad initially targets the sink directly...
         self.audiobin.add_pad(gst.GhostPad('sink', audiosink.get_pad('sink')))
         playbin.set_property('audio-sink', self.audiobin)

         self.equalizer = gst.element_factory_make('equalizer-10bands', 'equalizer')
         self.audiobin.add(self.equalizer)

         # ...then is retargeted so audio flows through the equalizer first.
         self.audiobin.get_pad('sink').set_target(self.equalizer.get_pad('sink'))
         self.equalizer.link(audiosink)

     bus = playbin.get_bus()
     bus.add_signal_watch()
     bus.enable_sync_message_emission()
     bus.connect("message", self.on_message)
     bus.connect("sync-message::element", self.on_sync_message)
     logging.debug("LOCAL gstreamer")
     return playbin
Beispiel #21
0
    def __init__(self, position=0, duration=2 * gst.SECOND, fadefromblack=True):
        """Video fade bin: colorspace -> alpha -> videomixer -> colorspace.

        position      -- stream time (ns) at which the fade starts
        duration      -- fade length in ns (default two seconds)
        fadefromblack -- direction of the fade (from black vs. to black)
        """
        gst.Bin.__init__(self)
        self.incsp = gst.element_factory_make("ffmpegcolorspace", "incsp")
        self.outcsp = gst.element_factory_make("ffmpegcolorspace", "outcsp")
        self.alpha = gst.element_factory_make("alpha", "alpha")
        self.vmix = gst.element_factory_make("videomixer", "videomix")
        # background=1 selects a solid background fill -- presumably black,
        # which is what the fade blends against; confirm against videomixer docs.
        self.vmix.set_property("background", 1)
        self.add(self.incsp, self.alpha, self.vmix, self.outcsp)
        gst.element_link_many(self.incsp, self.alpha, self.vmix, self.outcsp)

        self._sinkpad = gst.GhostPad("sink", self.incsp.get_pad("sink"))
        self._sinkpad.set_active(True)
        self._srcpad = gst.GhostPad("src", self.outcsp.get_pad("src"))
        self._srcpad.set_active(True)

        self.add_pad(self._sinkpad)
        self.add_pad(self._srcpad)

        self.startposition = position
        self.duration = duration
        self.fadefromblack = fadefromblack

        # The controller animates the alpha property linearly over the fade.
        self.alphacontrol = gst.Controller(self.alpha, "alpha")
        self.alphacontrol.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)

        self._resetControllerValues()
Beispiel #22
0
    def make_source(self):
        """Build and return a source bin: (camera | test source) -> capsfilter -> videoscale.

        When CAMERA is set, uses v4l2src on that device; otherwise a live
        videotestsrc with a time overlay so frames are distinguishable.
        The videoscale's src pad is ghosted as the bin's output.
        """
        # Renamed from `bin`/`filter` to avoid shadowing the builtins.
        source_bin = gst.Bin()
        if CAMERA:
            source = gst.element_factory_make("v4l2src")
            source.set_property("device", CAMERA)
            source_bin.add(source)
        else:
            source = gst.element_factory_make("videotestsrc")
            source.set_property("is-live", 1)
            source_bin.add(source)
            overlay = gst.element_factory_make("timeoverlay")
            overlay.set_property("font-desc", "Sans 32")
            source_bin.add(overlay)
            source.link(overlay)
            # From here on the overlay is the tail of the source chain.
            source = overlay

        # Constrain resolution and framerate to ranges downstream handles.
        caps_filter = gst.element_factory_make("capsfilter")
        caps_filter.set_property("caps", gst.Caps("video/x-raw-yuv , width=[300,500] , height=[200,500], framerate=[20/1,30/1]"))
        source_bin.add(caps_filter)
        source.link(caps_filter)

        videoscale = gst.element_factory_make("videoscale")
        source_bin.add(videoscale)
        caps_filter.link(videoscale)

        source_bin.add_pad(gst.GhostPad("src", videoscale.get_pad("src")))
        return source_bin
Beispiel #23
0
    def __init__(self):
        """Main window: build the UI, pick a video source engine, start playback."""
        QMainWindow.__init__(self)

        self.initUI()

        # Must not be initialized until after layout is set
        self.gstWindowId = None
        engine_config = 'gstreamer'
        if engine_config == 'gstreamer':
            self.source = gst.element_factory_make("v4l2src", "vsource")

            self.source.set_property("device", "/dev/video0")
            print 'FD', self.source.get_property("device-fd")
            #self.fd = open('/dev/video0', 'rw')
            #self.source.set_property("uri", 'fd://%s' % self.fd.fileno())

            self.setupGst()
        elif engine_config == 'gstreamer-testsrc':
            self.source = gst.element_factory_make("videotestsrc", "video-source")
            self.setupGst()
        else:
            raise Exception('Unknown engine %s' % (engine_config,))

        # setupGst() is presumably what populates self.gstWindowId and
        # self.player -- confirm against its definition.
        if self.gstWindowId:
            print "Starting gstreamer pipeline"
            self.player.set_state(gst.STATE_PLAYING)
Beispiel #24
0
    def __probe(self):
        """Probes the device to figure out which v4l source to use.

        Tries v4lsrc first, then v4l2src: whichever reaches PAUSED without
        a state-change failure wins.  Sets self.__isv4l2 and self.__probed
        on success; logs a warning if neither source works.
        """
        # first v4l
        v = gst.Pipeline()
        vsrc = gst.element_factory_make("v4lsrc")
        vsrc.props.device = self._device
        fsink = gst.element_factory_make("fakesink")
        v.add(vsrc, fsink)
        vsrc.link(fsink)
        if v.set_state(gst.STATE_PAUSED) != gst.STATE_CHANGE_FAILURE:
            self.__isv4l2 = False
            self.__probed = True
            v.set_state(gst.STATE_NULL)
            return
        v.set_state(gst.STATE_NULL)

        v = gst.Pipeline()
        vsrc = gst.element_factory_make("v4l2src")
        vsrc.props.device = self._device
        fsink = gst.element_factory_make("fakesink")
        v.add(vsrc, fsink)
        # NOTE(review): unlike the v4l attempt above, vsrc is never linked
        # to fsink here -- possibly fine for a PAUSED-only probe, but worth
        # confirming it is intentional.
        # let's still make sure that it's a v4l2 device :)
        if v.set_state(gst.STATE_PAUSED) != gst.STATE_CHANGE_FAILURE:
            self.__isv4l2 = True
            self.__probed = True
            v.set_state(gst.STATE_NULL)
            return
        v.set_state(gst.STATE_NULL)
        self.warning("Could not probe %s", self._device)
Beispiel #25
0
    def get_videoinput_bin(self):
        """
        Return the video input object in gstreamer bin format.

        Uses v4l2src on Linux and dshowvideosrc on Windows/Cygwin; raises
        RuntimeError on any other platform instead of failing later with
        an obscure error from adding None to the bin.
        """
        # Do not pass a name so that we can load this input more than once.
        input_bin = gst.Bin()

        if sys.platform.startswith("linux"):
            videosrc = gst.element_factory_make("v4l2src", "videosrc")
            videosrc.set_property("device", self.device)
        elif sys.platform in ["win32", "cygwin"]:
            videosrc = gst.element_factory_make("dshowvideosrc", "videosrc")
            videosrc.set_property("device-name", self.device)
        else:
            raise RuntimeError(
                "Unsupported platform for video input: %s" % sys.platform)
        input_bin.add(videosrc)

        # Normalise the colorspace so downstream elements get a known format.
        colorspace = gst.element_factory_make("ffmpegcolorspace", "colorspace")
        input_bin.add(colorspace)
        videosrc.link(colorspace)

        # Setup ghost pad: expose the converter's src pad as the bin output.
        pad = colorspace.get_pad("src")
        ghostpad = gst.GhostPad("videosrc", pad)
        input_bin.add_pad(ghostpad)

        return input_bin
	def decoder_pad_added(self, decoder, pad):
		"""Pad-added handler: route decoded streams into the muxer.

		Video pads go through the video queues: h264 passes through
		untouched, everything else is re-encoded with ffenc_mpeg4.
		Audio pads go through the audio queues: ac3 gets an ac3parse,
		mpeg audio passes through, everything else is converted and
		re-encoded to mp2.  Newly added elements are set to PLAYING so
		data flows immediately.
		"""
		caps_string = pad.get_caps().to_string()
		if caps_string.startswith('video'):
			pad.link(self.video_input_queue.get_pad('sink'))
			if caps_string.startswith('video/x-h264'):
				#h264parse = gst.element_factory_make('h264parse', 'h264parse')
				#h264parse.set_property('output-format', 1)
				#self.transcoder.add(h264parse)
				#gst.element_link_many(self.video_input_queue, h264parse, self.video_output_queue, self.muxer)
				gst.element_link_many(self.video_input_queue, self.video_output_queue, self.muxer)
				#h264parse.set_state(gst.STATE_PLAYING)
			else:
				video_encoder = gst.element_factory_make('ffenc_mpeg4', 'video-encoder')
				# 2048 kbit/s target bitrate for the MPEG-4 re-encode.
				video_encoder.set_property('bitrate', (2048*1000))
				self.transcoder.add(video_encoder)
				gst.element_link_many(self.video_input_queue, video_encoder, self.video_output_queue, self.muxer)
				video_encoder.set_state(gst.STATE_PLAYING)
		elif caps_string.startswith('audio'):
			pad.link(self.audio_input_queue.get_pad('sink'))
			if caps_string.startswith('audio/x-ac3'):
				ac3parse = gst.element_factory_make('ac3parse', 'ac3parse')
				self.transcoder.add(ac3parse)
				gst.element_link_many(self.audio_input_queue, ac3parse, self.audio_output_queue, self.muxer)
				ac3parse.set_state(gst.STATE_PLAYING)
			elif caps_string.startswith('audio/mpeg'):
				gst.element_link_many(self.audio_input_queue, self.audio_output_queue, self.muxer)
			else:
				audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
				audio_encoder = gst.element_factory_make('ffenc_mp2', 'audio-encoder')
				self.transcoder.add(audioconvert, audio_encoder)
				gst.element_link_many(self.audio_input_queue, audioconvert, audio_encoder, self.audio_output_queue, self.muxer)
				audioconvert.set_state(gst.STATE_PLAYING)
				audio_encoder.set_state(gst.STATE_PLAYING)
Beispiel #27
0
 def demux_pad_added(self, element, pad, bool):
     '''Add fake sink to get demux info.

     Pad-added handler: attaches a fakesink (through ffmpegcolorspace for
     video pads) to each new demuxer pad and connects its handoff signal
     so stream information can be read from the flowing buffers.
     '''
     caps = pad.get_caps()
     structure = caps[0]
     stream_type = structure.get_name()
     if stream_type.startswith('video'):
         self.have_video = True
         colorspace = gst.element_factory_make('ffmpegcolorspace')
         self.pipeline.add(colorspace)
         # Activate each element before linking so data can flow at once.
         colorspace.set_state(gst.STATE_PLAYING)
         pad.link(colorspace.get_pad('sink'))
         self.video = gst.element_factory_make('fakesink')
         self.video.props.signal_handoffs = True
         self.pipeline.add(self.video)
         self.video.set_state(gst.STATE_PLAYING)
         colorspace.link(self.video)
         self.video_cb = self.video.connect('handoff',
             self.get_video_info_cb)
     elif stream_type.startswith('audio'):
         self.have_audio = True
         self.audio = gst.element_factory_make('fakesink')
         self.audio.props.signal_handoffs = True
         self.pipeline.add(self.audio)
         self.audio.set_state(gst.STATE_PLAYING)
         pad.link(self.audio.get_pad('sink'))
         self.audio_cb = self.audio.connect('handoff',
             self.get_audio_info_cb)
Beispiel #28
0
    def link_audio_sink(self, pad):
        """Link the audio sink to the pad.

        On the first call creates the alsasink and, if available, a
        liveadder in front of it so multiple streams can be mixed.  Each
        incoming pad then gets its own audioconvert -> audioresample ->
        audioconvert chain into the adder.
        """
        print >>sys.stderr, "LINKING AUDIO SINK"
        if not self.adder:
            audiosink = gst.element_factory_make("alsasink")
            audiosink.set_property("buffer-time", 50000)
            self.pipeline.add(audiosink)

            try:
                self.adder = gst.element_factory_make("liveadder")
            except gst.ElementNotFoundError:
                # No liveadder plugin: link this pad straight to the sink
                # (only a single stream can be played in that case).
                audiosink.set_state(gst.STATE_PLAYING)
                pad.link(audiosink.get_pad("sink"))
                return
            self.pipeline.add(self.adder)
            audiosink.set_state(gst.STATE_PLAYING)
            self.adder.link(audiosink)
            self.adder.set_state(gst.STATE_PLAYING)
        # Per-stream conversion chain; states are set downstream-first so
        # data can flow as soon as the pad is linked.
        convert1 = gst.element_factory_make("audioconvert")
        self.pipeline.add(convert1)
        resample = gst.element_factory_make("audioresample")
        self.pipeline.add(resample)
        convert2 = gst.element_factory_make("audioconvert")
        self.pipeline.add(convert2)
        convert1.link(resample)
        resample.link(convert2)
        convert2.link(self.adder)
        pad.link(convert1.get_pad("sink"))
        convert2.set_state(gst.STATE_PLAYING)
        resample.set_state(gst.STATE_PLAYING)
        convert1.set_state(gst.STATE_PLAYING)
    def __init__(self):
        """Preview pipeline: ffmpegcolorspace -> videorate -> xvimagesink."""
        gst.Pipeline.__init__(self)

        self.set_name('jamedia_video_pipeline')

        convert = gst.element_factory_make('ffmpegcolorspace', 'convert')
        rate = gst.element_factory_make('videorate', 'rate')
        pantalla = gst.element_factory_make('xvimagesink', "pantalla")
        pantalla.set_property("force-aspect-ratio", True)

        # FIXME: the OLPC XO's videorate lacks this property, so cap the
        # frame rate on a best-effort basis only.  Narrowed from a bare
        # except so KeyboardInterrupt/SystemExit are not swallowed.
        try:
            rate.set_property('max-rate', 30)
        except Exception:
            pass

        self.add(convert)
        self.add(rate)
        self.add(pantalla)

        convert.link(rate)
        rate.link(pantalla)

        # Ghost the converter's sink pad as the pipeline's input.
        self.ghost_pad = gst.GhostPad("sink", convert.get_static_pad("sink"))
        self.ghost_pad.set_target(convert.get_static_pad("sink"))
        self.add_pad(self.ghost_pad)
Beispiel #30
0
    def __init__(self, uri, process = None, hopsize = 512,
            caps = None):
        """Build a playback pipeline feeding decoded audio into an AubioSink.

        Chain: uridecodebin -> audioconvert -> audioresample ->
        capsfilter -> AubioSink(process, hopsize).

        uri     -- media location; a plain absolute path is converted to a
                   percent-quoted file:// URI
        process -- callback handed to AubioSink
        hopsize -- number of samples per processing hop
        caps    -- optional caps string to force a format before the sink
        """
        if uri.startswith('/'):
            # Absolute filesystem path: turn it into a file:// URI,
            # percent-quoting special characters (Python 2 urllib).
            from urllib import quote
            uri = 'file://'+quote(uri)
        src = gst.element_factory_make('uridecodebin')
        src.set_property('uri', uri)
        # uridecodebin only exposes pads once decoding starts; the callback
        # links them to the converter (see self.apad below).
        src.connect('pad-added', self.source_pad_added_cb)
        conv = gst.element_factory_make('audioconvert')
        self.conv = conv
        rsmpl = gst.element_factory_make('audioresample')
        capsfilter = gst.element_factory_make('capsfilter')
        if caps:
            capsfilter.set_property('caps', gst.caps_from_string(caps))
        sink = AubioSink("AubioSink", process = process)
        sink.set_property('hopsize', hopsize) # * calcsize('f'))

        self.pipeline = gst.Pipeline()

        # Watch the bus so on_eos can react at end-of-stream.
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self.on_eos)

        # Sink pad that the pad-added callback links decoder pads to.
        self.apad = conv.get_pad('sink')

        self.pipeline.add(src, conv, rsmpl, capsfilter, sink)

        gst.element_link_many(conv, rsmpl, capsfilter, sink)

        # NOTE(review): playback starts here in the constructor, before the
        # caller has a chance to run self.mainloop -- confirm intended.
        self.mainloop = gobject.MainLoop()
        self.pipeline.set_state(gst.STATE_PLAYING)
Beispiel #31
0
    def _set_video_source(self):
        """Create the video capture chain and terminate it in self.video_tee.

        Chain: video_src [-> src_tee] [-> dvdemux ! dvdec for firewire]
        -> videorate -> rate caps -> videoscale -> scale caps ->
        ffmpegcolorspace -> video_tee.
        """
        video_src = gst.element_factory_make(self.video_source, 'video_src')
        if (self.video_source_type.startswith('usb')):
            # not sure about device format on windows. for now lets just use the default
            if os.name == 'posix': # only set device for linux systems.
                video_src.set_property('device', self.video_device)

        video_rate = gst.element_factory_make('videorate', 'video_rate')
        video_rate_cap = gst.element_factory_make('capsfilter',
                                                    'video_rate_cap')
        # Clamp the capture to 10 fps RGB.
        video_rate_cap.set_property('caps',
                        gst.caps_from_string('video/x-raw-rgb, framerate=10/1'))
        video_scale = gst.element_factory_make('videoscale', 'video_scale')
        video_scale_cap = gst.element_factory_make('capsfilter',
                                                    'video_scale_cap')
        video_cspace = gst.element_factory_make('ffmpegcolorspace',
                                                    'video_cspace')
        self.video_tee = gst.element_factory_make('tee', 'video_tee')

        # recording_width of '0' means native size; otherwise force the
        # configured recording resolution via the scale capsfilter.
        if self.recording_width != '0':
            self.core.logger.log.debug('Recording will be scaled to %sx%s'
                % (self.recording_width, self.recording_height))
            video_scale_cap.set_property('caps',
                gst.caps_from_string('video/x-raw-rgb, width=%s, height=%s'
                % (self.recording_width, self.recording_height)))

        self.player.add(video_src,
                        video_rate,
                        video_rate_cap,
                        video_scale,
                        video_scale_cap,
                        video_cspace,
                        self.video_tee)

        if ( self.icecast ):
            # Add a "tee" component so that the icecast components can be built at the end
            self.src_tee = gst.element_factory_make('tee', 'src_tee')
            self.player.add( self.src_tee )
            video_src.link( self.src_tee )

        if (self.video_source_type == 'firewire'):
            # DV over firewire arrives muxed: demux and decode before rating.
            self.dv1394q1 =  gst.element_factory_make('queue', 'dv1394q1')
            self.dv1394q2 =  gst.element_factory_make('queue', 'dv1394q2')
            self.dv1394dvdemux =  gst.element_factory_make('dvdemux',
                                                           'dv1394dvdemux')
            self.dv1394dvdec =  gst.element_factory_make('dvdec', 'dv1394dvdec')

            self.player.add(self.dv1394q1,
                            self.dv1394q2,
                            self.dv1394dvdemux,
                            self.dv1394dvdec)

            if ( self.icecast ):
                # The "src_tee" was added so link from it
                self.src_tee.link(self.dv1394dvdemux)
            else:
                video_src.link(self.dv1394dvdemux)

                # NOTE(review): this pad-added hookup and the
                # q1 -> dvdec -> videorate link execute only in the
                # non-icecast branch, so with icecast enabled the demuxer is
                # never connected downstream.  Looks like an indentation
                # slip -- confirm intended behaviour before changing.
                self.dv1394dvdemux.connect('pad-added', self._dvdemux_padded)
                gst.element_link_many(self.dv1394q1, self.dv1394dvdec, video_rate)
        else:
            if ( self.icecast ):
                # The "src_tee" was added so link from it
                self.src_tee.link(video_rate)
            else:
                video_src.link(video_rate)

        gst.element_link_many(video_rate,
                              video_rate_cap,
                              video_scale,
                              video_scale_cap,
                              video_cspace,
                              self.video_tee)
if __name__ == "__main__":
    import sys
    import gobject
    gobject.threads_init()

    if len(sys.argv) != 4:
        print "Usage: %s <ip_address> <user> <pass>" % sys.argv[0]
        sys.exit(-1)

    pipeline = gst.Pipeline("pipe")

    # Register the custom IP-camera source so element_factory_make finds it.
    gobject.type_register(KaicongAudioSource)
    gst.element_register(KaicongAudioSource, 'kaicongaudiosrc',
                         gst.RANK_MARGINAL)

    # Camera audio -> convert -> amplify (x20) -> resample -> speakers.
    src = gst.element_factory_make("kaicongaudiosrc", "audiosrc")
    src.set_property("ip", sys.argv[1])
    src.set_property("user", sys.argv[2])
    src.set_property("pwd", sys.argv[3])
    src.set_property("on", True)
    conv = gst.element_factory_make("audioconvert", "audioconv")
    amp = gst.element_factory_make("audioamplify", "audioamp")
    amp.set_property("amplification", 20)
    res = gst.element_factory_make("audioresample", "audioresamp")
    sink = gst.element_factory_make("autoaudiosink", "audiosink")

    pipeline.add(src, conv, amp, res, sink)
    gst.element_link_many(src, conv, amp, res, sink)
    pipeline.set_state(gst.STATE_PLAYING)

    # NOTE(review): main_loop is created but never run in the visible code;
    # without main_loop.run() the process exits immediately -- confirm this
    # snippet was not truncated.
    main_loop = gobject.MainLoop()
Beispiel #33
0
    def get_output_bin(self, audio=True, video=True, metadata=None):
        """Build a gst.Bin that muxes audio/video into FLV and streams it
        over RTMP.

        audio    -- include the audio branch
                    (queue -> audioconvert -> level -> audio codec -> muxer)
        video    -- include the video branch (queue -> x264enc -> muxer)
        metadata -- optional talk metadata; stored via set_metadata() and
                    merged into the muxer's tags

        Returns the bin, exposing "audiosink"/"videosink" ghost pads for the
        enabled branches.
        """
        # Local renamed from `bin`, which shadowed the builtin.
        output_bin = gst.Bin()

        if metadata is not None:
            self.set_metadata(metadata)

        # Muxer
        muxer = gst.element_factory_make("flvmux", "muxer")

        # Tag merge mode: index 2 of the enum is GST_TAG_MERGE_REPLACE.
        merge_mode = gst.TagMergeMode.__enum_values__[2]

        if metadata is not None:
            # Only merge tags when metadata was actually supplied.
            muxer.merge_tags(self.tags, merge_mode)
        muxer.set_tag_merge_mode(merge_mode)

        output_bin.add(muxer)

        # RTMP sink
        rtmpsink = gst.element_factory_make('rtmpsink', 'rtmpsink')
        rtmpsink.set_property('location', self.config.url)
        output_bin.add(rtmpsink)

        #
        # Setup Audio Pipeline if Audio Recording is Enabled
        #
        if audio:
            audioqueue = gst.element_factory_make("queue", "audioqueue")
            output_bin.add(audioqueue)

            audioconvert = gst.element_factory_make("audioconvert",
                                                    "audioconvert")
            output_bin.add(audioconvert)

            # Level meter for UI feedback; 20 ms reporting interval (ns).
            audiolevel = gst.element_factory_make('level', 'audiolevel')
            audiolevel.set_property('interval', 20000000)
            output_bin.add(audiolevel)

            audiocodec = gst.element_factory_make(self.config.audio_codec,
                                                  "audiocodec")

            # Not every codec exposes 'quality'; warn instead of crashing.
            if 'quality' in audiocodec.get_property_names():
                audiocodec.set_property("quality", self.config.audio_quality)
            else:
                log.debug(
                    "WARNING: Missing property: 'quality' on audiocodec; available: "
                    + ','.join(audiocodec.get_property_names()))
            output_bin.add(audiocodec)

            # Ghost pad so callers can link audio into this bin.
            audiopad = audioqueue.get_pad("sink")
            audio_ghostpad = gst.GhostPad("audiosink", audiopad)
            output_bin.add_pad(audio_ghostpad)

            # Link Elements
            audioqueue.link(audioconvert)
            audioconvert.link(audiolevel)
            audiolevel.link(audiocodec)
            audiocodec.link(muxer)

        #
        # Setup Video Pipeline
        #
        if video:
            videoqueue = gst.element_factory_make("queue", "videoqueue")
            output_bin.add(videoqueue)

            videocodec = gst.element_factory_make("x264enc", "videocodec")
            videocodec.set_property("bitrate", self.config.video_bitrate)
            if self.config.video_tune != 'none':
                videocodec.set_property('tune', self.config.video_tune)
            output_bin.add(videocodec)

            # Ghost pad so callers can link video into this bin.
            videopad = videoqueue.get_pad("sink")
            video_ghostpad = gst.GhostPad("videosink", videopad)
            output_bin.add_pad(video_ghostpad)

            # Link Elements
            videoqueue.link(videocodec)
            videocodec.link(muxer)

        #
        # Link muxer to rtmpsink
        #
        muxer.link(rtmpsink)

        if self.config.streaming_destination == STREAMING_DESTINATION_VALUES[
                1] and self.config.use_justin_api == 'yes':
            self.justin_api.set_channel_status(self.get_talk_status(metadata),
                                               self.get_description(metadata))

        return output_bin
Beispiel #34
0
    def _set_icecast_streaming(self):
        '''
        Sets up the icecast stream pipeline.

        Two branches meet in a muxer that feeds shout2send:
        video: src_tee -> queue -> colorspace -> scale -> caps -> codec -> mux
        audio: audio_src -> queue -> convert -> codec -> queue -> mux
        then: mux -> queue -> shout2send (icecast)
        '''
        icecast = gst.element_factory_make('shout2send', 'icecast')
        icecast.set_property('ip', self.icecast_ip)
        icecast.set_property('port', self.icecast_port)
        icecast.set_property('password', self.icecast_password)
        icecast.set_property('mount', self.icecast_mount)

        # Need to add "ffmpegcolorspace" to the player again, after "src_tee"
        icecast_colorspace = gst.element_factory_make('ffmpegcolorspace', 'icecast_colorspace')

        icecast_queue = gst.element_factory_make('queue', 'icecast_queue')
        icecast_scale = gst.element_factory_make('videoscale', 'icecast_scale')
        icecast_scale_cap = gst.element_factory_make('capsfilter', 'icecast_scale_cap')
        #icecast_gst_caps = gst.Caps('video/x-raw-yuv,width=320,height=240')
        #icecast_scale_cap.set_property('caps', icecast_gst_caps)

        #icecast_video_codec = gst.element_factory_make(self.icecast_video_codec, 'icecast_video_codec')
        #icecast_video_codec.set_property('quality',16)

        # Scale the stream to the configured icecast resolution.
        icecast_gst_caps = gst.Caps('video/x-raw-yuv,width=' + str(self.icecast_width) + ',height=' + str(self.icecast_height))
        icecast_scale_cap.set_property('caps', icecast_gst_caps)

        icecast_video_codec = gst.element_factory_make(self.icecast_video_codec, 'icecast_video_codec')
        icecast_video_codec.set_property('bitrate',self.icecast_vidbitrate)

        icecast_muxer = gst.element_factory_make(self.icecast_muxer, 'icecast_muxer')

        # Independent audio capture for the stream (not the recording tee).
        icecast_audio_src = gst.element_factory_make(self.icecast_audio_src,'icecast_audio_src')
        icecast_queue2 = gst.element_factory_make('queue','icecast_queue2')
        icecast_audioconvert = gst.element_factory_make('audioconvert','icecast_audioconvert')
        icecast_audio_codec = gst.element_factory_make(self.icecast_audio_codec,'icecast_audio_codec')
        icecast_audio_codec.set_property('quality',0.2)
        icecast_queue3 = gst.element_factory_make('queue','icecast_queue3')
        icecast_queue4 = gst.element_factory_make('queue','icecast_queue4')

        self.player.add(icecast,
                        icecast_queue,
                        icecast_queue2,
                        icecast_queue3,
                        icecast_queue4,
                        icecast_colorspace,
                        icecast_video_codec,
                        icecast_muxer,
                        icecast_audio_src,
                        icecast_audioconvert,
                        icecast_audio_codec,
                        icecast_scale,
                        icecast_scale_cap)

        # Video branch, from the tee created by _set_video_source.
        gst.element_link_many(self.src_tee,
                              icecast_queue,
                              icecast_colorspace,
                              icecast_scale,
                              icecast_scale_cap,
                              icecast_video_codec,
                              icecast_muxer)

        # Audio branch, then muxer output into the shout2send sink.
        gst.element_link_many(icecast_audio_src,
                              icecast_queue2,
                              icecast_audioconvert,
                              icecast_audio_codec,
                              icecast_queue3,
                              icecast_muxer,
                              icecast_queue4,
                              icecast)
Beispiel #35
0
 def _set_audio_feedback(self):
     """Attach a local monitoring branch (queue -> autoaudiosink) to the audio tee."""
     feedback_queue = gst.element_factory_make('queue', 'afqueue')
     feedback_sink = gst.element_factory_make('autoaudiosink', 'afsink')
     self.player.add(feedback_queue, feedback_sink)
     gst.element_link_many(self.audio_tee, feedback_queue, feedback_sink)
Beispiel #36
0
 def _set_audio_source(self):
     """Create the configured capture source and fan it out via self.audio_tee."""
     capture_src = gst.element_factory_make(self.audio_source, 'audio_src')
     self.audio_tee = gst.element_factory_make('tee', 'audio_tee')
     self.player.add(capture_src, self.audio_tee)
     capture_src.link(self.audio_tee)
Beispiel #37
0
    def _set_video_feedback(self):
        """Attach an on-screen preview branch (queue -> autovideosink) to the video tee."""
        preview_queue = gst.element_factory_make('queue', 'vpqueue')
        preview_sink = gst.element_factory_make('autovideosink', 'vpsink')
        self.player.add(preview_queue, preview_sink)
        gst.element_link_many(self.video_tee, preview_queue, preview_sink)
Beispiel #38
0
import time
import sys


def on_message(self, bus, message):
    # Intentionally ignores all GStreamer bus messages.
    # NOTE(review): the extra leading `self` parameter suggests this was
    # lifted from a class method -- confirm against the connect() call site.
    pass


def on_about_to_finish(playbin):
    """Queue the next track on *playbin* for gapless playback."""
    next_uri = ('file:///home/leon/Workspaces/py-icecream/samples/'
                'gapless/ogg/07. Beethoven.ogg')
    playbin.set_property('uri', next_uri)


playbin = gst.element_factory_make('playbin2')
fakesink = gst.element_factory_make('fakesink')

playbin.set_property('video-sink', fakesink)

playbin.set_property(
    'uri',
    'file:///home/leon/Workspaces/py-icecream/samples/gapless/ogg/06. Beethoven.ogg'
)

bus = playbin.get_bus()
bus.add_signal_watch()
bus.connect("message", on_message)
playbin.connect('about-to-finish', on_about_to_finish)

for i in playbin.elements():
Beispiel #39
0
            self.offset = value
        else:
            logger.error("No property %s" % key.name)


gst.element_register(SliceBuffer, 'slicebuffer')

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    mainloop = gobject.MainLoop()

    files = [a for a in sys.argv[1:] if not '=' in a]
    params = [a for a in sys.argv[1:] if '=' in a]

    if files:
        player = gst.element_factory_make('playbin')
        player.props.uri = 'file://' + files[0]

        bin = gst.Bin()
        elements = [
            gst.element_factory_make('ffmpegcolorspace'),
            gst.element_factory_make('videoscale'),
            gst.element_factory_make('slicebuffer', 'slicer'),
            gst.element_factory_make('capsfilter', 'capsfilter'),
            gst.element_factory_make('ffmpegcolorspace'),
            gst.element_factory_make('xvimagesink'),
        ]
        bin.add(*elements)
        gst.element_link_many(*elements)
        bin.add_pad(
            gst.GhostPad(
Beispiel #40
0
#create the logger
logger = logging.getLogger('Pilot Client log')  #__name__
logging.info('\n\nPilot client has just connected...\n')

#add log handler so it can have max of 32mb file
# NOTE(review): `2 ^ 15` is bitwise XOR in Python and evaluates to 13, so
# the log would rotate after 13 bytes -- `2**25` (32 MiB, matching the
# comment above) or `2**15` was almost certainly intended. Confirm and fix.
handler = logging.handlers.RotatingFileHandler('AudioStreamerLogging.log',
                                               maxBytes=2 ^ 15,
                                               backupCount=0)
logger.addHandler(handler)

# create the pipeline and add client [ filesrc ! tcpclientsink ]
pipeline = gst.Pipeline("client")

#player = gst.element_factory_make("playbin", "player")
src = gst.element_factory_make("filesrc", "source")
# NOTE(review): this formats the element object's repr, then appends
# ' created'; probably meant to log the element's name.
logger.debug('Debug msg: %s' % src + ' created')

files = []          # playlist of files still to be streamed
poppedfiles = []    # files already taken from the playlist
path = os.path.join("/opt/crat/core/home/sounds/PILOTS/")

def getFiles(path):
    """Walk the PILOTS sound folder in random order, exiting if it is missing.

    NOTE(review): this definition appears truncated here -- the loop body
    ends after building each full path without storing or yielding it.
    Confirm against the original source.
    """
    if os.path.isdir(path) == False:
        print "\nThe required folder %s" % path + " doesn't exist\n" + "Please extract all PILOTS songs or contact the admin\n"
        exit(-1)
    #make it even more randomly :D
    random.seed()
    # Shuffle by sorting with a random key.
    for f in sorted(os.listdir(path), key=lambda k: random.random()):
        f = os.path.join(path, f)
Beispiel #41
0
	def __init__( self, filename, timeout=3000, max_interleave=1.0 ):
		"""Prepare a media discoverer pipeline (filesrc ! decodebin).

		filename       -- file to inspect; if missing, discovery ends at once
		timeout        -- discovery timeout in milliseconds
		max_interleave -- maximum audio/video interleave, in seconds

		NOTE(review): the defs below (_timed_out_or_eos, _finished, _stop,
		discover) are nested inside __init__ and never bound to the
		instance, yet discover() passes self._timed_out_or_eos to
		timeout_add -- that attribute would not exist. They look
		mis-indented and were probably meant to be class methods; confirm
		against the upstream source.
		"""
		self.is_video = False
		self.is_audio = False
		self.finished = False

		self._success = False
		self._nomorepads = False

		self._timeoutid = 0
		self._timeout = timeout
		self._max_interleave = max_interleave

		if not os.path.isfile(filename):
			self.debug("File '%s' does not exist, finished" % filename)
			self.finished = True
			return

		# the initial elements of the pipeline
		self.src = gst.element_factory_make("filesrc")
		self.src.set_property("location", filename)
		self.src.set_property("blocksize", 1000000)
		self.dbin = gst.element_factory_make("decodebin")
		self.add(self.src, self.dbin)
		self.src.link(self.dbin)
		self.typefind = self.dbin.get_by_name("typefind")

		def _timed_out_or_eos(self):
			# Success only if every advertised stream produced caps in time.
			if (not self.is_audio and not self.is_video) or \
				(self.is_audio and not self.audiocaps) or \
				(self.is_video and not self.videocaps):
				self._finished(False)
			else:
				self._finished(True)

		def _finished(self, success=False):
			# Record the outcome, cancel the timeout and stop from idle.
			self.debug("success:%d" % success)
			self._success = success
			self.bus.remove_signal_watch()
			if self._timeoutid:
				gobject.source_remove(self._timeoutid)
				self._timeoutid = 0
			gobject.idle_add(self._stop)
			return False

		def _stop(self):
			# Tear the pipeline down to READY and notify listeners.
			self.debug("success:%d" % self._success)
			self.finished = True
			self.set_state(gst.STATE_READY)
			self.debug("about to emit signal")
			self.emit('discovered', self._success)

		def discover(self):
			"""Find the information on the given file asynchronously"""
			self.debug("starting discovery")
			if self.finished:
				self.emit('discovered', False)
				return

			self.bus = self.get_bus()
			self.bus.add_signal_watch()
			self.bus.connect("message", self._bus_message_cb)

			# 3s timeout
			self._timeoutid = gobject.timeout_add(self._timeout, self._timed_out_or_eos)

			self.info("setting to PLAY")
			if not self.set_state(gst.STATE_PLAYING):
				self._finished()
Beispiel #42
0
def fetchDevices(mid, factories, parameter):
    """
    Fetches the available devices on the system according to the specified
    factories. If the first factory succeeds the other are ignored.

    The result is either:
     - succesful, with a list of tuples with guid and device-name
     - succesful, with an error
     - failed

    @param mid: the id to set on the message.
    @param factories: The gstreamer elements to check
    @type  factories: L{str}
    @param parameter: The parameter that specifies the device
    @type  parameter: str

    @rtype: L{twisted.internet.defer.Deferred} of
            L{flumotion.common.messages.Result}
    """
    result = messages.Result()

    # NOTE(review): pop() removes the *last* factory, while the docstring
    # says the first one wins; it also mutates the caller's list. Confirm
    # the intended probing order.
    factory = factories.pop()

    try:
        element = gst.element_factory_make(factory)
    except gst.ElementNotFoundError:
        element = None

    if not element:
        log.debug("device-check", "Could not instantiate the %s factory.",
                  factory)
        if not factories:
            log.debug("device-check", "No more factories were specified.")
            m = messages.Error(T_(
                N_("GStreamer error, %s factory could not be found.\n"
                   "Maybe the plugin is not properly installed.")),
                               mid=mid)
            result.add(m)

            return defer.succeed(result)
        else:
            # Recurse on the remaining factories.
            return fetchDevices(mid, factories, parameter)

    # Ask the element which device values exist for the given property.
    element.probe_property_name(parameter)
    values = element.probe_get_values_name(parameter)

    pipeline_str = "%s name=source %s" % (factory, parameter)
    pipeline_str += "=%s ! fakesink"

    devices = []

    # Instantiate a throwaway pipeline per device to read its display name.
    for value in values:
        pipeline = gst.parse_launch(pipeline_str % value)
        pipeline.set_state(gst.STATE_READY)
        source = pipeline.get_by_name("source")
        name = source.get_property("device-name")
        log.debug("device-check", "New device found: %s with values=%s", name,
                  value)
        devices.append((name, value))
        pipeline.set_state(gst.STATE_NULL)

    if devices:
        result.succeed(devices)
        return defer.succeed(result)
    else:
        log.debug("device-check", "No devices were found using %s factory.",
                  factory)
        if factories:
            return fetchDevices(mid, factories, parameter)
        else:

            m = messages.Error(T_(N_("No devices were found for %s."),
                                  factory),
                               mid=mid)
            result.add(m)
            return defer.succeed(result)
Beispiel #43
0
def create_decodebin():
    """Return a decoder bin element, preferring "decodebin2".

    Falls back to the legacy "decodebin" factory when "decodebin2" is not
    available (older GStreamer installs).
    """
    try:
        return gst.element_factory_make("decodebin2")
    except gst.ElementNotFoundError:
        # Narrowed from a bare `except:` so genuine errors (typos,
        # KeyboardInterrupt) are not swallowed; matches the exception
        # handling used elsewhere in this file.
        return gst.element_factory_make("decodebin")
Beispiel #44
0
    def execute(self, **kwargs):
        """Speak the provided text through the computer's speakers.

    Keyword arguments:
    text -- the text to speak

    """
        if "text" not in kwargs:
            return ''
        phrase = str(kwargs["text"])

        names = {"callie": "6.5", "lawrence": "8.5"}
        name = "callie"

        #TODO find a better way of implementing TTS
        ttsfd, ttsfile = tempfile.mkstemp(".wav")
        outfile, outname = tempfile.mkstemp(".wav")
        try:

            tts = sp.Popen(
                ['/opt/swift/bin/swift', '-o', ttsfile, '-n', name, phrase],
                stdout=sp.PIPE,
                stderr=sp.PIPE)
            #      cmd = ('/opt/swift/bin/swift "' + phrase + '" -o ' + ttsname + ' && sox -V1 ' +
            #             tmp + ' -t wav ' + tmp2 + ' trim 8 ;')
            #      p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
            #      out, err = p.communicate()
            #      if len(err) > 0:
            #        return err

            out, err = tts.communicate()
            if not err:
                sox = sp.Popen([
                    'sox', '-V1', ttsfile, '-t', 'wav', outname, 'trim',
                    names[name]
                ],
                               stdout=sp.PIPE,
                               stderr=sp.PIPE)
                out, err = sox.communicate()

            player = gst.element_factory_make("playbin2", "player")
            bus = player.get_bus()
            bus.add_signal_watch()

            mainloop = gobject.MainLoop()

            def quit(bus, message):
                mainloop.quit()

            bus.connect("message::eos", quit)
            bus.connect("message::error", quit)
            player.set_property("uri", 'file://' + outname)
            player.set_state(gst.STATE_PLAYING)

            try:
                mainloop.run()
            finally:
                player.set_state(gst.STATE_NULL)

        finally:
            try:
                os.remove(ttsfile)
            except OSError as err:
                print e
            try:
                os.remove(outname)
            except IOError as err:
                print err
Beispiel #45
0
 def _makeGnlObject(self):
     """Create a uniquely named gnloperation element for this effect."""
     unique_name = "gnloperation: %s%d" % (
         self.factory.__class__.__name__, TrackEffect.numobjs)
     operation = gst.element_factory_make('gnloperation', unique_name)
     TrackEffect.numobjs += 1
     return operation
Beispiel #46
0
        self.pipe.set_state(gst.STATE_PLAYING)

    def asr_partial_result(self, asr, text, uttid):
        """Forward a partial pocketsphinx transcription to the partial callback.

        asr   -- the pocketsphinx element emitting the signal
        text  -- the partial hypothesis so far
        uttid -- utterance id of the recognition in progress
        """
        self.partial_cb(self.name, uttid, text)

    def asr_result(self, asr, text, uttid):
        """Forward a full pocketsphinx result (command followed by a pause)
        to the final callback.

        asr   -- the pocketsphinx element emitting the signal
        text  -- the completed hypothesis
        uttid -- utterance id of the finished recognition
        """
        self.final_cb(self.name, uttid, text)


if __name__ == "__main__":
    import threading
    import gobject
    gobject.threads_init()

    # Print every (partial and final) recognition result to stdout.
    def print_cb(name, uttid, text):
        print "(%s) %s: %s" % (uttid, name, text)

    src = gst.element_factory_make("autoaudiosrc")  # Grab a random source
    parser = SpeechParser("parser", src, print_cb, print_cb)

    # This loops the program until Ctrl+C is pressed
    g_loop = threading.Thread(target=gobject.MainLoop().run)
    g_loop.daemon = False
    g_loop.start()
Beispiel #47
0
 def build_pipeline(self):
     """Create the playbin player, grab its bus and move it to READY."""
     player = gst.element_factory_make("playbin", "player")
     self.player = player
     self.bus = player.get_bus()
     player.set_state(gst.STATE_READY)
Beispiel #48
0
    def play(self):

        self.emit("preplay")

        self.player = gst.Pipeline("player")

        self.queue_video = gst.element_factory_make("queue", "queue_video")
        self.player.add(self.queue_video)

        self.input_type = 0

        # Source selection

        self.source_pads = {}
        self.audio_pads = {}
        self.pip_pads = {}

        self.output_bins = {}
        type = 0
        source_number = 0
        pip_number = 0

        self.pip = PictureInPicture()

        self.player.add(self.pip)

        for row in self.sources.get_store():
            (name, source) = row
            element = source.create()
            self.player.add(element)

            if element.does_audio():
                if not self.input_type & MEDIA_AUDIO:

                    # The pipeline has audio sources, and this is the first
                    # audio source we add

                    if self.audio_source is None:
                        self.emit("error", "You need to select an audio source")
                        self.emit("stopped")
                        return
                    self.input_type |= MEDIA_AUDIO
                    self.input_selector = gst.element_factory_make(
                            "input-selector", "audio-selector"
                    )
                    self.player.add(self.input_selector)

                audiobin = audioinputbin.AudioInputBin(source)
                self.player.add(audiobin)

                element.audio_pad.link(audiobin.get_static_pad("sink"))
                self.audio_pads[name] = \
                        self.input_selector.get_request_pad("sink%d")
                audiobin.src_pad.link(self.audio_pads[name])

            if element.does_video():
                self.input_type |= MEDIA_VIDEO

                self.source_pads[name] = source_number
                source_number = source_number + 1

                # Thumbnail preview

                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)
                element.video_pad.link(tee.sink_pads().next())

                thumbnail_queue = gst.element_factory_make("queue", None)
                self.player.add(thumbnail_queue)
                self.thumbnails[name] = Preview(self)
                self.player.add(self.thumbnails[name])

                thumbnail_err = gst.element_link_many(
                    tee, thumbnail_queue, self.thumbnails[name]
                )
                if thumbnail_err == False:
                    self.emit("error", "Error conecting thumbnail preview.")

                # Picture in Picture

                self.pip_pads[name] = pip_number
                pip_number = pip_number + 1

                main_queue = gst.element_factory_make("queue", None)
                self.player.add(main_queue)
                pip_queue = gst.element_factory_make("queue", None)
                self.player.add(pip_queue)

                tee.link(main_queue)
                tee.link(pip_queue)
                main_queue.src_pads().next().link(self.pip.get_request_pad_A())
                pip_queue.src_pads().next().link(self.pip.get_request_pad_B())

            if name == self.video_source:
                type |= element.get_type()
            if name == self.audio_source:
                type |= element.get_type()

        self.watermark = gst.element_factory_make(
                "cairoimageoverlay", "cairoimageoverlay"
        )
        self.player.add(self.watermark)

        self.colorspace = gst.element_factory_make(
                "ffmpegcolorspace", "colorspace-imageoverlay-videobalance"
        )
        self.player.add(self.colorspace)

        self.videobalance = gst.element_factory_make(
                "videobalance", "videobalance"
        )
        self.player.add(self.videobalance)
        if self.videobalance_contrast:
            self.videobalance.set_property(
                    "contrast", self.videobalance_contrast
            )
        if self.videobalance_brightness:
            self.videobalance.set_property(
                    "brightness", self.videobalance_brightness
            )
        if self.videobalance_hue:
            self.videobalance.set_property(
                    "hue", self.videobalance_hue
            )
        if self.videobalance_saturation:
            self.videobalance.set_property(
                    "saturation", self.videobalance_saturation
            )

        gst.element_link_many(
                self.pip, self.watermark, self.colorspace, self.videobalance,
                self.queue_video
        )

        self._switch_source()
        self._switch_pip()

        if self.pip_position:
            self.pip.set_property("position", self.pip_position)

        self.effect[MEDIA_VIDEO] = effect.video_effect.VideoEffect(
                self.effect_name[MEDIA_VIDEO]
        )
        self.player.add(self.effect[MEDIA_VIDEO])

        self.overlay = gst.element_factory_make("textoverlay", "overlay")
        self.overlay.set_property("font-desc", self.overlay_font)
        self.overlay.set_property("halign", self.halign)
        self.overlay.set_property("valign", self.valign)
        self.player.add(self.overlay)

        gst.element_link_many(
                self.queue_video, self.effect[MEDIA_VIDEO], self.overlay
        )

        self.preview_tee = multeequeue.MulTeeQueue()
        self.player.add(self.preview_tee)

        self.overlay.link(self.preview_tee)

        if self.input_type & MEDIA_AUDIO:
            self.convert = gst.element_factory_make("audioconvert", "convert")
            self.player.add(self.convert)

            self.effect[MEDIA_AUDIO] = effect.audio_effect.AudioEffect(
                    self.effect_name[MEDIA_AUDIO]
            )
            self.player.add(self.effect[MEDIA_AUDIO])

            self.audio_tee = gst.element_factory_make("tee", "audio_tee")
            self.player.add(self.audio_tee)

            self.volume = volume.Volume()
            self.player.add(self.volume)

            gst.element_link_many(
                    self.input_selector, self.volume,
                    self.effect[MEDIA_AUDIO], self.convert, self.audio_tee
            )
            self.input_selector.set_property(
                    "active-pad", self.audio_pads[self.audio_source]
            )
        added_encoders = {}

        pip_width = 0
        pip_height = 0

        for row in self.outputs.get_store():
            (name, output) = row

            output_bin = outputbin.OutputBin(output)
            self.output_bins[name] = output_bin
            self.player.add(output_bin)

            encoder_name = output.get_config()["parent"]

            encoder_item = self.encoders.get_item(encoder_name)
            if encoder_item is None:
                self.emit("error", "Please, add an encoder.")
                break

            if added_encoders.has_key(encoder_name):
                tee = added_encoders[encoder_name]

                tee.link(output_bin)
            else:
                tee = gst.element_factory_make("tee", None)
                self.player.add(tee)

                converter_item = encoder_item.parent
                converter = converter_item.create()
                if converter_item.config["width"] > pip_width:
                    pip_width = converter_item.config["width"]
                if converter_item.config["height"] > pip_height:
                    pip_height = converter_item.config["height"]
                self.player.add(converter)

                encoder = encoder_item.factory.create(type)
                if encoder.vorbisenc:
                    self.metadata = metadata.Metadata(encoder.vorbisenc)
                    self.metadata.set_tags(self.taglist)
                encoder.config(encoder_item.config)
                self.player.add(encoder)

                added_encoders[encoder_name] = tee
                self.preview_tee.get_src_pad().link(
                        converter.sink_pads().next()
                )
                gst.element_link_many(
                        converter, encoder, tee, output_bin
                )

                if self.input_type & MEDIA_AUDIO:
                    audio_queue = gst.element_factory_make("queue", None)
                    self.player.add(audio_queue)

                    gst.element_link_many(self.audio_tee, audio_queue, encoder)

        self.preview = Preview(self)
        self.player.add(self.preview)
        self.preview_tee.get_src_pad().link(self.preview.sink_pads().next())

        if pip_width == 0:
            pip_width = 320
            pip_height = 240
        self.pip.set_property("width", int(pip_width))
        self.pip.set_property("height", int(pip_height))

        self.video_width = int(pip_width)
        self.video_height = int(pip_height)
        self._set_watermark(self.video_width, self.video_height)

        self.overlay.set_property("text", self.overlay_text)
        if self.volume_value is not None:
            self.volume.set_property("volume", self.volume_value)

        self.emit("pipeline-ready")

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        cr = self.player.set_state(gst.STATE_PLAYING)
        if cr == gst.STATE_CHANGE_SUCCESS:
            self.emit("playing")
        elif cr == gst.STATE_CHANGE_ASYNC:
            self.pending_state = gst.STATE_PLAYING
Beispiel #49
0
    def build_elements(self):
        """Construct the pipeline skeleton and the per-format decode bins.

        Builds filesrc -> typefind in the main pipeline, hooks up bus
        message handling, and prepares one ghost-padded bin per supported
        stream type (mp3, hantro/AVI video, MPEG video, AAC).  The bins
        are only added to the pipeline later, once typefind reports the
        actual stream type (see __cb_typefound).
        """
        # Main pipeline: file source feeding a typefind element.
        self.player = gst.Pipeline()
        self.filesrc = gst.element_factory_make("gnomevfssrc", "filesrc")
        self.typefinder = gst.element_factory_make("typefind", "typefinder")
        self.typefinder.connect("have-type", self.__cb_typefound)
        self.player.add(self.filesrc, self.typefinder)
        gst.element_link_many(self.filesrc, self.typefinder)

        # Listen for messages (EOS, errors, ...) on the pipeline bus.
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_message)

        # Shared video output sink used by both video bins.
        self.videosink = gst.element_factory_make("xvimagesink", 'videosink')

        # MP3 audio: queue -> DSP mp3 sink, exposed via a sink ghost pad.
        self.mp3audioqueue = gst.element_factory_make("queue")
        self.mp3audiosink = gst.element_factory_make("dspmp3sink")
        self.mp3audiobin = gst.Bin('mp3bin')
        self.mp3audiobin.add(self.mp3audioqueue, self.mp3audiosink)
        gst.element_link_many(self.mp3audioqueue, self.mp3audiosink)
        self.mp3audiobin.add_pad(
            gst.GhostPad('sink', self.mp3audioqueue.get_pad('sink')))

        # Hantro (AVI) video: queue -> hardware decoder, sink + src ghosted.
        self.hantrovideoqueue = gst.element_factory_make("queue")
        self.hantrovideodec = gst.element_factory_make("hantro4100dec")
        self.hantrovideobin = gst.Bin('hantrobin')
        self.hantrovideobin.add(self.hantrovideoqueue, self.hantrovideodec)
        gst.element_link_many(self.hantrovideoqueue, self.hantrovideodec)
        self.hantrovideobin.add_pad(
            gst.GhostPad('sink', self.hantrovideoqueue.get_pad('sink')))
        self.hantrovideobin.add_pad(
            gst.GhostPad('src', self.hantrovideodec.get_pad('src')))

        # MPEG video: queue -> ffmpeg software decoder, sink + src ghosted.
        self.mpegvideoqueue = gst.element_factory_make("queue")
        self.mpegvideodec = gst.element_factory_make("ffdec_mpegvideo")
        self.mpegvideobin = gst.Bin('mpegbin')
        self.mpegvideobin.add(self.mpegvideoqueue, self.mpegvideodec)
        gst.element_link_many(self.mpegvideoqueue, self.mpegvideodec)
        self.mpegvideobin.add_pad(
            gst.GhostPad('sink', self.mpegvideoqueue.get_pad('sink')))
        self.mpegvideobin.add_pad(
            gst.GhostPad('src', self.mpegvideodec.get_pad('src')))

        # AAC audio: queue -> DSP aac sink, exposed via a sink ghost pad.
        self.aacaudioqueue = gst.element_factory_make("queue")
        self.aacaudiosink = gst.element_factory_make("dspaacsink")
        self.aacaudiobin = gst.Bin('aacbin')
        self.aacaudiobin.add(self.aacaudioqueue, self.aacaudiosink)
        gst.element_link_many(self.aacaudioqueue, self.aacaudiosink)
        self.aacaudiobin.add_pad(
            gst.GhostPad('sink', self.aacaudioqueue.get_pad('sink')))
Beispiel #50
0
 def _makeGnlObject(self):
     """Create a uniquely named gnlsource element for this track object.

     The name embeds the factory class name and a class-wide counter,
     which is bumped on every call so names never collide.
     """
     klass = self.factory.__class__.__name__
     element = gst.element_factory_make(
         'gnlsource',
         "gnlsource: " + klass + str(SourceTrackObject.numobjs))
     SourceTrackObject.numobjs += 1
     return element
def extractAudioTrackFile (device, track_number, filename, extra = None):
    """Rip one CD audio track to a file on disk.

    Builds a filesink pointed at *filename* and delegates the actual
    extraction to extractAudioTrack().
    """
    file_sink = gst.element_factory_make ("filesink")
    file_sink.set_property ("location", filename)
    return extractAudioTrack (device, track_number, file_sink, extra)
Beispiel #52
0
    def __cb_typefound(self, element, prob, caps):
        """typefind 'have-type' handler: plug in the decode bin for the stream.

        Inspects the detected caps and, on first use, adds and links the
        matching pre-built bin (mp3, hantro/AVI, MPEG, AAC); on later calls
        the bin is bounced READY -> PLAYING to reset it for the new stream.

        Fixes vs. the original: ``is not -1`` identity tests replaced with
        ``!= -1``; the id3demux creation in the x-id3 branch was commented
        out while still being used (NameError) and mp3audiobin was added to
        the pipeline twice; gst.Caps was compared to str with ``==`` in the
        last branches, which can never be true.
        """
        # gst.Caps does not compare equal to a string, so match on its
        # string form throughout.
        capstr = str(caps)

        if capstr.find("audio/mpeg") != -1:
            if self.mp3audiobin.get_parent() is None:
                self.player.add(self.mp3audiobin)
                gst.element_link_many(self.typefinder, self.mp3audiobin)
                self.mp3audiobin.set_state(gst.STATE_PLAYING)
            else:
                self.mp3audiobin.set_state(gst.STATE_READY)
                self.mp3audiobin.set_state(gst.STATE_PLAYING)

        elif capstr.find("application/x-id3") != -1:
            if self.mp3audiobin.get_parent() is None:
                # ID3-tagged mp3: strip the tag with id3demux, then decode.
                id3lib = gst.element_factory_make("id3demux")
                self.player.add(id3lib)
                self.player.add(self.mp3audiobin)
                gst.element_link_many(self.typefinder, id3lib)
                # id3demux exposes its audio pad dynamically.
                id3lib.connect('pad-added', self.on_pad_added_mp3)
            else:
                self.mp3audiobin.set_state(gst.STATE_READY)
                self.mp3audiobin.set_state(gst.STATE_PLAYING)

        elif capstr.find("video/x-msvideo") != -1:
            if self.hantrovideobin.get_parent() is None:
                demux = gst.element_factory_make("avidemux")
                self.player.add(demux)
                # Link source and demux elements; demux pads are dynamic.
                gst.element_link_many(self.typefinder, demux)
                demux.connect('pad-added', self.on_pad_added)
                self.player.add(self.hantrovideobin, self.videosink)
                gst.element_link_many(self.hantrovideobin, self.videosink)

                demux.set_state(gst.STATE_PLAYING)

        elif capstr.find("video/mpeg") != -1:
            if self.mpegvideobin.get_parent() is None:
                demux = gst.element_factory_make("mpegdemux")
                self.player.add(demux)
                # Link source and demux elements; demux pads are dynamic.
                gst.element_link_many(self.typefinder, demux)
                demux.connect('pad-added', self.on_pad_added)
                self.player.add(self.mpegvideobin, self.videosink)
                gst.element_link_many(self.mpegvideobin, self.videosink)

                demux.set_state(gst.STATE_PLAYING)

        # NOTE(review): the original compared the Caps object itself to
        # these strings with '==', which could never be true; matching on
        # the string form is presumed to be the intent -- confirm.
        elif (capstr.find("adts_mpeg_stream") != -1 or
              capstr.find("audio/x-ac3") != -1 or
              capstr.find("audio/x-dts") != -1):
            if self.aacaudiobin.get_parent() is None:
                self.player.add(self.aacaudiobin)
                gst.element_link_many(self.typefinder, self.aacaudiobin)
                self.aacaudiobin.set_state(gst.STATE_PLAYING)
            else:
                self.aacaudiobin.set_state(gst.STATE_READY)
                self.aacaudiobin.set_state(gst.STATE_PLAYING)

        elif capstr.find("video/quicktime") != -1:
            # Quicktime is recognized but deliberately not handled.
            pass

        else:
            log.error("format not supported %s", capstr)
            self.player.set_state(gst.STATE_NULL)
            self.__av_uri = None
Beispiel #53
0
    def __init__(self, filename):
        """Build a video-OCR pipeline for *filename*.

        The pipeline decodes the file, converts frames to ASCII PNM and
        hands each frame to one_frame() via a fakesink handoff, where the
        external OCR command (tesseract) is run on it.

        Fix vs. original: the redundant second ``self.pipeline = pipeline``
        assignment was removed.
        """
        self.debug = True
        self.last_ocr = ''

        self.words = ''
        self.frame = 0
        self.seek_sec = 30
        # Alternative OCR tool: ['gocr', '-', '-d', '0', '-a', '95']
        self.ocr_cmd = ['tesseract', '/tmp/image.pnm', '/tmp/text']

        self.base_name = os.path.splitext(filename)[0]
        if self.debug: print(self.base_name)

        # Keep a reference to the pipeline so it is not garbage collected.
        pipeline = gst.Pipeline("mypipeline")
        self.pipeline = pipeline

        # Source: the media file itself.
        filesrc = gst.element_factory_make("filesrc", "audio")
        self.filesrc = filesrc
        self.filesrc.set_property("location", filename)
        pipeline.add(filesrc)

        # Decoder; its pads appear dynamically once the stream is analyzed,
        # hooked to the colorspace converter in OnDynamicPad().
        decode = gst.element_factory_make("decodebin", "decode")
        decode.connect("new-decoded-pad", self.OnDynamicPad)
        pipeline.add(decode)
        filesrc.link(decode)

        # Colorspace conversion feeding the PNM encoder.
        ffmpegcolorspace = gst.element_factory_make("ffmpegcolorspace",
                                                    "ffmpegcolorspace")
        pipeline.add(ffmpegcolorspace)
        self.ffmpegcolorspace = ffmpegcolorspace

        # ASCII PNM frames are what the OCR command consumes.
        pnmenc = gst.element_factory_make("pnmenc", "pnmenc")
        pnmenc.set_property('ascii', True)
        pipeline.add(pnmenc)
        self.pnmenc = pnmenc
        ffmpegcolorspace.link(pnmenc)

        # Fakesink with signal-handoffs: one_frame() runs for every frame.
        sink = gst.element_factory_make("fakesink", "sink")
        sink.set_property('signal-handoffs', True)
        sink.connect('handoff', one_frame, self)
        pipeline.add(sink)
        pnmenc.link(sink)

        self.time_format = gst.Format(gst.FORMAT_TIME)

        if self.debug:
            # Print the pipeline's elements (iteration order is reversed
            # relative to creation, hence the reverse()).
            elements = list(pipeline.elements())
            elements.reverse()
            print("pipeline elements:", end=' ')
            for e in elements:
                print(e.get_factory().get_name(), end=' ')
            print()

        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)

        pipeline.set_state(gst.STATE_PLAYING)
Beispiel #54
0
    def get_output_bin(self, audio=True, video=True, metadata=None):
        """Build and return a bin that encodes A/V to Ogg and streams it
        to an Icecast server via shout2send.

        audio    -- include the vorbis audio branch (ghost pad "audiosink").
        video    -- include the theora video branch (ghost pad "videosink").
        metadata -- optional metadata; when given it is applied via
            set_metadata() and merged into the vorbis tags.

        Fix vs. original: local variable renamed so it no longer shadows
        the ``bin`` builtin.
        """
        output_bin = gst.Bin()

        if metadata is not None:
            self.set_metadata(metadata)

        # Muxer: combines the encoded streams into an Ogg container.
        muxer = gst.element_factory_make("oggmux", "muxer")
        output_bin.add(muxer)

        # Icecast streaming sink, configured from self.config.
        icecast = gst.element_factory_make("shout2send", "icecast")
        icecast.set_property("ip", self.config.ip)
        icecast.set_property("port", self.config.port)
        icecast.set_property("password", self.config.password)
        icecast.set_property("mount", self.config.mount)
        output_bin.add(icecast)

        #
        # Setup Audio Pipeline
        #
        if audio:
            audioqueue = gst.element_factory_make("queue", "audioqueue")
            output_bin.add(audioqueue)

            audioconvert = gst.element_factory_make("audioconvert",
                                                    "audioconvert")
            output_bin.add(audioconvert)

            audiocodec = gst.element_factory_make("vorbisenc", "audiocodec")
            audiocodec.set_property("quality", self.config.audio_quality)
            output_bin.add(audiocodec)

            # Setup metadata; enum index 2 is GST_TAG_MERGE_REPLACE.
            vorbistag = gst.element_factory_make("vorbistag", "vorbistag")
            merge_mode = gst.TagMergeMode.__enum_values__[2]

            if metadata is not None:
                # Only set tags if metadata was supplied.
                vorbistag.merge_tags(self.tags, merge_mode)
            vorbistag.set_tag_merge_mode(merge_mode)
            output_bin.add(vorbistag)

            # Expose the audio queue's sink pad on the bin.
            audiopad = audioqueue.get_pad("sink")
            audio_ghostpad = gst.GhostPad("audiosink", audiopad)
            output_bin.add_pad(audio_ghostpad)

            # Link: queue -> convert -> vorbisenc -> vorbistag -> muxer
            audioqueue.link(audioconvert)
            audioconvert.link(audiocodec)
            audiocodec.link(vorbistag)
            vorbistag.link(muxer)

        #
        # Setup Video Pipeline
        #
        if video:
            videoqueue = gst.element_factory_make("queue", "videoqueue")
            output_bin.add(videoqueue)

            videocodec = gst.element_factory_make("theoraenc", "videocodec")
            videocodec.set_property("bitrate", self.config.video_bitrate)
            output_bin.add(videocodec)

            # Expose the video queue's sink pad on the bin.
            videopad = videoqueue.get_pad("sink")
            video_ghostpad = gst.GhostPad("videosink", videopad)
            output_bin.add_pad(video_ghostpad)

            videoqueue.link(videocodec)
            videocodec.link(muxer)

        #
        # Link muxer to icecast
        #
        muxer.link(icecast)

        return output_bin
    def __init__(self, filename=None, bands=4096):
        """Build a playback pipeline with an FFT-domain spectrum equalizer.

        Chain: filesrc -> decodebin -> audioconvert -> scaletempo ->
        spectrum -> audiorate -> audioresample -> audioconvert -> fft ->
        spectrum_equalizer -> ifft -> audioconvert -> gconfaudiosink.

        filename -- optional media file to load immediately via set_file().
        bands    -- number of equalizer bands, applied via set_bands().

        Fix vs. original: ``not filename == None`` replaced with the
        ``is not None`` idiom; throwaway locals given descriptive names.
        """
        gst.Pipeline.__init__(self)

        # File source; location is set later through set_file().
        self.filesrc = gst.element_factory_make("filesrc")
        self.add(self.filesrc)

        # Decoder: pads appear dynamically, hooked up in on_decoded_pad().
        decodebin = gst.element_factory_make("decodebin")
        decodebin.connect("new-decoded-pad", self.on_decoded_pad)
        self.add(decodebin)
        self.filesrc.link(decodebin)

        # Convert decoded audio for the scaletempo element.
        self.convert = gst.element_factory_make("audioconvert")
        self.add(self.convert)

        # scaletempo keeps pitch constant under rate changes.
        scaletempo = gst.element_factory_make("scaletempo")
        self.add(scaletempo)
        self.convert.link(scaletempo)

        # Spectrum analyser (posts magnitude messages on the bus).
        self.spectrum = gst.element_factory_make("spectrum", "spectrum")
        self.add(self.spectrum)
        scaletempo.link(self.spectrum)

        # Equalizer operates in the frequency domain: fft -> eq -> ifft.
        rate = gst.element_factory_make("audiorate")
        self.add(rate)
        self.spectrum.link(rate)

        resample = gst.element_factory_make("audioresample")
        self.add(resample)
        rate.link(resample)

        pre_convert = gst.element_factory_make("audioconvert")
        self.add(pre_convert)
        resample.link(pre_convert)

        fft = gst.element_factory_make("fft")
        self.add(fft)
        pre_convert.link(fft)

        self.eq = gst.element_factory_make("spectrum_equalizer")
        self.add(self.eq)
        fft.link(self.eq)

        ifft = gst.element_factory_make("ifft")
        self.add(ifft)
        self.eq.link(ifft)

        post_convert = gst.element_factory_make("audioconvert")
        self.add(post_convert)
        ifft.link(post_convert)

        # Final audio sink (GConf-configured system default).
        sink = gst.element_factory_make("gconfaudiosink")
        self.add(sink)
        post_convert.link(sink)

        # Apply initial configuration.
        self.set_bands(bands)
        if filename is not None:
            self.set_file(filename)

        bus = self.get_bus()
        bus.add_signal_watch()
Beispiel #56
0
	def __init__(self, enableVideo=False):
		"""Create a playbin2-based player.

		When video is disabled (the default), a fakesink swallows the
		video stream so only audio is rendered.
		"""
		self.player = gst.element_factory_make("playbin2", "player")
		null_video_sink = gst.element_factory_make("fakesink", "fakesink")
		if enableVideo:
			return
		print('No Video')
		self.player.set_property("video-sink", null_video_sink)
Beispiel #57
0
# RTP audio server configuration: destination host plus the GStreamer
# factory names used for capture, encoding and RTP payloading.
DEST_HOST = '127.0.0.1'

AUDIO_SRC = 'audiotestsrc'
AUDIO_ENC = 'alawenc'
AUDIO_PAY = 'rtppcmapay'

# UDP ports for outgoing RTP/RTCP and for receiving the peer's RTCP.
RTP_SEND_PORT = 5002
RTCP_SEND_PORT = 5003
RTCP_RECV_PORT = 5007 

# the pipeline to hold everything
pipeline = gst.Pipeline('rtp_server')

# audio capture source plus converters for format/rate adaptation
audiosrc = gst.element_factory_make(AUDIO_SRC, 'audiosrc')
audioconv = gst.element_factory_make('audioconvert', 'audioconv')
audiores = gst.element_factory_make('audioresample', 'audiores')

# A-law encoder and its RTP payloader
audioenc = gst.element_factory_make(AUDIO_ENC, 'audioenc')
audiopay = gst.element_factory_make(AUDIO_PAY, 'audiopay')

# add capture and payloading to the pipeline and link
pipeline.add(audiosrc, audioconv, audiores, audioenc, audiopay)

res = gst.element_link_many(audiosrc, audioconv, audiores, audioenc, audiopay)

# the rtpbin element, which manages the RTP/RTCP sessions
rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
Beispiel #58
0
 def makeAudioBin(self):
     """Return an alsasink bound to this object's ALSA card/device pair."""
     sink = gst.element_factory_make("alsasink")
     device_string = "hw:%d,%d" % (self._card, self._device)
     sink.set_property("device", device_string)
     return sink
Beispiel #59
0
        if t == gst.MESSAGE_EOS:
            print("End-of-stream")
            loop.quit()
        elif t == gst.MESSAGE_ERROR:
            err, debug = message.parse_error()
            print("Error: %s: %s" % (err, debug))
            loop.quit()
        return True

    if len(sys.argv) != 2:
        print "Usage: %s <URI>" % sys.argv[0]
        sys.exit(1)

    dest = sys.argv[1]

    src = gst.element_factory_make("videotestsrc")
    #	src = gst.element_factory_make("v4l2src")
    src_caps = gst.caps_from_string("video/x-raw-yuv,width=704,height=480")

    rate = gst.element_factory_make("videorate")
    rate_caps = gst.caps_from_string("video/x-raw-yuv,framerate=30000/1001")

    overlay = gst.element_factory_make("timeoverlay")
    overlay.set_property('shaded-background', True)
    overlay.set_property('halignment', 'right')
    #	overlay.set_property('valignment', 'bottom')

    encoder = gst.element_factory_make("x264enc")
    encoder.set_property('bitrate', 256)
    encoder.set_property('byte-stream', True)
Beispiel #60
0
# Unique CNAME for the RTP session: "<user>-<pid>@<host>".
mycname = "%s-%s@%s" % (pwd.getpwuid(os.getuid())[0],
                        os.getpid(),
                        socket.gethostname())

# Glade UI description shipped alongside this module.
gladefile = os.path.join(os.path.dirname(__file__), "rural.glade")

# Global flag tracking whether the call is currently running.
RUNNING_STATUS = False

def make_video_sink(pipeline, xid, name, async_=True):
    """Make a bin with a video sink in it, that will be displayed on xid.

    The bin is videoscale -> ffmpegcolorspace -> xvimagesink, with the
    scaler's sink pad ghosted as the bin's "sink" pad.

    async_ -- applied to both the "sync" and "async" properties of the
        xvimagesink.  NOTE(review): renamed from ``async``, which is a
        reserved word in Python 3.7+ and makes the file unparseable there;
        positional callers are unaffected.

    Fix vs. original: parameter rename above; local renamed so it no
    longer shadows the ``bin`` builtin.
    """
    sink_bin = gst.Bin("videosink_%d" % xid)
    sink = gst.element_factory_make("xvimagesink", name)
    sink.set_property("sync", async_)
    sink.set_property("async", async_)
    sink_bin.add(sink)
    colorspace = gst.element_factory_make("ffmpegcolorspace")
    sink_bin.add(colorspace)
    videoscale = gst.element_factory_make("videoscale")
    sink_bin.add(videoscale)
    videoscale.link(colorspace)
    colorspace.link(sink)
    sink_bin.add_pad(gst.GhostPad("sink", videoscale.get_pad("sink")))
    # Remember the target window so a sync-message handler can use it.
    sink.set_data("xid", xid)
    return sink_bin

class FsUIPipeline:
    "Object to wrap the GstPipeline"