Example #1
1
    def __init__(self, width, height, device):
        """Build and start a v4l2 capture pipeline delivering RGB frames.

        width/height -- requested frame size in pixels.
        device -- v4l2 device path (e.g. "/dev/video0").
        """
        # appsink keeps at most one buffer and drops older ones, so pulls
        # always see the most recent frame.
        self.appsink = gst.parse_launch("appsink drop=true max-buffers=1")
        cf_yuv = gst.parse_launch("capsfilter caps=\"video/x-raw-yuv,width="+str(width)+",height="+str(height)+"\"")

        # RGB caps with explicit channel masks and endianness so downstream
        # consumers get a predictable 24bpp byte layout.
        cf = gst.parse_launch("capsfilter caps=\"video/x-raw-rgb,width="+str(width)+",height="+str(height)+",bpp=24,red_mask=255, green_mask=65280, blue_mask=16711680, endianness=4321\"")
        ff = gst.element_factory_make("ffmpegcolorspace", "converter")
        src = gst.parse_launch("v4l2src device="+device)


        print "creating pipe"
        self.pipe = gst.Pipeline(name="ecvpipe")
        self.pipe.add(src)
        self.pipe.add(cf_yuv)
        self.pipe.add(ff)
        self.pipe.add(cf)
        self.pipe.add(self.appsink)
        print "done"
        # src -> YUV caps -> colorspace convert -> RGB caps -> appsink
        src.link(cf_yuv)
        cf_yuv.link(ff)
        ff.link(cf)
        cf.link(self.appsink)
        print "setting state \"playing\""
        self.pipe.set_state(gst.STATE_PLAYING)
        self.imagewidth = width
        self.imageheight = height
Example #2
0
	def reference(self):
		"""
		Make the reference videos.
		
		:returns: Paths to video files (see :attr:`files`) and video size (see :attr:`size`).
		:rtype: tuple
		"""
		VTLOG.info("Making reference...")
		# Pass 1: decode the source video to raw YUV at the configured framerate.
		self.pipeline = parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw-yuv,framerate=' + self.conf['framerate'] + '/1  ! filesink name=sink1')
		source = self.pipeline.get_by_name('source')
		sink1 = self.pipeline.get_by_name('sink1')
		location = self.video
		self.files['original'].append(location)
		source.props.location = location
		location = self.conf['tempdir'] + self.conf['num'] + '_ref_original.yuv'
		self.files['original'].append(location)
		sink1.props.location = location
		self.__play()
		# Pass 2: re-encode at the requested bitrate; tee the stream so both the
		# coded file (sink2) and its re-decoded YUV (sink3) are written.
		self.pipeline = parse_launch('filesrc name=source ! decodebin ! videorate ! video/x-raw-yuv,framerate=' + self.conf['framerate'] + '/1  ! ' + self.encoder + ' bitrate=' + self.bitrate \
				+ ' ! tee name=t ! queue' + self.__add + ' ! filesink name=sink2 t. ! queue ! decodebin ! filesink name=sink3')
		source = self.pipeline.get_by_name('source')
		sink2 = self.pipeline.get_by_name('sink2')
		sink3 = self.pipeline.get_by_name('sink3')
		location = self.video
		source.props.location = location
		location = self.conf['tempdir'] + self.conf['num'] + '_ref.' + self.conf['codec']
		self.files['coded'].append(location)
		sink2.props.location = location
		location = self.conf['tempdir'] + self.conf['num'] + '_ref.yuv'
		self.files['coded'].append(location)
		sink3.props.location = location
		self.__play()
		VTLOG.info("Reference made")
		return self.files, self.size
Example #3
0
    def __init__(self):
        """Set up the HTTP-download pipeline and a separate probe pipeline.

        The main pipeline streams an HTTP resource straight to a file; the
        test pipeline decodes a local file through decodebin2 to inspect it.
        """
        gobject.GObject.__init__(self)

        # Download bookkeeping flags read elsewhere in the class.
        self.state = STATE_NULL
        self.ready = False
        self.eos = False
        self.update_timeout = None

        # sync=false: write as fast as data arrives, no clock synchronisation.
        self.pipeline = gst.parse_launch('souphttpsrc name=src ! filesink name=sink sync=false')
        self.src = self.pipeline.get_by_name('src')
        self.sink = self.pipeline.get_by_name('sink')
        # NOTE(review): the pipeline has no element named 'foo', so this is
        # always None -- looks like leftover code; confirm and remove.
        self.foo = self.pipeline.get_by_name('foo')

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message", self.bus_message_cb)


        # Probe pipeline: decode to fakesink just to inspect the stream;
        # autoplug-continue lets us intercept decoder selection.
        self.test_pipeline = gst.parse_launch('filesrc name=test_src ! decodebin2 name=decoder ! fakesink')
        self.test_src = self.test_pipeline.get_by_name('test_src')
        self.test_decoder = self.test_pipeline.get_by_name('decoder')

        self.test_decoder.connect('autoplug-continue', self.autoplug_continue_cb)

        self.test_bus = self.test_pipeline.get_bus()
        self.test_bus.add_signal_watch()
        self.test_bus.connect("message", self.test_bus_message_cb)
Example #4
0
    def videostream(self):
        """Add two MJPEG-over-HTTP webcam pages to the notebook and start playback.

        Both pages are identical apart from label, URL and callbacks, so the
        per-page setup lives in _add_webcam_page.
        """
        gtk.gdk.threads_enter()
        nb = self.gm.get_object('dqmnotebook')

        self.webcamarea, self.player = self._add_webcam_page(
            nb, 'Webcam 2', 'http://axisminn02/mjpg/video.mjpg',
            self.deal_with_message, self.sync_message)

        self.webcamarea2, self.player2 = self._add_webcam_page(
            nb, 'Webcam 1', 'http://axisminn01/mjpg/video.mjpg',
            self.deal_with_message2, self.sync_message2)

        gtk.gdk.threads_leave()

    def _add_webcam_page(self, nb, label, url, message_cb, sync_cb):
        """Prepend one webcam page to *nb*; return (drawing_area, playing pipeline)."""
        area = gtk.DrawingArea()
        nb.prepend_page(area, gtk.Label(label))
        nb.show_all()
        player = gst.parse_launch(
            'souphttpsrc location=%s ! decodebin2 ! xvimagesink' % url)
        bus = player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", message_cb)
        # sync-message::element is required to hand xvimagesink an X window id.
        bus.enable_sync_message_emission()
        bus.connect("sync-message::element", sync_cb)
        player.set_state(gst.STATE_PLAYING)
        return area, player
Example #5
0
    def create_pipeline(self, mimetype):
        """Create the playback pipeline appropriate for this device and *mimetype*.

        On Nokia (Maemo) devices a hand-built DSP pipeline is chosen by
        mimetype; everywhere else a stock playbin handles everything.
        """
        if platform.uname()[1].startswith('Nokia'):
            self.bus = None
            self.player = None
            self.source = None
            self.sink = None

            if mimetype == 'application/ogg':
                self.player = gst.parse_launch('gnomevfssrc name=source ! oggdemux ! ivorbisdec ! audioconvert ! dsppcmsink name=sink')
                self.player.set_name('oggplayer')
                self.set_volume = self.set_volume_dsp_pcm_sink
                self.get_volume = self.get_volume_dsp_pcm_sink
            else:
                # Anything non-ogg is assumed to be MP3-like here.
                self.player = gst.parse_launch('gnomevfssrc name=source ! id3lib ! dspmp3sink name=sink')
                self.player.set_name('mp3player')
                self.set_volume = self.set_volume_dsp_mp3_sink
                self.get_volume = self.get_volume_dsp_mp3_sink
            self.source = self.player.get_by_name('source')
            self.sink = self.player.get_by_name('sink')
            # The DSP pipelines take a filesystem 'location' rather than a URI.
            self.player_uri = 'location'
        else:
            self.player = gst.element_factory_make('playbin', 'player')
            self.player_uri = 'uri'
            # playbin acts as its own source and sink from the caller's view.
            self.source = self.sink = self.player
            self.set_volume = self.set_volume_playbin
            self.get_volume = self.get_volume_playbin

        self.bus = self.player.get_bus()
        self.player_clean = True
Example #6
0
	def __init__(self):
		"""Build the webcam-viewer window plus one sending and one receiving pipeline."""
		window = gtk.Window(gtk.WINDOW_TOPLEVEL)
		window.set_title("Webcam-Viewer")
		window.set_default_size(500, 400)
		window.connect("destroy", gtk.main_quit, "WM destroy")
		vbox = gtk.VBox()
		window.add(vbox)
		self.movie_window = gtk.DrawingArea()
		vbox.add(self.movie_window)
		hbox = gtk.HBox()

		vbox.pack_start(hbox, False)
		hbox.set_border_width(10)
		hbox.pack_start(gtk.Label())
		self.button = gtk.Button("Start") 
		self.button.connect("clicked", self.start_stop)
		hbox.pack_start(self.button, False)
		self.button2 = gtk.Button("Quit")
		self.button2.connect("clicked", self.exit)
		hbox.pack_start(self.button2, False)
		hbox.add(gtk.Label())
		window.show_all()
		# Sender: camera -> H.263 encode -> RTP payload -> UDP to the hard-coded peer.
		options = "v4l2src ! video/x-raw-yuv,width=352,height=288,framerate=8/1 ! hantro4200enc ! rtph263pay ! udpsink host=130.236.218.162 port=5435"
		self.player = gst.parse_launch ( options )
		# Receiver: RTP H.263 from UDP -> decode -> X video output.
		options2 = "udpsrc port=5434 caps=application/x-rtp,clock-rate=90000 ! rtph263depay ! hantro4100dec ! xvimagesink"

		self.player1 = gst.parse_launch( options2 )

		# NOTE(review): only self.player's bus gets watchers here; the
		# receiving pipeline (self.player1) has none -- confirm intended.
		bus = self.player.get_bus()
		bus.add_signal_watch()
		bus.enable_sync_message_emission()
		bus.connect("message", self.on_message)
		bus.connect("sync-message::element", self.on_sync_message)
Example #7
0
    def __init__(self, source_pipeline_description, sink_pipeline_description):
        """Assemble and start the stb-tester pipeline.

        Layout: source bin -> image processing (motion detect + template
        match) -> tee, with one branch to a screenshot appsink and one to the
        on-screen sink described by *sink_pipeline_description*.
        """
        gobject.threads_init()

        imageprocessing = " ! ".join([
                # Buffer the video stream, dropping frames if downstream
                # processors aren't fast enough:
                "queue name=q leaky=2",
                # Convert to a colorspace that templatematch can handle:
                "ffmpegcolorspace",
                # Detect motion when requested:
                "stbt-motiondetect name=motiondetect enabled=false",
                # OpenCV image-processing library:
                "stbt-templatematch name=templatematch method=1",
                ])
        xvideo = " ! ".join([
                # Convert to a colorspace that xvimagesink can handle:
                "ffmpegcolorspace",
                sink_pipeline_description,
                ])
        screenshot = ("appsink name=screenshot max-buffers=1 drop=true "
                      "sync=false")
        pipe = " ".join([
                imageprocessing,
                "! tee name=t",
                "t. ! queue leaky=2 !", screenshot,
                "t. ! queue leaky=2 !", xvideo
                ])

        # Gstreamer loads plugin libraries on demand, when elements that need
        # those libraries are first mentioned. There is a bug in gst-opencv
        # where it erroneously claims to provide appsink, preventing the
        # loading of the real appsink -- so we load it first.
        # TODO: Fix gst-opencv so that it doesn't prevent appsink from being
        #       loaded.
        gst.parse_launch("appsink")

        self.source_pipeline_description = source_pipeline_description
        self.source_bin = self.create_source_bin()
        self.sink_bin = gst.parse_bin_from_description(pipe, True)

        self.pipeline = gst.Pipeline("stb-tester")
        self.pipeline.add(self.source_bin, self.sink_bin)
        gst.element_link_many(self.source_bin, self.sink_bin)

        # Keep handles to the named elements for runtime control.
        self.templatematch = self.pipeline.get_by_name("templatematch")
        self.motiondetect = self.pipeline.get_by_name("motiondetect")
        self.screenshot = self.pipeline.get_by_name("screenshot")
        self.bus = self.pipeline.get_bus()
        self.bus.connect("message::error", self.on_error)
        self.bus.connect("message::warning", self.on_warning)
        self.bus.add_signal_watch()
        self.pipeline.set_state(gst.STATE_PLAYING)

        # Handle loss of video (but without end-of-stream event) from the
        # Hauppauge HDPVR capture device.
        self.queue = self.pipeline.get_by_name("q")
        self.underrun_timeout_id = None
        self.queue.connect("underrun", self.on_underrun)
        self.queue.connect("running", self.on_running)
Example #8
0
    def __init__(self):
        """Build the viewer UI plus one sending and one receiving RTP/H.263 pipeline."""
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.set_title("Webcam-Viewer")
        window.set_default_size(500, 400)
        window.connect("destroy", gtk.main_quit, "WM destroy")
        vbox = gtk.VBox()
        window.add(vbox)
        self.movie_window = gtk.DrawingArea()
        vbox.add(self.movie_window)
        hbox = gtk.HBox()
        vbox.pack_start(hbox, False)
        hbox.set_border_width(10)
        hbox.pack_start(gtk.Label())
        self.button = gtk.Button("Start")
        self.button.connect("clicked", self.start_stop)
        hbox.pack_start(self.button, False)
        self.button2 = gtk.Button("Quit")
        self.button2.connect("clicked", self.exit)
        hbox.pack_start(self.button2, False)
        hbox.add(gtk.Label())
        window.show_all()

        #Listening for Input:
#        gst-launch udpsrc port=5434 caps=application/x-rtp,clock-rate=90000 ! rtph263depay ! hantro4100dec ! xvimagesink

        #Sending Video Output:
#        gst-launch v4l2src ! video/x-raw-yuv,width=352,height=288,framerate=8/1 ! hantro4200enc ! rtph263pay ! udpsink host=<other N800's ip> port=5434 

        #Stream to another device
        self.sender = gst.parse_launch("v4l2src ! video/x-raw-yuv,width=320,height=240,framerate=8/1 ! hantro4200enc ! rtph263pay ! udpsink host=130.236.219.107 port=5434")
        
        #Show the incoming video
        self.player = gst.parse_launch("udpsrc port=5432 caps=application/x-rtp,clock-rate=90000 ! rtph263depay ! hantro4100dec ! xvimagesink")
        
        #Stream both audio and video
#        self.player = gst.parse_launch("v4l2src ! video/x-raw-yuv,width=320,height=240,framerate=15/1 ! hantro4200enc stream-type=1 profile-and-level=1001 !video/x-h263,framerate=15/1 ! rtph263ppay mtu=1438 ! udpsink host=130.236.219.107 port=5434 dsppcmsrc ! queue ! audio/x-raw-int,channels=1,rate=8000 ! mulawenc ! rtppcmupay mtu=1438 ! udpsink host=130.236.219.107 port=5432")
            #Even try rate=48000
        
        # Show my webcam
#        self.player = gst.parse_launch ("v4l2src ! video/x-raw-yuv, width=320, height=240, framerate=8/1 ! autovideosink")

        # Both pipelines share the same message/sync callbacks.
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
        
        bus2 = self.sender.get_bus()
        bus2.add_signal_watch()
        bus2.enable_sync_message_emission()
        bus2.connect("message", self.on_message)
        bus2.connect("sync-message::element", self.on_sync_message)
Example #9
0
	def stop_stream(self):
		"""Stop the current stream and rebuild a local preview pipeline.

		NOTE(review): despite its name this method restarts a preview
		pipeline after stopping the stream and relabels the button
		"Start Stream" -- confirm intended semantics with callers.
		"""
		self.button.set_label("Start Stream")
		self.player.set_state(gst.STATE_NULL)
		devname = self.combodevice.get_active_text()
		# v4l and v4l2 differ only in the source element; the rest of the
		# pipeline is identical, so build it once. (This also fixes the
		# original's mixed tab/space indentation, which is a TabError
		# under Python 3 and fragile under Python 2.)
		if self.vdevicetype.get_active_text() == "v4l":
			srcelem = "v4lsrc"
		else:
			srcelem = "v4l2src"
		self.player = gst.parse_launch (srcelem + " device=" + devname + " !  video/x-raw-yuv,width=320,height=240 ! autovideosink")
		bus = self.player.get_bus()
		bus.add_signal_watch()
		bus.enable_sync_message_emission()
		bus.connect("message", self.on_message)
		bus.connect("sync-message::element", self.on_sync_message)
		self.player.set_state(gst.STATE_PLAYING)
Example #10
0
    def create_pipeline(self, mimetype):
        """Create the playback pipeline for *mimetype*.

        Devices that cannot use playbin2 get a hand-built DSP pipeline chosen
        by mimetype; otherwise a playbin2 is created and fitted with the
        configured audio and video sinks.
        """
        self.debug("creating pipeline")
        if self._is_not_playbin2_friendly():
            self.bus = None
            self.player = None
            self.source = None
            self.sink = None

            if mimetype == 'application/ogg':
                self.player = gst.parse_launch('gnomevfssrc name=source ! oggdemux ! ivorbisdec ! audioconvert ! dsppcmsink name=sink')
                self.player.set_name('oggplayer')
                self.set_volume = self.set_volume_dsp_pcm_sink
                self.get_volume = self.get_volume_dsp_pcm_sink
            elif mimetype == 'application/flac':
                self.player = gst.parse_launch('gnomevfssrc name=source ! flacdemux ! flacdec ! audioconvert ! dsppcmsink name=sink')
                self.player.set_name('flacplayer')
                self.set_volume = self.set_volume_dsp_pcm_sink
                self.get_volume = self.get_volume_dsp_pcm_sink
            else:
                # Anything else is assumed to be MP3-like.
                self.player = gst.parse_launch('gnomevfssrc name=source ! id3lib ! dspmp3sink name=sink')
                self.player.set_name('mp3player')
                self.set_volume = self.set_volume_dsp_mp3_sink
                self.get_volume = self.get_volume_dsp_mp3_sink
            self.source = self.player.get_by_name('source')
            self.sink = self.player.get_by_name('sink')
            # DSP pipelines take a filesystem 'location' rather than a URI,
            # and have no native mute support.
            self.player_uri = 'location'
            self.mute = self.mute_hack
            self.unmute = self.unmute_hack
            self.get_mute = self.get_mute_hack
        else:
            self.player = gst.element_factory_make('playbin2', 'player')
            self.player_uri = 'uri'
            # playbin2 acts as its own source and sink from the caller's view.
            self.source = self.sink = self.player
            self.set_volume = self.set_volume_playbin
            self.get_volume = self.get_volume_playbin
            self.mute = self.mute_playbin
            self.unmute = self.unmute_playbin
            self.get_mute = self.get_mute_playbin
            audio_sink = gst.element_factory_make(self.audio_sink_name)
            self._set_props(audio_sink, self.audio_sink_options)
            self.player.set_property("audio-sink", audio_sink)
            video_sink = gst.element_factory_make(self.video_sink_name)
            self._set_props(video_sink, self.video_sink_options)
            self.player.set_property("video-sink", video_sink)

        self.bus = self.player.get_bus()
        self.player_clean = True
        self.bus.connect('message', self.on_message)
        self.bus.add_signal_watch()
        self.update_LC = LoopingCall(self.update)
Example #11
0
    def __init__(self, projectDir, posX, posY):
      """Build two-pass pipelines that composite the secondary video over
      the primary one (pass 1) and then mux the audio back in (pass 2).

      projectDir -- directory holding primary-dut.webm / secondary-dut.webm.
      posX/posY -- pixel offset of the secondary (picture-in-picture) video.
      """
      self.projectDir = projectDir

      # Paths are quoted because they are spliced into parse_launch strings.
      primaryLocation = "\""+projectDir+"/primary-dut.webm\""
      secondaryLocation = "\""+projectDir+"/secondary-dut.webm\""
      outLocation = "\""+projectDir+"/user-testing.webm\""
      finalLocation = "\""+projectDir+"/final.webm\""

      #posX, posY = self.get_primary_video_info (primaryLocation)

      # Pass 1: decode both inputs at 15 fps, mix the secondary (sink_1,
      # offset posX/posY) over the primary (sink_0), re-encode to VP8 and
      # write a video-only webm.
      gstPipe = """filesrc
      location="""+secondaryLocation+""" name=filein !
      matroskademux name=demux1 ! queue !
      vp8dec ! videorate force-fps=15/1 !
      video/x-raw-yuv,width=320,height=240,framerate=15/1 !
      queue ! videomixer name=mix
      sink_0::xpos=0 sink_0::ypos=0
      sink_1::xpos="""+str (posX)+"""
      sink_1::ypos="""+str (posY)+""" ! vp8enc
      quality=10 speed=2 threads=4 ! webmmux name=outmux !
      filesink location="""+outLocation+"""
      filesrc  location="""+primaryLocation+"""
      ! matroskademux name=demux2 ! queue !
      vp8dec ! videorate force-fps=15/1 ! videoscale add-borders=1 !
      video/x-raw-yuv,framerate=15/1 ! mix."""

      self.element = gst.parse_launch (gstPipe)

      pipebus = self.element.get_bus ()

      pipebus.add_signal_watch ()
      pipebus.connect ("message", self.pipe1_changed_cb)

      #second pass add audio - we could do this in the above pipeline but due to a bug it doesn't quite work..
      # Pass 2: remux the pass-1 video together with the secondary input's
      # vorbis audio track into the final webm.
      gstPipe = """filesrc
      location="""+secondaryLocation+""" ! queue ! matroskademux !
      vorbisparse ! audio/x-vorbis !  queue ! outmux.audio_0
      filesrc location="""+outLocation+""" ! queue !
      matroskademux ! video/x-vp8 ! queue ! outmux.video_0 webmmux
      name=outmux ! filesink location="""+finalLocation+""""""

      self.element2 = gst.parse_launch (gstPipe)

      pipebus2 = self.element2.get_bus ()

      pipebus2.add_signal_watch ()
      pipebus2.connect ("message", self.pipe2_changed_cb)
Example #12
0
	def __init__(self):
		"""Build the access-point UI plus iLBC audio receive and send pipelines."""
		window = gtk.Window(gtk.WINDOW_TOPLEVEL)
		window.set_title("Awesome AP")
		window.set_default_size(500, 400)
		window.connect("destroy", gtk.main_quit, "WM destroy")
		vbox = gtk.VBox()
		window.add(vbox)
		self.movie_window = gtk.DrawingArea()
		vbox.add(self.movie_window)
		hbox = gtk.HBox()
		vbox.pack_start(hbox, False)
		hbox.set_border_width(10)
		hbox.pack_start(gtk.Label())
		self.button = gtk.Button("Start")
		self.button.connect("clicked", self.start_stop)
		hbox.pack_start(self.button, False)
		self.button2 = gtk.Button("Quit")
		self.button2.connect("clicked", self.exit)
		hbox.pack_start(self.button2, False)
		hbox.add(gtk.Label())
		window.show_all()
		# Hosting side.  Reference shell pipeline kept for comparison:
		#filesrc location="$1" ! \
    #audio/x-iLBC,rate=8000,channels=1,mode=20 ! \
    #dspilbcsink

		
		
		
		
		# Receive: iLBC audio from UDP port 4999 -> DSP sink (plays it).
		self.player = gst.parse_launch("udpsrc port=4999 ! audio/x-iLBC,rate=8000,channels=1,mode=20 ! dspilbcsink")
		print "skickar ljud"
		# Send: DSP iLBC capture -> UDP to the hard-coded peer on port 5000.
		self.player1 = gst.parse_launch("dspilbcsrc dtx=0 ! audio/x-iLBC,rate=8000,channels=1,mode=20 ! udpsink host=130.236.218.184 port=5000")
		print "lyssnar video"
		# Both pipelines share the same message/sync callbacks.
		bus = self.player.get_bus()
		bus.add_signal_watch()
		bus.enable_sync_message_emission()
		bus.connect("message", self.on_message)
		bus.connect("sync-message::element", self.on_sync_message)
		print "startar bus"
		bus1 = self.player1.get_bus()
		bus1.add_signal_watch()
		bus1.enable_sync_message_emission()
		bus1.connect("message", self.on_message)
		bus1.connect("sync-message::element", self.on_sync_message)
		print "startar bus1"
Example #13
0
    def init_gst(self):
        """Set up the pocketsphinx speech-recognition pipeline and start it.

        Chain: mic (gconfaudiosrc) -> convert/resample -> voice activity
        detection (vader) -> pocketsphinx recognizer -> fakesink.
        """
        pipeline_desc = ('gconfaudiosrc ! audioconvert ! audioresample '
                         '! vader name=vad auto-threshold=true '
                         '! pocketsphinx name=asr ! fakesink')
        self.pipeline = gst.parse_launch(pipeline_desc)

        recognizer = self.pipeline.get_by_name('asr')
        recognizer.connect('partial_result', self.asr_partial_result)
        recognizer.connect('result', self.asr_result)

        # Alternative model: the Turtle grammar under
        # /usr/share/pocketsphinx/model/{lm,hmm} (turtle.DMP / turtle.dic /
        # hub4wsj_sc_8k) can be substituted for the continuous model below.

        # English continuous-speech model.
        recognizer.set_property('lm', '/usr/share/pocketsphinx/model/lm/en_US/hub4.5000.DMP')
        recognizer.set_property('dict', '/usr/share/pocketsphinx/model/lm/en_US/hub4.5000.dic')
        recognizer.set_property('hmm', '/usr/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k')

        recognizer.set_property('configured', True)

        message_bus = self.pipeline.get_bus()
        message_bus.add_signal_watch()
        message_bus.connect('message::application', self.application_message)

        self.pipeline.set_state(gst.STATE_PLAYING)
Example #14
0
    def __init__(self, options=None):
        """Configure the v4l2 capture bin from *options*.

        options -- dict of bin settings (name, videoencoder, muxer, caps,
        path, file, videocrop-*); an empty dict is used when omitted.
        """
        # None-default avoids the shared-mutable-default pitfall of the
        # previous ``options={}`` signature while preserving its behaviour.
        base.Base.__init__(self, {} if options is None else options)
        gst.Bin.__init__(self, self.options["name"])

        # Specialise the module-level pipeline template for this instance.
        aux = (
            pipestr.replace("gc-v4l2-preview", "sink-" + self.options["name"])
            .replace("gc-v4l2-enc", self.options["videoencoder"])
            .replace("gc-v4l2-mux", self.options["muxer"])
        )

        # MJPEG sources need an extra decode step; raw sources do not.
        if "image/jpeg" in self.options["caps"]:
            aux = aux.replace("gc-v4l2-dec", "jpegdec max-errors=-1 ! queue !")
        else:
            aux = aux.replace("gc-v4l2-dec", "")

        # parse_launch of "( ... )" yields a bin (cf. parse_bin_from_description).
        # Renamed from ``bin`` to avoid shadowing the builtin.
        parsed_bin = gst.parse_launch("( {} )".format(aux))
        self.add(parsed_bin)

        self.set_option_in_pipeline("location", "gc-v4l2-src", "device")

        self.set_value_in_pipeline(path.join(self.options["path"], self.options["file"]), "gc-v4l2-sink", "location")

        self.set_option_in_pipeline("caps", "gc-v4l2-filter", "caps", gst.Caps)
        # Propagate any configured framerate to the videorate capsfilter.
        fr = re.findall("framerate *= *[0-9]+/[0-9]+", self.options["caps"])
        if fr:
            newcaps = "video/x-raw-yuv," + fr[0]
            self.set_value_in_pipeline(newcaps, "gc-v4l2-vrate", "caps", gst.Caps)

        for pos in ["right", "left", "top", "bottom"]:
            self.set_option_in_pipeline("videocrop-" + pos, "gc-v4l2-crop", pos, int)
Example #15
0
	def __init__(self, uri):
		"""Set up the CCN video publisher: a GStreamer feed plus CCN naming/keys.

		NOTE(review): the filesrc location is hard-coded to a local MP4 while
		*uri* is only used for the VideoSink location -- confirm intended.
		"""
		self.pipeline = gst.parse_launch("""
			filesrc location=/home/enzo/Videos/Part_Of_Me_15.mp4 !
			typefind ! qtdemux name=mux mux.video_00 ! queue ! 
			VideoSink location=%s
		""" % uri)
		
		# Freshness used for most signed content objects (frames use 1).
		# NOTE(review): units presumed seconds -- confirm against pyccn docs.
		freshness = 30 * 30
		self.loop = gobject.MainLoop()

		self.publisher = utils.RepoSocketPublisher()	

		self._handle = pyccn.CCN()
		self._get_handle = pyccn.CCN()

		self._basename = pyccn.Name('/test/VideoSite')
		self._user_basename = self._basename.append('User')
		self._register_info_name = self._user_basename.append('Register_Info')
		# '/test/VideoSite/User/Register_Info'

		self._key = pyccn.CCN.getDefaultKey()
		# _name_key is the name of the content object for client to get the public key
		self._name_key = self._basename.append('Key')
		self._signed_info = pyccn.SignedInfo(self._key.publicKeyID, pyccn.KeyLocator(self._name_key), freshness = freshness)
		self._singed_info_frames = pyccn.SignedInfo(self._key.publicKeyID, pyccn.KeyLocator(self._name_key), freshness = 1)

		# client list
		self.client = []

		# key for encrypting content
		self._dataKey_flag = False # if True, data key has been published
#		self._data_key = os.urandom(16)
		self._data_key = '123456789asdfghj'
		self._name_of_dataKey = self._basename.append('Movie')
		self._name_of_dataKey = self._name_of_dataKey.append('DataKey')
Example #16
0
def main(args):
   """Tagsetter test; verify the result with:

   gst-launch -t playbin uri=file://$PWD/test.avi

   (The original had this as two separate bare strings, of which only the
   first became the docstring -- the second was a no-op statement.)
   """
   # Build a short test file: tone -> mp3 encode -> AVI mux -> test.avi.
   # Renamed from ``bin`` to avoid shadowing the builtin.
   pipeline = gst.parse_launch('audiotestsrc num-buffers=100 ! ' +
                               'lame ! ' +
                               'avimux name=mux ! ' +
                               'filesink location=test.avi')

   mux = pipeline.get_by_name('mux')

   bus = pipeline.get_bus()
   bus.add_signal_watch()
   bus.connect('message::eos', on_eos)

   # Prepare: READY state before merging tags into the muxer.
   pipeline.set_state(gst.STATE_READY)
   
   # Send tags.
   tags = gst.TagList()
   tags[gst.TAG_ARTIST] = "Unknown Genius"
   tags[gst.TAG_TITLE] = "Unnamed Artwork"
   mux.merge_tags(tags, gst.TAG_MERGE_APPEND)

   # Start playing; runs until EOS or Ctrl-C.
   pipeline.set_state(gst.STATE_PLAYING)

   try:
      mainloop.run()
   except KeyboardInterrupt:
      pass

   # Stop the pipeline.
   pipeline.set_state(gst.STATE_NULL)
Example #17
0
    def start_recognizer(self):
        """Build the pocketsphinx pipeline, load LM/dictionary from ROS params, start it."""
        rospy.loginfo("Starting recognizer... ")

        self.pipeline = gst.parse_launch(self.launch_config)
        self.asr = self.pipeline.get_by_name('asr')
        self.asr.connect('partial_result', self.asr_partial_result)
        self.asr.connect('result', self.asr_result)
        self.asr.set_property('configured', True)
        self.asr.set_property('dsratio', 1)


        # Configure language model
        if rospy.has_param(self._lm_param):
            lm = rospy.get_param(self._lm_param)
        else:
            # NOTE(review): the pipeline built above is left idle (never
            # started or torn down) on this early return -- confirm intended.
            rospy.logerr('Recognizer not started. Please specify a language model file.')
            return

        if rospy.has_param(self._dic_param):
            dic = rospy.get_param(self._dic_param)
        else:
            rospy.logerr('Recognizer not started. Please specify a dictionary.')
            return

        self.asr.set_property('lm', lm)
        self.asr.set_property('dict', dic)
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        # Keep the connection id so the watch can be disconnected later.
        self.bus_id = self.bus.connect('message::application', self.application_message)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.started = True
Example #18
0
	def __init__(self, stepper):
		"""Attach a spectrum-analysis listener to the PulseAudio monitor source."""
		self.stepper = stepper
		# 8-band spectrum messages arrive on the bus; fakesink discards audio.
		pipeline_desc = ('pulsesrc device="alsa_output.pci-0000_00_07.0.analog-stereo.monitor"'
		                 ' ! spectrum bands=8 ! fakesink')
		self.listener = gst.parse_launch(pipeline_desc)
		listener_bus = self.listener.get_bus()
		listener_bus.add_signal_watch()
		listener_bus.connect("message", self.on_message)
    def setup(self):
        """Assemble the mixer pipeline from the registered sources and wire
        per-source position/alpha controllers plus their OSC endpoints."""
        self.srcs.reverse()

        # Link each source's sub-pipeline into its named mixer sink pad.
        for src in self.srcs:
            self.pipe.append(' '.join([src['src'].pipe, '! ' + self.name + '.' + src['sink']]))

        print ' '.join(self.pipe)
        self.process = gst.parse_launch(' '.join(self.pipe))
        mixer = self.process.get_by_name("mixer")

        for src in self.srcs:
            src['pad'] = mixer.get_pad(src['sink'])
            # Controller animates the pad's position/alpha properties over time.
            src['control'] = gst.Controller(src['pad'], "xpos", "ypos", "alpha")

            # For each property: linear interpolation, a keyframe at t=5s,
            # and an OSC endpoint ('i' = int args, 'f' = float).
            src['control'].set_interpolation_mode("xpos", gst.INTERPOLATE_LINEAR)
            src['control'].set("xpos", 5 * gst.SECOND, src['src'].xpos)
            self.osc.add_method('/'+src['sink']+'/xpos', 'i', self.osc_callback)

            src['control'].set_interpolation_mode("ypos", gst.INTERPOLATE_LINEAR)
            src['control'].set("ypos", 5 * gst.SECOND, src['src'].ypos)
            self.osc.add_method('/'+src['sink']+'/ypos', 'i', self.osc_callback)

            src['control'].set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)
            src['control'].set("alpha", 5 * gst.SECOND, 1.0)
            self.osc.add_method('/'+src['sink']+'/alpha', 'f', self.osc_callback)
Example #20
0
def extractAudioTrack (device, track_number, sink, extra = None):
    """
    Extract a single audio track from a CD device, WAV-encode it and feed
    it into *sink*.

    :param device: CD device path handed to the cdparanoia element.
    :param track_number: index of the track to rip (segment seek covers
        [track_number, track_number + 1)).
    :param sink: pre-built GStreamer sink element; added to the pipeline
        and linked after the wavenc element.
    :param extra: optional dict of extra properties for cdparanoia.
    :returns: a GstOperation wrapping *sink* and the pipeline.
    """
    # Renamed from 'bin' to avoid shadowing the builtin.
    pipeline = gst.parse_launch ("cdparanoia ! wavenc")
    pipeline.set_state (gst.STATE_PAUSED)

    TRACK_FORMAT = gst.format_get_by_nick ("track")
    assert TRACK_FORMAT != 0
    PLAY_TRACK = TRACK_FORMAT | gst.SEEK_METHOD_SET | gst.SEEK_FLAG_FLUSH

    elements = pipeline.get_list ()
    cdparanoia = elements[0]
    wavenc = elements[-1]

    cdparanoia.set_property ("device", device)

    if extra is not None:
        for key, value in extra.iteritems ():
            cdparanoia.set_property (key, value)

    # Restrict playback to the requested track via a segment seek event.
    src = cdparanoia.get_pad ("src")
    evt = gst.event_new_segment_seek (PLAY_TRACK, track_number, track_number + 1)
    src.send_event (evt)

    pipeline.add (sink)
    wavenc.link (sink)

    return GstOperation(sink, pipeline)
Example #21
0
    def _stop_recording_audio(self):
        """Detach the audio bin and transcode the captured WAV to a tagged
        Ogg/Vorbis file, monitoring progress via bus messages and a timer."""
        self._pipeline.remove(self._audiobin)

        wav_path = os.path.join(Instance.instancePath, "output.wav")
        if not os.path.exists(wav_path) or os.path.getsize(wav_path) <= 0:
            # FIXME: inform model of failure?
            return

        if self._audio_pixbuf:
            self.model.still_ready(self._audio_pixbuf)

        # Transcoding pipeline: WAV file -> Vorbis inside an Ogg container.
        desc = ('filesrc location=' + wav_path +
                ' name=audioFilesrc ! wavparse name=audioWavparse'
                ' ! audioconvert name=audioAudioconvert'
                ' ! vorbisenc name=audioVorbisenc'
                ' ! oggmux name=audioOggmux'
                ' ! filesink name=audioFilesink')
        transcoder = gst.parse_launch(desc)

        tags = self._get_tags(constants.TYPE_AUDIO)
        if self._audio_pixbuf:
            # Embed the cover art as an encoded string in the extended comment.
            cover = utils.getStringEncodedFromPixbuf(self._audio_pixbuf)
            tags[gst.TAG_EXTENDED_COMMENT] = "coverart=" + cover

        transcoder.get_by_name('audioVorbisenc').merge_tags(tags, gst.TAG_MERGE_REPLACE_ALL)

        ogg_path = os.path.join(Instance.instancePath, "output.ogg")
        transcoder.get_by_name('audioFilesink').set_property("location", ogg_path)

        watch_bus = transcoder.get_bus()
        watch_bus.add_signal_watch()
        self._audio_transcode_handler = watch_bus.connect('message', self._onMuxedAudioMessageCb, transcoder)
        self._transcode_id = gobject.timeout_add(200, self._transcodeUpdateCb, transcoder)
        transcoder.set_state(gst.STATE_PLAYING)
Example #22
0
    def GenerateWaveform(self):
        """Start an asynchronous level scan of this file so the GUI can
        render its waveform; results arrive via the bus_message callbacks."""
        desc = ("filesrc name=src ! decodebin ! audioconvert ! "
                "level message=true name=level_element ! fakesink")
        self.loadingPipeline = gst.parse_launch(desc)

        source = self.loadingPipeline.get_by_name("src")
        level_elem = self.loadingPipeline.get_by_name("level_element")

        source.set_property("location", self.GetAbsFile())
        # Emit one level message per LEVEL_INTERVAL seconds of audio.
        level_elem.set_property("interval", int(self.LEVEL_INTERVAL * gst.SECOND))

        self.bus = self.loadingPipeline.get_bus()
        self.bus.add_signal_watch()
        for signal_name, handler in (("message::element", self.bus_message),
                                     ("message::tag", self.bus_message_tags),
                                     ("message::state-changed", self.bus_message_statechange),
                                     ("message::eos", self.bus_eos),
                                     ("message::error", self.bus_error)):
            self.bus.connect(signal_name, handler)

        self.levels_list = LevelsList.LevelsList()
        self.isLoading = True
        self.emit("loading")

        self.loadingPipeline.set_state(gst.STATE_PLAYING)
Example #23
0
    def play(self):
        """On first call, build the pipeline from self.command, connect the
        configured signals, and start watching the bus for messages."""
        if not self.parsed:
            command = " ! ".join(self.command)
            debug("launching: '%s'" % command)
            try:
                self.pipeline = gst.parse_launch(command)
                bus = self.pipeline.get_bus()
                assert not self.connected_signals
                self.connected_signals = []
                # A named signal targets that pipeline element; an empty
                # name means the signal is connected on the bus itself.
                for name, signal, callback in self.signals:
                    if name:
                        element = self.pipeline.get_by_name(name)
                    else:
                        element = bus
                    sid = element.connect(signal, callback)
                    self.connected_signals.append((element, sid))

                self.parsed = True

            except gobject.GError, e:
                # parse_launch failed: report, record the error, finish early.
                show_error("GStreamer error when creating pipeline", str(e))
                self.error = str(e)
                self.eos = True
                self.done()
                return

            bus.add_signal_watch()
            watch_id = bus.connect("message", self.on_message)
            self.watch_id = watch_id
Example #24
0
    def __init__ (self, source):
        """Probe whether *source* delivers a PCM WAV stream.

        Builds a typefind ! wavparse ! <pcm caps> ! fakesink chain, links
        *source* into it, and updates self.isPcm from the fakesink handoff
        callback (on_handoff).

        :param source: GStreamer source element to probe.
        """
        super (IsWavPcm, self).__init__ ()

        # Renamed from 'bin' to avoid shadowing the builtin.
        pipeline = gst.parse_launch (
            "typefind name=iwp_typefind ! \
            wavparse name=iwp_wavparse ! "
            + WavPcmParse +
            " ! fakesink name=iwp_fakesink"
        )

        self.oper = GstOperation(pipeline = pipeline)
        self.oper.listeners.append (self)

        decoder = pipeline.get_by_name ("iwp_typefind")

        sink = pipeline.get_by_name ("iwp_fakesink")
        self.oper.query_element = sink
        # Fire 'handoff' on every buffer so on_handoff can inspect the data.
        sink.set_property ("signal-handoffs", True)
        sink.connect ("handoff", self.on_handoff)

        waveparse = pipeline.get_by_name ("iwp_wavparse")
        waveparse.connect (NEW_PAD_SIGNAL, self.on_new_pad)

        self.oper.bin.add (source)
        source.link (decoder)

        # Assume non-PCM until a handoff proves otherwise.
        self.isPcm = False
Example #25
0
    def copyThumbPic(self, fsink, buffer, pad, user_data=None):
        """Handoff callback on the thumbnail fakesink: capture one JPEG
        frame as the thumbnail, then start muxing the recorded audio and
        video into mux.ogg.

        Only the first buffer after _thumb_exposure_open is used; the flag
        is cleared immediately so subsequent handoffs are ignored.
        """
        if not self._thumb_exposure_open:
            return

        self._thumb_exposure_open = False
        # Decode the JPEG buffer into a pixbuf for the UI.
        loader = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
        loader.write(buffer)
        loader.close()
        self.thumbBuf = loader.get_pixbuf()
        self.model.still_ready(self.thumbBuf)

        # Detach the thumbnail branch now that we have our frame.
        self._thumb_element('thumb_tee').unlink(self._thumb_element('thumb_queue'))

        oggFilepath = os.path.join(Instance.instancePath, "output.ogg") #ogv
        wavFilepath = os.path.join(Instance.instancePath, "output.wav")
        muxFilepath = os.path.join(Instance.instancePath, "mux.ogg") #ogv

        # Remux: Theora video from output.ogg plus freshly Vorbis-encoded
        # audio from output.wav into a single Ogg at muxFilepath.
        muxline = gst.parse_launch('filesrc location=' + str(oggFilepath) + ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoraparse ! oggmux name=muxOggmux ! filesink location=' + str(muxFilepath) + ' name=muxFilesink filesrc location=' + str(wavFilepath) + ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.')
        taglist = self._get_tags(constants.TYPE_VIDEO)
        vorbis_enc = muxline.get_by_name('muxVorbisenc')
        vorbis_enc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)

        muxBus = muxline.get_bus()
        muxBus.add_signal_watch()
        self._video_transcode_handler = muxBus.connect('message', self._onMuxedVideoMessageCb, muxline)
        self._mux_pipes.append(muxline)
        #add a listener here to monitor % of transcoding...
        self._transcode_id = gobject.timeout_add(200, self._transcodeUpdateCb, muxline)
        muxline.set_state(gst.STATE_PLAYING)
Example #26
0
    def __init__(self, options=None):
        """Build the Hauppauge capture bin and apply the device/location,
        mute, recording-path and vumeter options.

        :param options: optional dict of configuration overrides. A fresh
            dict is created when omitted — this replaces the previous
            ``options={}`` signature, which shared one mutable default dict
            across all instances.
        """
        if options is None:
            options = {}
        base.Base.__init__(self, options)
        gst.Bin.__init__(self, self.options['name'])

        # Point the template pipeline's preview sink at this bin's name.
        aux = pipestr.replace("gc-hauppauge-preview", "sink-" + self.options['name'])

        #bin = gst.parse_bin_from_description(aux, True)
        # Renamed from 'bin' to avoid shadowing the builtin.
        parsed_bin = gst.parse_launch("( {} )".format(aux))
        self.add(parsed_bin)

        src = self.get_by_name("gc-hauppauge-device-src")
        src.set_property("device", self.options["locprevideo"])

        src = self.get_by_name("gc-hauppauge-file-src")
        src.set_property("location", self.options["location"])

        src = self.get_by_name("gc-hauppauge-audio-src")
        src.set_property("location", self.options["locpreaudio"])

        # Without a player, keep the preview audio muted.
        if self.options["player"] == False:
            self.mute = True
            element = self.get_by_name("gc-hauppauge-volume")
            element.set_property("mute", True)
        else:
            self.mute = False

        sink = self.get_by_name("gc-hauppauge-sink")
        sink.set_property('location', path.join(self.options['path'], self.options['file']))

        # Disable level messages when no vumeter is displayed.
        if self.options["vumeter"] == False:
            level = self.get_by_name("gc-hauppauge-level")
            level.set_property("message", False)
Example #27
0
	def __init__(self):
		"""Create the viewer window (video area plus Start/Quit buttons) and
		a v4l2 preview pipeline whose messages are handled on the bus."""
		win = gtk.Window(gtk.WINDOW_TOPLEVEL)
		win.set_title("Webcam-Viewer")
		win.set_default_size(500, 400)
		win.connect("destroy", gtk.main_quit, "WM destroy")
		layout = gtk.VBox()
		win.add(layout)
		self.movie_window = gtk.DrawingArea()
		layout.add(self.movie_window)
		button_row = gtk.HBox()
		layout.pack_start(button_row, False)
		button_row.set_border_width(10)
		button_row.pack_start(gtk.Label())
		self.button = gtk.Button("Start")
		self.button.connect("clicked", self.start_stop)
		button_row.pack_start(self.button, False)
		self.button2 = gtk.Button("Quit")
		self.button2.connect("clicked", self.exit)
		button_row.pack_start(self.button2, False)
		button_row.add(gtk.Label())
		win.show_all()

		# Simple preview: camera straight into an auto-selected video sink.
		self.player = gst.parse_launch("v4l2src ! autovideosink")

		player_bus = self.player.get_bus()
		player_bus.add_signal_watch()
		player_bus.enable_sync_message_emission()
		player_bus.connect("message", self.on_message)
		player_bus.connect("sync-message::element", self.on_sync_message)
Example #28
0
 def start(self):
     """Launch the test-tone Ogg/Vorbis streaming pipeline and start it."""
     description = ("audiotestsrc ! audioconvert ! vorbisenc ! "
                    "oggmux ! gdppay ! multifdsink name=sink")
     self.pipeline = gst.parse_launch(description)
     self.sink = self.pipeline.get_by_name('sink')
     # Get notified whenever a streaming client disconnects.
     self.sink.connect('client-removed', self.client_removed_handler)
     self.pipeline.set_state(gst.STATE_PLAYING)
Example #29
0
    def setup(self, channels=None, samplerate=None, nframes=None):
        """Build the appsrc -> audioconvert -> wavenc pipeline and start it.

        Depending on configuration the WAV output goes to a file, to an
        appsink (streaming), or is teed to both.

        :param channels: channel count used in the appsrc caps.
        :param samplerate: sample rate used in the appsrc caps.
        :param nframes: forwarded to the base-class setup.
        """
        super(WavEncoder, self).setup(channels, samplerate, nframes)
        # TODO open file for writing
        # the output data format we want
        self.pipe = ''' appsrc name=src
                  ! audioconvert 
                  ! wavenc
                  '''
        if self.filename and self.streaming:
            # Tee: write the file AND stream through the appsink.
            self.pipe += ''' ! tee name=t
            ! queue ! filesink location=%s
            t. ! queue ! appsink name=app sync=False
            ''' % self.filename
            
        elif self.filename :
            self.pipe += '! filesink location=%s ' % self.filename
        else:
            self.pipe += '! appsink name=app sync=False'
            
        self.pipeline = gst.parse_launch(self.pipe)
        # store a pointer to appsrc in our encoder object
        self.src = self.pipeline.get_by_name('src')
        # store a pointer to appsink in our encoder object
        self.app = self.pipeline.get_by_name('app')
        
        # NOTE(review): int(channels)/int(samplerate) fail if either is still
        # None here — presumably the base setup guarantees values; confirm.
        srccaps = gst.Caps("""audio/x-raw-float,
            endianness=(int)1234,
            channels=(int)%s,
            width=(int)32,
            rate=(int)%d""" % (int(channels), int(samplerate)))
        self.src.set_property("caps", srccaps)

        # start pipeline
        self.pipeline.set_state(gst.STATE_PLAYING)
Example #30
0
    def _video_eos(self):
        """After end-of-stream: tear down the recording branches, then spin
        up a pipeline that extracts a single JPEG thumbnail frame from the
        recorded output.ogg via a leaky one-buffer queue."""
        self._pipeline.set_state(gst.STATE_NULL)
        self._pipeline.get_by_name("tee").unlink(self._videobin)
        self._pipeline.remove(self._videobin)
        self._pipeline.remove(self._audiobin)

        self.model.shutter_sound()

        # Disconnect the previous thumbnail pipeline's handoff, if any.
        if len(self._thumb_pipes) > 0:
            thumbline = self._thumb_pipes[-1]
            thumbline.get_by_name('thumb_fakesink').disconnect(self._thumb_handoff_handler)

        ogg_path = os.path.join(Instance.instancePath, "output.ogg") #ogv
        if not os.path.exists(ogg_path) or os.path.getsize(ogg_path) <= 0:
            # FIXME: inform model of failure?
            return

        line = 'filesrc location=' + ogg_path + ' name=thumbFilesrc ! oggdemux name=thumbOggdemux ! theoradec name=thumbTheoradec ! tee name=thumb_tee ! queue name=thumb_queue ! ffmpegcolorspace name=thumbFfmpegcolorspace ! jpegenc name=thumbJPegenc ! fakesink name=thumb_fakesink'
        thumbline = gst.parse_launch(line)
        thumb_queue = thumbline.get_by_name('thumb_queue')
        # Leaky single-buffer queue: only the most recent frame survives.
        thumb_queue.set_property("leaky", True)
        thumb_queue.set_property("max-size-buffers", 1)
        thumb_tee = thumbline.get_by_name('thumb_tee')
        thumb_fakesink = thumbline.get_by_name('thumb_fakesink')
        # copyThumbPic grabs the frame once _thumb_exposure_open is set.
        self._thumb_handoff_handler = thumb_fakesink.connect("handoff", self.copyThumbPic)
        thumb_fakesink.set_property("signal-handoffs", True)
        self._thumb_pipes.append(thumbline)
        self._thumb_exposure_open = True
        thumbline.set_state(gst.STATE_PLAYING)
Example #31
0
    def gst_play(self, fname, volume=100, balance=0):
        """Play *fname* through a one-shot GStreamer pipeline.

        :param fname: URI or local path of the sound to play.
        :param volume: amplification in percent (100 = unity gain).
        :param balance: panorama value passed to audiopanorama.
        :returns: True (playback proceeds asynchronously).
        """
        if fname.startswith(('file:', 'http:')):
            uri = fname
        elif config.data.os == 'win32':
            uri = 'file:' + urllib.pathname2url(fname)
        else:
            uri = 'file://' + os.path.abspath(fname)

        player = gst.parse_launch('uridecodebin name=decode uri=%s ! audioconvert ! audiopanorama panorama=%f ! audioamplify name=amplify amplification=%f ! autoaudiosink' % (uri, float(balance), int(volume) / 100.0 ))
        player_bus = player.get_bus()
        player_bus.add_signal_watch()

        def on_eos(b, m):
            # Tear the pipeline down once playback has finished.
            if m.src == player:
                player.set_state(gst.STATE_NULL)

        player_bus.connect('message::eos', on_eos)
        player.set_state(gst.STATE_PLAYING)
        # FIXME: since we do not reuse the pipeline, we maybe should clean it up on state_change -> READY
        return True
Example #32
0
    def stop_recording_audio(self):
        """Stop audio capture and transcode the recorded WAV into a tagged
        Ogg/Vorbis file, tracking progress via bus messages and a timer."""
        # We should be able to simply pause and remove the audiobin, but
        # this seems to cause a gstreamer segfault. So we stop the whole
        # pipeline while manipulating it.
        # http://dev.laptop.org/ticket/10183
        self._pipeline.set_state(gst.STATE_NULL)
        self.model.shutter_sound()
        self._pipeline.remove(self._audiobin)

        audio_path = os.path.join(Instance.instancePath, "output.wav")
        if not os.path.exists(audio_path) or os.path.getsize(audio_path) <= 0:
            # FIXME: inform model of failure?
            return

        if self._audio_pixbuf:
            self.model.still_ready(self._audio_pixbuf)

        # Transcoding pipeline: WAV file -> Vorbis inside an Ogg container.
        line = 'filesrc location=' + audio_path + ' name=audioFilesrc ! wavparse name=audioWavparse ! audioconvert name=audioAudioconvert ! vorbisenc name=audioVorbisenc ! oggmux name=audioOggmux ! filesink name=audioFilesink'
        audioline = gst.parse_launch(line)

        taglist = self._get_tags(constants.TYPE_AUDIO)

        if self._audio_pixbuf:
            # Embed the cover art as an encoded string in the extended comment.
            pixbuf_b64 = utils.getStringEncodedFromPixbuf(self._audio_pixbuf)
            taglist[gst.TAG_EXTENDED_COMMENT] = "coverart=" + pixbuf_b64

        vorbis_enc = audioline.get_by_name('audioVorbisenc')
        vorbis_enc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)

        audioFilesink = audioline.get_by_name('audioFilesink')
        audioOggFilepath = os.path.join(Instance.instancePath, "output.ogg")
        audioFilesink.set_property("location", audioOggFilepath)

        audioBus = audioline.get_bus()
        audioBus.add_signal_watch()
        self._audio_transcode_handler = audioBus.connect(
            'message', self._onMuxedAudioMessageCb, audioline)
        self._transcode_id = gobject.timeout_add(200, self._transcodeUpdateCb,
                                                 audioline)
        audioline.set_state(gst.STATE_PLAYING)
Example #33
0
    def start_recognizer(self):
        """Build the pocketsphinx pipeline from self.launch_config, load the
        language model / dictionary / acoustic model from ROS parameters,
        then start recognition (sets self.started on success)."""
        rospy.loginfo("开始语音识别... ")
        # rospy.loginfo("Starting recognizer... ")

        self.pipeline = gst.parse_launch(self.launch_config)  # parse the microphone/launch configuration
        self.asr = self.pipeline.get_by_name('asr')  # automatic speech recognition element
        self.asr.connect('partial_result', self.asr_partial_result)  # handlers defined elsewhere in this class
        self.asr.connect('result', self.asr_result)
        #self.asr.set_property('configured', True)  # enabling 'configured' requires the hmm model
        self.asr.set_property('dsratio', 1)

        # Configure the language model; abort (started stays unset) if any
        # of the three required parameters is missing.
        if rospy.has_param(self._lm_param):
            lm = rospy.get_param(self._lm_param)
        else:
            rospy.logerr('请配置一个语言模型 lm.')
            return

        if rospy.has_param(self._dic_param):
            dic = rospy.get_param(self._dic_param)
        else:
            rospy.logerr('请配置一个语言字典 dic.')
            return

        if rospy.has_param(self._hmm_param):
            hmm = rospy.get_param(self._hmm_param)
        else:
            rospy.logerr('请配置一个语言识别模型 hmm.')
            return

        self.asr.set_property('lm', lm)  # language model
        self.asr.set_property('dict', dic)  # pronunciation dictionary
        self.asr.set_property('hmm', hmm)  # acoustic (HMM) model

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        # Recognition results arrive as application messages on the bus.
        self.bus_id = self.bus.connect('message::application',
                                       self.application_message)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.started = True
Example #34
0
    def __init__(self, parent):
        """Build the Tk UI (video canvas plus control buttons) and a v4l2
        capture pipeline that both previews the camera and dumps 2 fps
        JPEG frames to numbered files for later processing."""
        gobject.threads_init()
        Frame.__init__(self, parent)

        # Parent Object
        self.parent = parent
        self.parent.title("Lip Reader")
        self.parent.geometry("840x640+0+0")
        self.parent.resizable(width=FALSE, height=FALSE)

        # Video Box
        self.movie_window = Canvas(self, width=640, height=440, bg="black")
        self.movie_window.pack(side=TOP, expand=YES, fill=BOTH)

        # Buttons Box
        self.ButtonBox = Frame(self, relief=RAISED, borderwidth=1)
        self.ButtonBox.pack(side=BOTTOM, expand=YES, fill=BOTH)

        self.closeButton = Button(self.ButtonBox, text="Close", command=self.quit)
        self.closeButton.pack(side=RIGHT, padx=5, pady=5)

	self.proButton = Button(self.ButtonBox, text="Process", command=self.process)
        self.proButton.pack(side=RIGHT, padx=5, pady=5)

        self.gotoButton = Button(self.ButtonBox, text="Start", command=self.start_stop)
        self.gotoButton.pack(side=LEFT, padx=5, pady=5)

	#self.capture = 
	#self.capture = Label(self, text = "Shoot a Video").grid(row = 0, column = 2, sticky = W+E+N+S) 

        # Set up the gstreamer pipeline
        # tee splits the stream: live xvimagesink preview in one branch,
        # rate-limited JPEG snapshots (multifilesink) in the other.
        #self.player = gst.parse_launch ("v4l2src ! video/x-raw-yuv,width=640,height=480 ! ffmpegcolorspace ! xvimagesink  ")

	self.player = gst.parse_launch ("v4l2src ! tee name=t ! queue ! xvimagesink t. ! queue ! videorate ! video/x-raw-yuv, width=640, height=480, framerate=2/1 ! jpegenc ! multifilesink location=%05d.jpg  ")
	print "started"
	bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
Example #35
0
		def __init__ (self, parent=None):
			"""Set up the radio-recorder main window: UI, command-line
			option parsing, timers, the saved-stations combo box from the
			database, signal wiring, and a playbin whose audio output goes
			to an ALSA sink."""
			global homeDir
			QMainWindow.__init__(self, parent)
			homeDir = os.environ['HOME'] + '/Documentos/audio/'
			self.ui = Ui_MainWindow()
			self.ui.setupUi(self)
			self.centrar()
			self.lista_dispositivos()
			oparser = OptionParser()
			oparser.add_option("-f", "--file", dest="path",help="save to FILE", metavar="FILE")
			oparser.add_option("-d", "--device", dest="device",help="Use device DEVICE", metavar="DEVICE")
			(options, args) = oparser.parse_args()
			self.ctimer = QtCore.QTimer()
			self.ctimer2 = QtCore.QTimer()
			self.ui.lineEdit.setText(homeDir)
			self.basedatos(0)
			self.setWindowIcon(QtGui.QIcon('/usr/usr/share/GrabadorRadio/logo2.png'))
			# Populate the station combo box from the database, then close it.
			query = QSqlQuery(db)
			if query.exec_("SELECT * FROM radios_nombre"):
				while query.next():
					self.ui.comboBox_2.addItem(query.value(1).toString())
			db.close()
			db.removeDatabase("rad0") 			
			QtCore.QObject.connect(self.ui.pushButton,QtCore.SIGNAL("clicked()"), self.salir)
			QtCore.QObject.connect(self.ui.pushButton_4,QtCore.SIGNAL("clicked()"), self.constant)
			QtCore.QObject.connect(self.ui.pushButton_2,QtCore.SIGNAL("clicked()"), self.ruta)
			QtCore.QObject.connect(self.ctimer, QtCore.SIGNAL("timeout()"), self.constantUpdate)
			QtCore.QMetaObject.connectSlotsByName(self)

			#self.basedatos()
			self.player = gst.element_factory_make('playbin', 'player')
			try:
             # alsasink pulsesink osssink autoaudiosink
				device = gst.parse_launch('alsasink')
			except gobject.GError:
				print 'Error: could not launch audio sink'
			else:
				self.player.set_property('audio-sink', device)
			gobject.threads_init()
Example #36
0
    def __init__(self, loop, filename, frame_queue):
        """Build a pipeline that hands decoded RGB frames (scaled to
        COLS x ROWS at 25 fps) to self.callback via a fakesink handoff.

        :param loop: main loop object, stored for later use.
        :param filename: media file path, or "--video" for live v4l2 capture.
        :param frame_queue: queue that receives captured frames.
        """
        self.loop = loop
        self.frame_queue = frame_queue

        dims = 'width={0},height={1}'.format(COLS, ROWS)

        if filename == "--video":
            # Live-capture branch: camera only, no audio.
            launch_desc = 'v4l2src ! ffmpegcolorspace ! videoscale ! alphacolor ! video/x-raw-rgb,' + dims + ',framerate=25/1 ! fakesink name=sink sync=1'
        else:
            # File branch: decode once, fan out to video (fakesink) and audio (alsasink).
            launch_desc = ('filesrc location=' + filename + ' ! decodebin name=decoder\n'
                           + 'decoder. ! ffmpegcolorspace ! videoscale ! alphacolor ! video/x-raw-rgb,' + dims + ',framerate=25/1 ! fakesink name=sink sync=1\n'
                           + 'decoder. ! audioconvert ! audioresample ! alsasink')

        self.pipeline = gst.parse_launch(launch_desc)

        msg_bus = self.pipeline.get_bus()
        msg_bus.add_signal_watch()
        msg_bus.connect("message::eos", self.bus_watch)

        sink_elem = self.pipeline.get_by_name('sink')
        sink_elem.props.signal_handoffs = True
        sink_elem.connect("handoff", self.callback)
Example #37
0
def get_peaks(filename):
    """Decode *filename* to mono 44.1 kHz and return the list of peak
    values (first channel) reported by the level element once per second
    of audio (interval=1000000000 ns).

    Blocks by iterating the default GObject main context until EOS.
    """
    global do_run

    pipeline_txt = ('filesrc location="%s" ! decodebin ! audioconvert ! '
                    'audio/x-raw-int,channels=1,rate=44100,endianness=1234,'
                    'width=32,depth=32,signed=(bool)True !'
                    'level name=level interval=1000000000 !'
                    'fakesink' % filename)
    pipeline = gst.parse_launch(pipeline_txt)

    level = pipeline.get_by_name('level')
    bus = pipeline.get_bus()
    bus.add_signal_watch()

    peaks = []
    do_run = True

    def show_peak(bus, message):
        """Collect one peak per level message; stop the loop on EOS."""
        global do_run
        if message.type == gst.MESSAGE_EOS:
            pipeline.set_state(gst.STATE_NULL)
            do_run = False
            return
        # filter only on level messages
        if message.src is not level or \
           not message.structure.has_key('peak'):
            return
        peaks.append(message.structure['peak'][0])

    # connect the callback
    bus.connect('message', show_peak)

    # run the pipeline until we got eos
    pipeline.set_state(gst.STATE_PLAYING)
    ctx = gobject.gobject.main_context_default()
    while ctx and do_run:
        ctx.iteration()

    return peaks
Example #38
0
    def start_recognizer(self):
        """Launch the pocketsphinx pipeline, load the lm/dict/hmm model
        files from ROS parameters, and start listening for results."""
        rospy.loginfo("Starting recognizer... ")

        self.pipeline = gst.parse_launch(self.launch_config)
        self.asr = self.pipeline.get_by_name('asr')
        self.asr.connect('partial_result', self.asr_partial_result)
        self.asr.connect('result', self.asr_result)
        #self.asr.set_property('configured', True)

        self.asr.set_property('dsratio', 1)

        # Guard clauses: abort (leaving self.started unset) when any of the
        # required model parameters is missing.
        if not rospy.has_param(self._lm_param):
            rospy.logerr('Recognizer not started. Please specify a language model file.')
            return
        lm = rospy.get_param(self._lm_param)

        if not rospy.has_param(self._dic_param):
            rospy.logerr('Recognizer not started. Please specify a dictionary.')
            return
        dic = rospy.get_param(self._dic_param)

        if not rospy.has_param(self._hmm_param):
            rospy.logerr('what is param hmm?')
            return
        hmm = rospy.get_param(self._hmm_param)

        self.asr.set_property('lm', lm)
        self.asr.set_property('dict', dic)
        self.asr.set_property('hmm', hmm)

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        # Recognition results arrive as application messages on the bus.
        self.bus_id = self.bus.connect('message::application', self.application_message)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.started = True
Example #39
0
    def start(self, request=None):
        """Start transcoding self.uri to raw 44.1 kHz stereo 16-bit PCM and
        stream it into a DataSink bound to *request*.

        :param request: request object whose notifyFinish() deferred
            triggers requestFinished for cleanup.
        """
        self.info("PCMTranscoder start %r %r", request, self.uri)
        self.pipeline = gst.parse_launch(
            "%s ! decodebin ! audioconvert name=conv" % self.uri)

        conv = self.pipeline.get_by_name('conv')
        caps = gst.Caps(
            "audio/x-raw-int,rate=44100,endianness=4321,channels=2,width=16,depth=16,signed=true"
        )
        # Local renamed from 'filter' (shadowed the builtin) per the old
        # FIXME; the element's GStreamer name stays "filter".
        capsfilter = gst.element_factory_make("capsfilter", "filter")
        capsfilter.set_property("caps", caps)
        self.pipeline.add(capsfilter)
        conv.link(capsfilter)

        sink = DataSink(destination=self.destination, request=request)
        self.pipeline.add(sink)
        capsfilter.link(sink)
        self.pipeline.set_state(gst.STATE_PLAYING)

        d = request.notifyFinish()
        d.addBoth(self.requestFinished)
Example #40
0
    def pipeline(self):
        """Lazily build the pocketsphinx pipeline on first access and return
        it; subsequent calls return the cached self._pipeline."""
        if self._pipeline is None:
            gobject.threads_init()

            command = " ".join(self.pipeline_command)
            log.info('Pipeline: %s', command)
            self._pipeline = gst.parse_launch(command)

            sphinx = self._pipeline.get_by_name('sphinx')
            sphinx.connect('partial_result', self.sphinx_partial_result)
            sphinx.connect('result', self.sphinx_result)
            sphinx.set_property('configured', True)

            self.monitor = self._pipeline.get_by_name('monitor')

            bus = self._pipeline.get_bus()
            bus.add_signal_watch()
            bus.connect('message', self.on_level)

            # Use the freshly-built _pipeline directly: the original went
            # through self.pipeline here, re-entering this very accessor
            # (and breaking outright if it is ever used as a plain method).
            self._pipeline.set_state(gst.STATE_PAUSED)

        return self._pipeline
Example #41
0
    def testPadMonitorActivation(self):
        """A PadMonitor must report active after data has flowed through
        the monitored pad."""
        pipeline = gst.parse_launch(
            'fakesrc num-buffers=1 ! identity name=id ! fakesink')
        ident_elem = pipeline.get_by_name('id')

        source_pad = ident_elem.get_pad('src')
        monitor = padmonitor.PadMonitor(source_pad, "identity-source",
                                        lambda name: None, lambda name: None)
        # Nothing has flowed yet, so the monitor starts inactive.
        self.assertEquals(monitor.isActive(), False)

        self._run_pipeline(pipeline)
        # Now give the reactor a chance to process the callFromThread()
        deferred = defer.Deferred()

        def complete():
            self.assertEquals(monitor.isActive(), True)
            monitor.detach()
            deferred.callback(True)

        reactor.callLater(0.1, complete)

        return deferred
Example #42
0
    def init_camera(self):
        """(Re)create the capture pipeline for the configured video source
        and start capturing once the appsink is available."""
        # TODO: This doesn't work when camera resolution is resized at runtime.
        if self._pipeline:
            self.release()

        src_desc = self._video_src
        if src_desc == 'v4l2src':
            src_desc += ' device=/dev/video%d' % self._index
        elif src_desc == 'dc1394src':
            src_desc += ' camera-number=%d' % self._index

        # Request RGB output with an explicit channel layout for GL upload.
        rgb_caps = ('video/x-raw-rgb,red_mask=(int)0xff0000,'
                    'green_mask=(int)0x00ff00,blue_mask=(int)0x0000ff')
        template = ('%s ! decodebin name=decoder ! ffmpegcolorspace ! appsink '
                    'name=camerasink emit-signals=True caps=%s')
        self._pipeline = gst.parse_launch(template % (src_desc, rgb_caps))
        self._camerasink = self._pipeline.get_by_name('camerasink')
        self._camerasink.connect('new-buffer', self._gst_new_buffer)
        self._decodebin = self._pipeline.get_by_name('decoder')

        if self._camerasink and not self.stopped:
            self.start()
    def testPadMonitorTimeout(self):
        """A PadMonitor must go inactive after its check interval elapses
        with no data; the test passes when hasInactivated fires."""
        # NOTE(review): these mutate class-level intervals globally and are
        # never restored — later tests see the shortened values.
        padmonitor.PadMonitor.PAD_MONITOR_PROBE_INTERVAL = 0.2
        padmonitor.PadMonitor.PAD_MONITOR_CHECK_INTERVAL = 0.5

        pipeline = gst.parse_launch(
            'fakesrc num-buffers=1 ! identity name=id ! fakesink')
        identity = pipeline.get_by_name('id')

        srcpad = identity.get_pad('src')

        # Now give the reactor a chance to process the callFromThread()

        def finished():
            monitor.detach()
            d.callback(True)

        def hasInactivated(name):
            # We can't detach the monitor from this callback safely, so do
            # it from a reactor.callLater()
            reactor.callLater(0, finished)

        def hasActivated():
            self.assertEquals(monitor.isActive(), True)
            # Now, we don't send any more data, and after our 0.5 second
            # timeout we should go inactive. Pass our test if that happens.
            # Otherwise trial will time out.

        monitor = padmonitor.PadMonitor(srcpad, "identity-source",
                                        lambda name: None,
                                        hasInactivated)
        self.assertEquals(monitor.isActive(), False)

        self._run_pipeline(pipeline)

        d = defer.Deferred()

        reactor.callLater(0.2, hasActivated)

        return d
Example #44
0
    def __init__(self, padre):
        """Build an undecorated capture window (preview area plus snapshot
        button) and a v4l2 preview pipeline, then enter the GTK main loop.

        :param padre: owning object, kept on self for callbacks.
        """
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title("Imagen de usuario")
        self.window.set_decorated(False)
        self.window.set_default_size(260, 232)
        self.window.connect("destroy", gtk.main_quit, "WM destroy")
        self.padre = padre
        # Fixed layout: video area on top, button strip below it.
        vbox = gtk.Fixed()
        self.window.add(vbox)
        self.movie_window = gtk.DrawingArea()
        self.movie_window.set_size_request(260, 200)
        vbox.put(self.movie_window, 0, 0)
        hbox = gtk.HBox()
        hbox.set_size_request(260, 32)
        vbox.put(hbox, 0, 200)

        self.button3 = gtk.Button("Capturar imagen")
        self.button3.connect("clicked", self.take_snapshot)
        hbox.pack_start(self.button3, True)

        # Set up the gstreamer pipeline

        self.player = gst.parse_launch("v4l2src ! autovideosink")
        #self.player = gst.parse_launch ("v4l2src ! videorate ! video/x-raw-yuv,framerate=30/1,width=320,height=240 ! tee name=t_vid ! queue ! videoflip method=horizontal-flip ! xvimagesink sync=false t_vid. ! queue ! mux. alsasrc ! audio/x-raw-int,rate=48000,channels=2,depth=16 ! queue ! audioconvert ! queue ! mux. avimux name=mux")

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)

        self.window.set_border_width(3)
        self.window.set_position(gtk.WIN_POS_CENTER_ALWAYS)
        self.window.show_all()

        self.start_stop(None)
        gtk.gdk.threads_init()
        time.sleep(0.1)
        # NOTE(review): gtk.main() blocks — this constructor does not return
        # until the window is closed.
        gtk.main()
Example #45
0
    def __init__(self):
        """Initialize the speech pipeline components.

        Sets up the recognizer ROS node, its output publisher and
        start/stop services, the pocketsphinx pipeline and its lm/dict
        parameters, then starts recognition and enters the GTK main loop.
        """
        rospy.init_node('recognizer')
        self.pub = rospy.Publisher('~output',String)
        rospy.on_shutdown(self.shutdown)

        # services to start/stop recognition
        rospy.Service("~start", Empty, self.start)
        rospy.Service("~stop", Empty, self.stop)

        # configure pipeline
        self.pipeline = gst.parse_launch('gconfaudiosrc ! audioconvert ! audioresample '
                                         + '! vader name=vad auto-threshold=true '
                                         + '! pocketsphinx name=asr ! fakesink')
        asr = self.pipeline.get_by_name('asr')
        asr.connect('partial_result', self.asr_partial_result)
        asr.connect('result', self.asr_result)
        asr.set_property('configured', True)
        asr.set_property('dsratio', 1)

        # parameters for lm and dic
        # Catch KeyError specifically — rospy.get_param raises it for a
        # missing parameter — instead of a bare except that hid any error.
        try:
            lm_ = rospy.get_param('~lm')
        except KeyError:
            rospy.logerr('Please specify a language model file')
            return
        try:
            dict_ = rospy.get_param('~dict')
        except KeyError:
            rospy.logerr('Please specify a dictionary')
            return
        asr.set_property('lm',lm_)
        asr.set_property('dict',dict_)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::application', self.application_message)
        self.start(None)
        # NOTE: gtk.main() blocks until the application quits.
        gtk.main()
Example #46
0
    def init_gst(self):
        """Initialize the speech components: build the pocketsphinx
        pipeline, apply any provided model files, and leave it paused."""
        launch = ('gconfaudiosrc ! audioconvert ! audioresample '
                  '! vader name=vad auto-threshold=true '
                  '! pocketsphinx name=asr ! fakesink')
        self.pipeline = gst.parse_launch(launch)
        asr = self.pipeline.get_by_name('asr')
        asr.connect('partial_result', self.asr_partial_result)
        asr.connect('result', self.asr_result)

        # Apply whichever model files were provided; skip the unset ones.
        for prop, value in (('lm', self.ps_lm), ('dict', self.ps_dict)):
            if value is not None:
                asr.set_property(prop, value)

        asr.set_property('configured', True)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::application', self.application_message)

        # Paused, not playing: recognition is started elsewhere.
        self.pipeline.set_state(gst.STATE_PAUSED)
Example #47
0
def main(nom):
	#pipeline pour le format ogg
	#PIPELINE = "filesrc location=\"test.avi\" ! decodebin ! ffmpegcolorspace ! theoraenc quality=32 ! oggmux ! filesink name=sortie"

	#pipeline pour le format mp4
	PIPELINE = "filesrc location=\"/home/matthieu/nao/public/videoNao/record.avi\" ! decodebin ! ffmpegcolorspace ! x264enc ! ffmux_mp4 ! filesink name=sortie"

	#pipeline à lancer dans le terminal pour la meme chose format ogg
	#"gst-launch filesrc location=\"test.avi\" ! decodebin ! ffmpegcolorspace ! theoraenc quality=32 ! oggmux ! filesink location=\"test.ogg\""

	FICHIER = "/home/matthieu/nao/public/videoNao/"+nom
	pipeline = gst.parse_launch(PIPELINE)
	filesink = pipeline.get_by_name("sortie")
	filesink.set_property("location", FICHIER)
	bus = pipeline.get_bus()
	bus.add_signal_watch()
	bus.connect( 'message', onSendMessage )
	pipeline.set_state(gst.STATE_PLAYING)
	time.sleep(5)

	pipeline.set_state(gst.STATE_NULL)
	print "c'est fini"
Example #48
0
def source_to_wav(source, sink):
    """
    Converts a given source element to wav format and sends it to sink element.

    Links ``source`` into a decode/convert/wavenc chain and links the encoder
    output to ``sink``, returning the wrapping GstOperation.

    To convert a media file to a wav using gst-launch:
    source ! decodebin ! audioconvert ! audioscale !$_WAV_PCM_PARSE ! wavenc
    """
    # Renamed from ``bin`` to avoid shadowing the builtin; a space is added
    # after the first "!" so the launch description no longer concatenates
    # into "!audioconvert".
    pipeline = gst.parse_launch("decodebin name=stw_decodebin ! "
                                "audioconvert ! " + _WAV_PCM_PARSE +
                                " ! wavenc name=stw_wavenc")
    oper = GstOperation(sink, pipeline)

    decoder = pipeline.get_by_name("stw_decodebin")
    encoder = pipeline.get_by_name("stw_wavenc")

    # Splice the caller-supplied endpoints around the decode/encode chain.
    oper.bin.add(source)
    oper.bin.add(sink)
    source.link(decoder)
    encoder.link(sink)

    return oper
Example #49
0
    def start_visualization(self):
        # TODO: overlay the data somehow, maybe with some cv+gstreamer thing?
        
        # set up connections
        peer = (self.ip, self.port)
        iptype = socket.AF_INET6 if ':' in peer[0] else socket.AF_INET
        vid_socket = socket.socket(iptype, socket.SOCK_DGRAM)
        data_socket = socket.socket(iptype, socket.SOCK_DGRAM)
        
        # set up keepalive
        self._viz_running = True
        self.vid_thread = threading.Thread(self.send_keepalive_msg, [vid_socket, KA_VIDEO_MSG, peer])
        self.vid_thread.start()
        self.data_thread = threading.Thread(self.send_keepalive_msg, [data_socket, KA_DATA_MSG, peer])
        self.data_thread.start()
        
        # set up gstreamer (blocked here so the rest of the class can run without it installed)
        global pygst
        global gst
        if pygst is None:
            import pygst
            pygst.require('0.10')
            import gst
        
        PIPELINE_DEF = "udpsrc do-timestamp=true name=src blocksize=1316 closefd=false buffer-size=5600 !" \
               "mpegtsdemux !" \
               "queue !" \
               "ffdec_h264 max-threads=0 !" \
               "ffmpegcolorspace !" \
               "xvimagesink name=video"

        self._pipeline = None
        try:
            self._pipeline = gst.parse_launch(PIPELINE_DEF)
        except Exception, e:
            print e
            self.stop_visualization()
            return
Example #50
0
    def __init__(self, streamname='bbc'):
        self.pipeline = None
        self.streamname = streamname
        streams = {
            'bbc': 'http://bbcwssc.ic.llnwd.net/stream/bbcwssc_mp1_ws-eieuk',
            'bbc4':
            "http://wm-live.bbc.net.uk/wms/bbc_ami/radio4/radio4_bb_live_int_ep1_sl0?BBC-UID=b4adbcdf5ba7eea337440a77610973cee5bd1676f0b0524162a83074f35cc068",
            'test': 'rtmp://216.246.37.52/fmr',
            'capetalk': 'mms://46.4.25.237/capetalk-live',
            'whatsplaying': 'http://commando.nitric.co.za/whatsplaying',
            'npr': 'http://69.166.45.60/nwpr'
        }

        url = streams.get(
            streamname,
            "http://commando.nitric.co.za/twohundred/s" + streamname)
        print "Starting stream pipeline: " + url
        self.pipeline = gst.parse_launch('''
		souphttpsrc location="%s" !
		decodebin ! audio/x-raw-int !
		appsink name=sink sync=False''' % url)
        #http://mp32.bbc.streamuk.com:80/
        #mms://a243.l3944038972.c39440.g.lm.akamaistream.net/D/243/39440/v0001/reflector:38972
        self.sink = self.pipeline.get_by_name('sink')
        self.pipeline.set_state(gst.STATE_PLAYING)

        self.sink.connect('new-buffer', self.on_new_buffer)
        self.sink.set_property('emit-signals', True)

        self.streamrate = 22050.0  # Hz

        if streamname == '220':
            self.streamrate = 88200  # Hz

        self.bufsize = 576

        self.buffer = numpy.zeros(1024)
        self.queue = Queue.Queue()
Example #51
0
    def __init__(self, infile, outfile):
        """Build a transcoding pipeline that routes decoded video frames out
        through an appsink (as PNG) and back in through an appsrc, re-encoding
        the result (theora video + vorbis audio) into a Matroska file.

        :param infile: path of the input video file (filesrc location).
        :param outfile: path of the output file (filesink location).
        """
        # The launch description declares several disconnected sub-chains;
        # decodebin's dynamic pads are linked to the queues later, in
        # on_new_decoded_pad.
        self.player = gst.parse_launch(
            'filesrc name=video_in' + ' ! decodebin name=decoder' +
            ' matroskamux name=mux' + ' ! filesink name=video_out' +
            ' queue name=q_audio_in' + ' ! audioconvert' + ' ! vorbisenc' +
            ' ! queue name=q_audio_out' + ' ! mux.' +
            ' queue name=q_orig_video_in' + ' ! ffmpegcolorspace' +
            ' ! pngenc compression-level=1 snapshot=false' +
            ' ! queue name=q_orig_video_out' + ' ! appsink name=png_sink' +
            ' appsrc name=png_src'
            +  # TODO: capsset image/svg+xml ! rsvgdec; for now, output is png; URGENT: set framerate, res, etc.
            ' ! queue name=q_new_video_in' + ' ! pngdec' +
            ' ! ffmpegcolorspace' + ' ! theoraenc' +
            ' ! queue name=q_new_video_out' + ' ! mux.')
        self.player.set_state(gst.STATE_NULL)
        self.player.get_by_name('video_in').set_property('location', infile)
        self.player.get_by_name('video_out').set_property('location', outfile)

        self.decoder = self.player.get_by_name('decoder')
        self.decoder.connect('new-decoded-pad', self.on_new_decoded_pad)

        # The appsink keeps at most one buffer and does not drop, so the
        # python side paces the pipeline instead of buffering frames.
        self.png_sink = self.player.get_by_name('png_sink')
        self.png_sink.set_property('drop', False)
        self.png_sink.set_property('max_buffers', 1)

        self.png_src = self.player.get_by_name('png_src')

        player_bus = self.player.get_bus()
        player_bus.add_signal_watch()
        player_bus.connect('message', self.on_message)

        # app_caps is set once appsrc caps are configured (presumably in
        # on_message / the pad callback -- confirm); ready_lock is acquired
        # here and released elsewhere when the pipeline becomes ready.
        self.app_caps = False
        self.ready_lock = threading.Lock()
        self.ready_lock.acquire()

        self.time_format = gst.Format(gst.FORMAT_TIME)

        self.infile = infile
    def __init__(self):
        """Build the viewer window and the RTP/MP4V playback pipeline."""
        # --- GTK layout ---------------------------------------------------
        main_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        main_window.set_title("Webcam-Viewer")
        main_window.set_default_size(400, 350)
        main_window.connect("destroy", gtk.main_quit, "WM destroy")

        layout = gtk.VBox()
        main_window.add(layout)

        self.movie_window = gtk.DrawingArea()
        layout.add(self.movie_window)

        button_row = gtk.HBox()
        layout.pack_start(button_row, False)
        button_row.set_border_width(10)
        button_row.pack_start(gtk.Label())

        self.button = gtk.Button("Start")
        self.button.connect("clicked", self.start_stop)
        button_row.pack_start(self.button, False)

        self.button2 = gtk.Button("Quit")
        self.button2.connect("clicked", self.exit)
        button_row.pack_start(self.button2, False)
        button_row.add(gtk.Label())

        main_window.show_all()

        # --- GStreamer pipeline -------------------------------------------
        # Receive an MP4V-ES RTP stream on UDP port 5000, depayload, decode
        # and render into an X window.
        self.player = gst.parse_launch(
            "udpsrc uri=udp://0.0.0.0:5000  caps = \"application/x-rtp, media=(string)video, clock-rate=(int)90000, encoding-name=(string)MP4V-ES, payload=(int)96\" ! rtpmp4vdepay ! ffdec_mpeg4 ! ffmpegcolorspace ! ximagesink"
        )

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)
Example #53
0
    def copyThumbPic(self, fsink, buffer, pad, user_data=None):
        """Handoff callback: capture one JPEG thumbnail, hand it to the
        model, then kick off remuxing of the recorded ogg video with the
        recorded wav audio (re-encoded to vorbis) into mux.ogg.

        The _thumb_exposure_open flag ensures only a single buffer is
        consumed per exposure.
        """
        if not self._thumb_exposure_open:
            return

        self._thumb_exposure_open = False
        # Decode the raw JPEG buffer into a pixbuf for the model/UI.
        loader = gtk.gdk.pixbuf_loader_new_with_mime_type("image/jpeg")
        loader.write(buffer)
        loader.close()
        self.thumbBuf = loader.get_pixbuf()
        self.model.still_ready(self.thumbBuf)

        # Detach the thumbnail branch now that the still has been taken.
        self._thumb_element('thumb_tee').unlink(
            self._thumb_element('thumb_queue'))

        oggFilepath = os.path.join(Instance.instancePath, "output.ogg")  #ogv
        wavFilepath = os.path.join(Instance.instancePath, "output.wav")
        muxFilepath = os.path.join(Instance.instancePath, "mux.ogg")  #ogv

        # Remux pipeline: the theora stream from output.ogg and the wav
        # audio (converted + vorbis-encoded) are merged into one ogg file.
        muxline = gst.parse_launch(
            'filesrc location=' + str(oggFilepath) +
            ' name=muxVideoFilesrc ! oggdemux name=muxOggdemux ! theoraparse ! oggmux name=muxOggmux ! filesink location='
            + str(muxFilepath) + ' name=muxFilesink filesrc location=' +
            str(wavFilepath) +
            ' name=muxAudioFilesrc ! wavparse name=muxWavparse ! audioconvert name=muxAudioconvert ! vorbisenc name=muxVorbisenc ! muxOggmux.'
        )
        # Stamp the video's metadata tags onto the vorbis encoder.
        taglist = self._get_tags(constants.TYPE_VIDEO)
        vorbis_enc = muxline.get_by_name('muxVorbisenc')
        vorbis_enc.merge_tags(taglist, gst.TAG_MERGE_REPLACE_ALL)

        muxBus = muxline.get_bus()
        muxBus.add_signal_watch()
        self._video_transcode_handler = muxBus.connect(
            'message', self._onMuxedVideoMessageCb, muxline)
        self._mux_pipes.append(muxline)
        #add a listener here to monitor % of transcoding...
        self._transcode_id = gobject.timeout_add(200, self._transcodeUpdateCb,
                                                 muxline)
        muxline.set_state(gst.STATE_PLAYING)
Example #54
0
    def build_pipeline(self):
        """Construct the recording pipeline for self.videofile.

        Video (real camera, or a black test pattern when record-video is
        off) is theora-encoded and ogg-muxed together with vorbis audio
        into videofile, while a tee branch feeds a live preview sink.
        """
        if self.videofile is None:
            return
        # NOTE: the launch string below is interpolated with ``% locals()``,
        # so these local variable NAMES (videofile, audiosrc, videosrc,
        # videosink) must match the %(...)s placeholders -- do not rename.
        videofile = self.videofile
        if config.data.player['audio-record-device'] not in ('default', ''):
            audiosrc = 'alsasrc device=' + config.data.player[
                'audio-record-device']
        else:
            audiosrc = 'alsasrc'
        videosrc = 'autovideosrc'
        videosink = 'autovideosink'

        if not config.data.player['record-video']:
            # Generate black image
            videosrc = 'videotestsrc pattern=2'

        self.pipeline = gst.parse_launch(
            '%(videosrc)s name=videosrc ! video/x-raw-yuv,width=352,pixel-aspect-ratio=(fraction)1/1 ! queue ! tee name=tee ! ffmpegcolorspace ! theoraenc drop-frames=1 ! queue ! oggmux name=mux ! filesink location=%(videofile)s  %(audiosrc)s name=audiosrc ! audioconvert ! audiorate ! queue ! vorbisenc quality=0.5 ! mux.  tee. ! queue ! %(videosink)s name=sink sync=false'
            % locals())
        self.imagesink = self.pipeline.get_by_name('sink')
        self.videosrc = self.pipeline.get_by_name('videosrc')
        self.audiosrc = self.pipeline.get_by_name('audiosrc')
        self.player = self.pipeline

        # Asynchronous XOverlay support.
        bus = self.pipeline.get_bus()
        bus.enable_sync_message_emission()

        def on_sync_message(bus, message):
            # Embed the preview sink into our window once the sink asks
            # for an X window id.
            if message.structure is None:
                return
            if message.structure.get_name(
            ) == 'prepare-xwindow-id' and self.xid is not None:
                message.src.set_xwindow_id(self.xid)
                if hasattr(message.src.props, 'force-aspect-ratio'):
                    message.src.set_property("force-aspect-ratio", True)

        bus.connect('sync-message::element', on_sync_message)
Example #55
0
def sourceToWav (source, sink):
	"""
	Converts a given source element to wav format and sends it to sink element.

	To convert a media file to a wav using gst-launch:
	source ! decodebin ! audioconvert ! audioscale !
	         audio/x-raw-int, channels=2, rate=44100, width=16 ! wavenc
	"""
	# Local renamed from ``bin`` (shadowed the builtin); the closing paren
	# below was space-indented inside this tab-indented function, which is
	# a fatal tab/space mix under Python 3 -- now tab-indented.
	pipeline = gst.parse_launch (
		"decodebin ! audioconvert ! audioscale ! "
		"audio/x-raw-int, channels=2,rate=44100, width=16 ! wavenc"
	)
	oper = GstOperation(sink, pipeline)

	# NOTE(review): assumes get_list() yields the decodebin first and the
	# wavenc last, as the original code did -- confirm for this gst version.
	elements = pipeline.get_list ()
	decoder = elements[0]
	encoder = elements[-1]

	oper.bin.add_many (source, sink)
	source.link (decoder)
	encoder.link (sink)

	return oper
Example #56
0
    def __init__(self, callback=None):
        """Create a gnomevfssrc -> dspmp3sink playback bin.

        If *callback* is given it is invoked with this player whenever the
        current track reaches end-of-stream.
        """
        self.is_playing = False
        self.filename = None

        self.bin = gst.parse_launch("gnomevfssrc name=source ! dspmp3sink")
        self.source = self.bin.get_by_name("source")
        self.bus = self.bin.get_bus()
        self.bus.enable_sync_message_emission()
        self.bus.add_signal_watch()

        if callback:

            def handle_bus_message(bus, message):
                kind = message.type
                if kind == gst.MESSAGE_EOS:
                    # Track finished: reset state, then notify the caller.
                    self.is_playing = False
                    self.filename = None
                    callback(self)
                elif kind == gst.MESSAGE_ERROR:
                    err, debug = message.parse_error()
                    print "Error: %s" % err, debug

            self.bus.connect("message", handle_bus_message)
Example #57
0
	def __init__ (self, source):
		"""Wrap *source* in a decodebin ! fakesink pipeline used only to
		read tags and data chunks (via handoff) from the stream.
		"""
		operations.Operation.__init__ (self)
		self.__can_start = True
		# Local renamed from ``bin`` to avoid shadowing the builtin; a
		# stray whitespace-only line was also removed.
		pipeline = gst.parse_launch ("decodebin ! fakesink")

		self.__oper = GstOperation(pipeline = pipeline)
		self.__oper.listeners.append (self)

		self.__metadata = {}
		self.__error = None

		pipeline.connect ("found-tag", self.__on_found_tag)

		# Last element of the bin is the sink
		sink = pipeline.get_list ()[-1]
		sink.set_property ("signal-handoffs", True)
		# handoffs are emitted each time the sink processes one chunk of data
		sink.connect ("handoff", self.__on_handoff)
		self.__oper.element = sink

		# connect source to the first element on the pipeline
		source.link (pipeline.get_list ()[0])
		pipeline.add (source)
Example #58
0
 def videoSetup(self):
     """Build the playback pipeline that hands decoded RGB frames to
     self.newFrame via a fakesink handoff; audio plays unless muted."""
     audio_branch = ""
     if not self.mute:
         audio_branch = "! queue ! audioconvert ! audiorate ! audioresample ! autoaudiosink"
     pipeline_desc = "filesrc name=input ! decodebin2 name=dbin dbin. ! ffmpegcolorspace ! video/x-raw-rgb ! fakesink name=output signal-handoffs=true sync=true dbin. %s" % audio_branch
     self.player = gst.parse_launch(pipeline_desc)

     self.input = self.player.get_by_name('input')
     self.fakeSink = self.player.get_by_name('output')
     self.input.set_property("location", self.videoSrc)
     self.fakeSink.connect("handoff", self.newFrame)

     # Catch the end of file as well as errors
     # FIXME:
     #  Messages are sent if i use the following in run():
     #   gobject.MainLoop().get_context().iteration(True)
     #  BUT the main python thread then freezes after ~5 seconds...
     #  unless we use gobject.idle_add(self.player.elements)
     bus = self.player.get_bus()
     bus.add_signal_watch()
     bus.enable_sync_message_emission()
     bus.connect("message", self.onMessage)

     # Required to prevent the main python thread from freezing, why?!
     # Thanks to max26199 for finding this!
     gobject.idle_add(self.player.elements)
Example #59
0
    def __init__(self):
        """Show a borderless window rendering the local v4l2 webcam feed."""
        win = gtk.Window(gtk.WINDOW_TOPLEVEL)
        win.set_decorated(False)
        win.set_title("SelfCam")
        # You can change Width and Height below
        win.set_default_size(400, 300)
        win.connect("destroy", gtk.main_quit, "WM destroy")

        container = gtk.VBox()
        win.add(container)
        self.cam_view = gtk.DrawingArea()
        container.add(self.cam_view)

        win.show_all()

        # Simplest possible capture pipeline: camera straight to a sink.
        self.player = gst.parse_launch("v4l2src ! autovideosink")

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect('message', self.on_message)
        bus.connect('sync-message::element', self.on_sync_message)

        self.player.set_state(gst.STATE_PLAYING)
Example #60
0
    def start_pipeline(self, channels, samplerate):
        """Build and start the encoder pipeline described by self.pipe.

        Configures the appsrc element for raw interleaved float audio with
        the given channel count and sample rate, optionally wires up the
        streaming appsink, then runs the pipeline under its own gobject
        main loop thread.
        """
        self.pipeline = gst.parse_launch(self.pipe)
        # Keep a handle on the appsrc feeding the encoder.
        self.src = self.pipeline.get_by_name('src')

        if self.streaming:
            import Queue
            self._streaming_queue = Queue.Queue(QUEUE_SIZE)
            # Keep a handle on the appsink delivering encoded buffers.
            self.app = self.pipeline.get_by_name('app')
            self.app.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
            self.app.set_property("drop", False)
            self.app.set_property('emit-signals', True)
            self.app.connect("new-buffer", self._on_new_buffer_streaming)
            #self.app.connect('new-preroll', self._on_new_preroll_streaming)

        source_caps = gst.Caps("""audio/x-raw-float,
            endianness=(int)1234,
            channels=(int)%s,
            width=(int)32,
            rate=(int)%d""" % (int(channels), int(samplerate)))
        self.src.set_property("caps", source_caps)
        self.src.set_property('emit-signals', True)
        self.src.set_property('num-buffers', -1)
        self.src.set_property('block', False)
        self.src.set_property('do-timestamp', True)

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect("message", self._on_message_cb)

        # Run the GLib main loop on its own thread so bus callbacks fire.
        self.mainloop = gobject.MainLoop()
        self.mainloopthread = MainloopThread(self.mainloop)
        self.mainloopthread.start()

        # start pipeline
        self.pipeline.set_state(gst.STATE_PLAYING)