コード例 #1
0
    def constructPipeline(self):
        """Create the pipeline and populate it with the elements needed
        to cut a slice out of the input audio and encode it as MP3."""
        self.pipeline = gst.Pipeline()

        # gnlfilesource reads and decodes the input; its dynamic-pad
        # signal is hooked up later in self.connect_signals().
        self.filesrc = gst.element_factory_make("gnlfilesource")
        for prop, value in (
                ("uri", "file:///" + self.inFileLocation),
                ("media-start", self.media_start_time*gst.SECOND),
                ("media-duration", self.media_duration*gst.SECOND)):
            self.filesrc.set_property(prop, value)

        self.audioconvert = gst.element_factory_make("audioconvert")
        self.encoder = gst.element_factory_make("lame", "mp3_encoder")
        self.filesink = gst.element_factory_make("filesink")
        self.filesink.set_property("location", self.outFileLocation)

        self.pipeline.add(self.filesrc, self.audioconvert,
                          self.encoder, self.filesink)
        # The filesrc pad appears dynamically, so only the static part
        # of the chain (convert -> encode -> sink) is linked here.
        gst.element_link_many(self.audioconvert, self.encoder,
                              self.filesink)
コード例 #2
0
 def __init__(self, name, audiosrc, partial_cb, final_cb, lm_path=None, dict_path=None):
   """ Sets up the gstreamer pipeline and registers callbacks.
       Partial and full callbacks must take arguments (name, uttid, text)

       name -- identifier passed back through the callbacks
       audiosrc -- an already-created gst source element to recognize from
       lm_path / dict_path -- optional language model / dictionary for
           pocketsphinx; when omitted, the element's defaults are used
   """
   #rospy.Subscriber("chatter", String, callback)
   
   self.name = name
   self.partial_cb = partial_cb
   self.final_cb = final_cb
   self.pipe = gst.Pipeline()
   
   # Normalize the source's format/rate for the recognizer.
   conv = gst.element_factory_make("audioconvert", "audioconv")
   res = gst.element_factory_make("audioresample", "audioresamp")
   
   # Vader controls when sphinx listens for spoken text
   vader = gst.element_factory_make("vader", "vad")
   vader.set_property("auto-threshold", True)
   
   asr = gst.element_factory_make("pocketsphinx", "asr")
   asr.connect('partial_result', self.asr_partial_result)
   asr.connect('result', self.asr_result)
   if lm_path and dict_path:
     asr.set_property('lm', lm_path)
     asr.set_property('dict', dict_path)
   # 'configured' tells pocketsphinx the properties are final.
   asr.set_property('configured', True)
   
   # we don't do anything with the actual audio data after transcription,
   # but you could e.g. write the audio to a file here instead.
   sink = gst.element_factory_make("fakesink", "fs")
   
   self.pipe.add(audiosrc, conv, res, vader, asr, sink)
   gst.element_link_many(audiosrc, conv, res, vader, asr, sink)
   # Start recognizing immediately.
   self.pipe.set_state(gst.STATE_PLAYING)
コード例 #3
0
    def __init__(self, client):
        """Build the playback pipeline for *client*.

        Chain: filesrc -> decodebin -> audioconvert -> audio sink, where
        decodebin's source pad is linked to audioconvert dynamically.
        """
        self.client = client

        pipeline = gst.element_factory_make("pipeline", "player")
        filesrc = gst.element_factory_make("filesrc", "player.filesrc")
        decodebin = gst.element_factory_make("decodebin", "player.decodebin")
        audioconvert = gst.element_factory_make("audioconvert",
                                                "player.audioconvert")
        try:
            sink = gst.element_factory_make("pulsesink", "player.pulsesink")
        except gst.ElementNotFoundError:
            # BUG FIX: was a bare ``except:``, which also swallows
            # KeyboardInterrupt/SystemExit.  Only a missing pulsesink
            # element should trigger the OS X fallback sink.
            sink = gst.element_factory_make("osxaudiosink",
                                            "player.osxaudiosink")

        # decodebin only exposes its source pad once the stream type is
        # known, so link it to audioconvert's sink pad from a callback.
        decodebin.connect("new-decoded-pad",
                          lambda bin, pad, islast, sink: pad.link(sink),
                          audioconvert.get_pad("sink"))

        pipeline.add(filesrc, decodebin, audioconvert, sink)
        gst.element_link_many(filesrc, decodebin)
        gst.element_link_many(audioconvert, sink)

        bus = pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message::eos", self.player_eos_cb)
        # NOTE(review): errors are routed to the EOS handler as well —
        # confirm this is intentional and not a copy/paste slip.
        bus.connect("message::error", self.player_eos_cb)
        bus.connect("message::state-changed", self.player_state_changed_cb)

        self.pipeline = pipeline
        self.is_playable = False
        self.is_playing = False
        self.playback_tracker_id = 0
コード例 #4
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def _add_audiosink(self, audio_sink, buffer_position):
        '''Sets up the new audiosink and syncs it.

        audio_sink -- freshly created sink element, appended after the
            last element currently in this bin's chain
        buffer_position -- the output of get_position() on the previous
            sink, or None; when set, the stream is re-seeked there
        '''

        self.add(audio_sink)
        audio_sink.sync_state_with_parent()
        # Append the new sink to the tail of the existing element chain.
        gst.element_link_many(self._elements[-1], audio_sink)

        if buffer_position is not None:

            # buffer position is the output from get_position. If set, we
            # seek to that position.

            # TODO: this actually seems to skip ahead a tiny bit. why?

            # Note! this is super important in paused mode too, because when
            #       we switch the sinks around the new sink never goes into
            #       the paused state because there's no buffer. This forces
            #       a resync of the buffer, so things still work.

            # NOTE(review): the seek uses FORMAT_DEFAULT — confirm that
            # get_position() was queried with the same format upstream.
            seek_event = gst.event_new_seek(1.0, gst.FORMAT_DEFAULT,
                                            gst.SEEK_FLAG_FLUSH,
                                            gst.SEEK_TYPE_SET,
                                            buffer_position[0],
                                            gst.SEEK_TYPE_NONE, 0)

            self.send_event(seek_event)

        self.audio_sink = audio_sink
コード例 #5
0
ファイル: aubioinput.py プロジェクト: Objzilla/aubio
    def __init__(self, uri, process = None, hopsize = 512,
            caps = None):
        """Build and start a pipeline feeding decoded audio to *process*
        one hop of samples at a time via an AubioSink."""
        # A plain filesystem path becomes a proper file:// URI.
        if uri.startswith('/'):
            from urllib import quote
            uri = 'file://'+quote(uri)
        # uridecodebin creates pads dynamically once the stream type is
        # known; source_pad_added_cb links them to the converter.
        src = gst.element_factory_make('uridecodebin')
        src.set_property('uri', uri)
        src.connect('pad-added', self.source_pad_added_cb)
        conv = gst.element_factory_make('audioconvert')
        self.conv = conv
        rsmpl = gst.element_factory_make('audioresample')
        # Optional caps restriction on the converted stream.
        capsfilter = gst.element_factory_make('capsfilter')
        if caps:
            capsfilter.set_property('caps', gst.caps_from_string(caps))
        sink = AubioSink("AubioSink", process = process)
        sink.set_property('hopsize', hopsize) # * calcsize('f'))

        self.pipeline = gst.Pipeline()

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message', self.on_eos)

        # Converter's sink pad: target for the dynamically added pads.
        self.apad = conv.get_pad('sink')

        self.pipeline.add(src, conv, rsmpl, capsfilter, sink)

        gst.element_link_many(conv, rsmpl, capsfilter, sink)

        self.mainloop = gobject.MainLoop()
        self.pipeline.set_state(gst.STATE_PLAYING)
コード例 #6
0
    def _addCommonAudioElements(self, audio_bin, output_stream):
        """Attach the shared audio chain (convert -> resample -> rate ->
        volume) to audio_bin and sync every element with its parent."""
        self.debug("Adding volume element")
        # Create the four internal elements and store each on the bin.
        for attr, factory, name in (
                ("aconv", "audioconvert", "internal-aconv"),
                ("ares", "audioresample", "internal-audioresample"),
                ("arate", "audiorate", "internal-audiorate"),
                ("volume", "volume", "internal-volume")):
            setattr(audio_bin, attr, gst.element_factory_make(factory, name))
        # Fix audio jitter of up to 40ms
        audio_bin.arate.props.tolerance = 40 * gst.MSECOND

        audio_bin.add(audio_bin.volume, audio_bin.ares, audio_bin.aconv,
                      audio_bin.arate)
        gst.element_link_many(audio_bin.aconv, audio_bin.ares, audio_bin.arate,
                              audio_bin.volume)

        for element in (audio_bin.aconv, audio_bin.ares,
                        audio_bin.arate, audio_bin.volume):
            element.sync_state_with_parent()
コード例 #7
0
    def _init_video_sink(self):
        """Build the scaling/conversion video-sink bin and install it as
        the player's video-sink property."""
        self.bin = gst.Bin()
        scaler = gst.element_factory_make('videoscale')
        self.bin.add(scaler)
        self.bin.add_pad(gst.GhostPad('sink', scaler.get_pad('sink')))
        scaler.set_property('method', 0)

        # Sigh... xvimagesink on the XOs will scale the video to fit
        # but ximagesink in Xephyr does not.  So we live with unscaled
        # video in Xephyr so that the XO can work right.
        alloc = self.videowidget.get_allocation()
        if alloc.width > 500 and alloc.height > 500:
            width = 480
            height = float(width) / float(float(alloc.width) /
                                          float(alloc.height))
            size = 'width=%d, height=%d' % (width, height)
        else:
            size = 'width=480, height=360'
        caps = gst.Caps('video/x-raw-yuv, ' + size)

        self.filter = gst.element_factory_make('capsfilter', 'filter')
        self.bin.add(self.filter)
        self.filter.set_property('caps', caps)

        converter = gst.element_factory_make('ffmpegcolorspace', 'conv')
        self.bin.add(converter)
        videosink = gst.element_factory_make('autovideosink')
        self.bin.add(videosink)
        gst.element_link_many(scaler, self.filter, converter, videosink)
        self.player.set_property('video-sink', self.bin)
コード例 #8
0
    def constructPipeline(self):
        """
        Build the GStreamer pipeline, add and link various
        GStreamer elements.
        """
        self.pipeline = gst.Pipeline()

        # Source and sinks.  The fakesink is a placeholder output used
        # while prerolling; filesink later receives the encoded audio.
        filesrc = gst.element_factory_make("filesrc")
        filesrc.set_property("location", self.inFileLocation)
        self.fakesink = gst.element_factory_make("fakesink")
        # NOTE: created but never added to the pipeline in this method.
        autoaudiosink = gst.element_factory_make("autoaudiosink")
        self.filesink = gst.element_factory_make("filesink")
        self.filesink.set_property("location", self.outFileLocation)

        # Decode / convert / encode chain.
        self.decodebin = gst.element_factory_make("decodebin")
        self.audioconvert = gst.element_factory_make("audioconvert")
        self.encoder = gst.element_factory_make("lame", "mp3_encoder")

        self.pipeline.add(filesrc, self.decodebin, self.audioconvert,
                          self.encoder, self.fakesink)

        # decodebin's source pad appears dynamically, so the two halves
        # of the chain are linked separately.
        gst.element_link_many(filesrc, self.decodebin)
        gst.element_link_many(self.audioconvert, self.encoder, self.fakesink)
コード例 #9
0
    def extractAudio(self):
        """
        Extract a piece of audio from the input audio.
        @see: self.run() which calls this method.
        """
        if not self.seek_done:
            time.sleep(0.1)
            self.duration = (self.pipeline.query_duration(
                gst.FORMAT_TIME, None)[0])
            self.duration = self.duration / gst.SECOND

            if self.start_time > self.duration:
                print(
                    "\n start time specified"
                    " is more than the total audio duration"
                    " resetting the start time to 0 sec")
                self.start_time = 0.0

            self.pipeline.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH,
                                      self.start_time * gst.SECOND)

            self.pipeline.set_state(gst.STATE_PAUSED)
            self.seek_done = True
            self.pipeline.remove(self.fakesink)

            self.pipeline.add(self.filesink)
            gst.element_link_many(self.encoder, self.filesink)
            self.pipeline.set_state(gst.STATE_PLAYING)

        time.sleep(0.1)
        try:
            self.position = self.pipeline.query_position(
                gst.FORMAT_TIME, None)[0]
            self.position = self.position / gst.SECOND
        except gst.QueryError:
            # The pipeline has probably reached
            # the end of the audio, (and thus has 'reset' itself.)
            if self.duration is None:
                self.error_msg = ("\n Error cutting the audio file."
                                  " Unable to determine the audio duration.")
                self.pipeline.set_state(gst.STATE_NULL)
                self.is_playing = False
            if (self.position <= self.duration and self.position >
                (self.duration - 10)):
                # Position close to the end of file.
                # Do nothing to avoid a possible traceback.
                #The audio cutting should work
                pass
            else:
                self.error_msg = " Error cutting the audio file"
                self.pipeline.set_state(gst.STATE_NULL)
                self.is_playing = False

        if not self.end_time is None:
            if self.position >= self.end_time:
                self.pipeline.set_state(gst.STATE_NULL)
                self.is_playing = False

        if self.verbose_mode:
            print "\n Current play time: =", self.position
コード例 #10
0
    def on_realize(self, widget, data=None):
        """Build the playback pipeline once the window is realized.

        Two gnlcomposition sources (audio/video) are routed through a
        KeySeekElement and the usual convert/scale chains into
        auto-selected sinks.
        """
        # xid must be retrieved first in GUI-thread and before creating
        # pipeline to prevent racing conditions
        self.movie_xid = self.movie_window.window.xid
        # The pipeline
        self.player = gst.Pipeline()

        # Create bus and connect several handlers
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)

        self.audio_composition = gst.element_factory_make(
            "gnlcomposition", "audio-composition")
        self.video_composition = gst.element_factory_make(
            "gnlcomposition", "video-composition")

        # Create sinks
        self.audiosink = gst.element_factory_make('autoaudiosink')
        self.videosink = gst.element_factory_make('autovideosink')

        # pipeline elements
        self.audioconvert = gst.element_factory_make('audioconvert')
        self.audioresample = gst.element_factory_make('audioresample')
        self.videoscale = gst.element_factory_make('videoscale')
        self.ffmpegcolorspace = gst.element_factory_make('ffmpegcolorspace')

        # Connect handler for 'pad-added' signal
        def on_pad_added(element, pad, sink_pad):
            caps = pad.get_caps()
            name = caps[0].get_name()

            if name == 'video/x-raw-rgb':
                if not sink_pad.is_linked():  # Only link once
                    pad.link(sink_pad)

            # BUG FIX: the original condition was
            #   elif 'audio/x-raw-float' or name == 'audio/x-raw-int':
            # where the bare string literal is always truthy, so every
            # non-RGB pad was treated as audio.  Compare the caps name.
            elif name in ('audio/x-raw-float', 'audio/x-raw-int'):
                if not sink_pad.is_linked():  # Only link once
                    pad.link(sink_pad)

        self.key_seek = KeySeekElement.KeySeekElement()
        self.audio_composition.connect('pad-added', on_pad_added,
                                       self.key_seek.get_pad('keyseek-sink'))
        self.video_composition.connect('pad-added', on_pad_added,
                                       self.key_seek.get_pad('secondary-sink'))

        # Add elements to pipeline
        self.player.add(self.audio_composition, self.audioconvert,
                        self.audioresample, self.audiosink,
                        self.video_composition, self.key_seek,
                        self.ffmpegcolorspace, self.videoscale, self.videosink)
        self.key_seek.get_pad('secondary-src').link(
            self.ffmpegcolorspace.get_pad('sink'))
        gst.element_link_many(self.ffmpegcolorspace, self.videoscale,
                              self.videosink)
        self.key_seek.get_pad('keyseek-src').link(
            self.audioconvert.get_pad('sink'))
        gst.element_link_many(self.audioconvert, self.audioresample,
                              self.audiosink)
コード例 #11
0
    def setupFadeBin(self):
        """Build the fade-out bin (convert -> volume -> convert) and
        expose it through "sink" and "src" ghost pads.

        @see: self.addFadingEffect()
        """
        self.audioconvert = gst.element_factory_make("audioconvert")
        self.volume = gst.element_factory_make("volume")
        self.audioconvert2 = gst.element_factory_make("audioconvert")

        chain = (self.audioconvert, self.volume, self.audioconvert2)
        self.fadeBin = gst.element_factory_make("bin", "fadeBin")
        self.fadeBin.add(*chain)
        gst.element_link_many(*chain)

        # Ghost pads expose the inner chain's endpoints on the bin.
        self.fadeBinSink = gst.GhostPad("sink",
                                        self.audioconvert.get_pad("sink"))
        self.fadeBinSrc = gst.GhostPad("src",
                                       self.audioconvert2.get_pad("src"))
        self.fadeBin.add_pad(self.fadeBinSink)
        self.fadeBin.add_pad(self.fadeBinSrc)
コード例 #12
0
    def start(self, input_dir, output_file):
        '''
        Process the input files and output the video file

        input_dir -- directory holding the normalized *.jpg frames,
            expected to be named %04d.jpg starting at index 1
        output_file -- target video path; its extension selects the
            encoder/muxer when a known container is recognized
        '''
        self.normalize_images(input_dir)
        no_files = len(glob.glob(os.path.join(input_dir, '*.jpg')))
        if self.verbose:
            logging.debug('Number of files to encode as video: %s', no_files)

        pipeline = gst.Pipeline("pipeline")

        source = self.get_element("multifilesrc")
        source_location = os.path.join(input_dir, "%04d.jpg")
        if self.verbose:
            logging.debug("Source location: %s", source_location)
        source.set_property('location', source_location)
        source.set_property('index', 1)
        # Fixed 4 fps input rate for the JPEG frame sequence.
        source_caps = gst.Caps()
        source_caps.append('image/jpeg,framerate=(fraction)4/1')
        source.set_property('caps', source_caps)

        decoder = self.get_element("jpegdec")

        # Attempt to auto detect the chosen encoder/mux based on output_file
        encoder = None
        container = None

        for container_name in self.CONTAINER_ENCODER_MAPPING:
            if output_file.endswith('.%s' % container_name):

                enc_name = self.CONTAINER_ENCODER_MAPPING[container_name]
                enc_name_gst = self.ENCODER_MAPPING[enc_name]
                encoder = self.get_element(enc_name_gst)

                cont_name_gst = self.CONTAINER_MAPPING[container_name]
                container = self.get_element(cont_name_gst)

        # If auto detection fails, choose from the list of preferred codec/mux
        if encoder is None:
            encoder = self.get_element(self.get_encoder_name())
        if container is None:
            container = self.get_element(self.get_container_name())

        output = self.get_element("filesink")
        output.set_property('location', output_file)

        pipeline.add_many(source, decoder, encoder, container, output)
        gst.element_link_many(source, decoder, encoder, container, output)

        pipeline.set_state(gst.STATE_PLAYING)
        # Poll the source's frame index until every image was consumed.
        # NOTE(review): busy-wait plus a fixed 3-second drain sleep —
        # relies on timing rather than waiting for an EOS bus message;
        # confirm this is acceptable for the intended inputs.
        while True:
            if source.get_property('index') <= no_files:
                if self.verbose:
                    logging.debug("Currently processing image number: %s",
                                  source.get_property('index'))
                time.sleep(1)
            else:
                break
        time.sleep(3)
        pipeline.set_state(gst.STATE_NULL)
コード例 #13
0
ファイル: encoders.py プロジェクト: mmilkin/jukebox
    def encode(self):
        """Decode self.song's URI and re-encode it as MP3, handing each
        encoded buffer to self.data_ready through an appsink.

        NOTE(review): this is a generator that yields deferred-like
        values (Twisted ``inlineCallbacks`` style, presumably) — the
        decorator is outside this view; confirm before refactoring.
        """
        # Local imports keep gst out of module import time.
        import pygst
        pygst.require('0.10')
        import gst

        self.encoder = gst.Pipeline('encoder')

        decodebin = gst.element_factory_make('uridecodebin', 'uridecodebin')
        # Wait for the song's URI before configuring the source.
        uri = yield self.song.get_uri()
        decodebin.set_property('uri', uri)
        decodebin.connect('pad-added', self.on_new_pad)

        audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')

        lame = gst.element_factory_make('lamemp3enc', 'lame')
        lame.set_property('quality', 1)

        # appsink emits 'new-buffer' so encoded data can be pulled out.
        sink = gst.element_factory_make('appsink', 'appsink')
        sink.set_property('emit-signals', True)
        sink.connect('new-buffer', self.data_ready)

        self.encoder.add(decodebin, audioconvert, lame, sink)
        # decodebin links to audioconvert dynamically (see on_new_pad).
        gst.element_link_many(audioconvert, lame, sink)

        self.encoder.set_state(gst.STATE_PAUSED)

        bus = self.encoder.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_message)

        result = yield self.done
        defer.returnValue(result)
コード例 #14
0
ファイル: dvenc.py プロジェクト: lucasa/landell-fgdp
 def __init__(self, type):
     """Build a DV encoder bin, muxing optional audio/video branches.

     type -- bitmask of INPUT_TYPE_AUDIO / INPUT_TYPE_VIDEO selecting
         which branches to create; both branches feed ffmux_dv.
     """
     Encoder.__init__(self, type)
     print "dv"
     ffmux = gst.element_factory_make("ffmux_dv", "ffmux")
     self.add(ffmux)
     if type & INPUT_TYPE_AUDIO:
         audioconvert = gst.element_factory_make(
             "audioconvert", "audioconvert"
         )
         self.add(audioconvert)
         queue_audio = gst.element_factory_make(
                 "queue", "queue_audio_enc"
         )
         self.add(queue_audio)
         gst.element_link_many(
                 audioconvert, queue_audio, ffmux
         )
         # Route the bin's audio ghost pad into the convert chain.
         self.audio_pad.set_target(audioconvert.sink_pads().next())
     if type & INPUT_TYPE_VIDEO:
         dvenc = gst.element_factory_make("ffenc_dvvideo", "dvenc")
         self.add(dvenc)
         queue_video = gst.element_factory_make(
                 "queue", "queue_video_enc"
         )
         self.add(queue_video)
         gst.element_link_many(dvenc, queue_video, ffmux)
         self.video_pad.set_target(dvenc.sink_pads().next())
     # Expose the muxer's output as this bin's source pad.
     self.source_pad.set_target(ffmux.src_pads().next())
コード例 #15
0
	def __init__ (self, p, uri, uri2, vol = 1.0):
		"""Source bin: gnomevfssrc -> mad -> convert -> resample -> volume.

		p -- parent pipeline (only referenced by the commented-out bus code)
		uri -- preferred stream location, used when checkURL(uri) passes
		uri2 -- fallback location used otherwise
		vol -- initial volume applied to this branch
		"""
		gst.Bin.__init__(self)
		self.uri2 = uri2

#		bus = p.get_bus()
#		bus.add_signal_watch()
#		bus.connect("message", self.get_message)

		self.gnomevfssrc = gst.element_factory_make("gnomevfssrc")
		if checkURL(uri):
			self.gnomevfssrc.set_property("location", uri)
		else:
			self.gnomevfssrc.set_property("location", uri2)
			
		mad = gst.element_factory_make("mad")
		audioconvert = gst.element_factory_make("audioconvert")
		audioresample = gst.element_factory_make("audioresample")
		# current/target volume pair — presumably consumed by fade logic
		# elsewhere in the class; verify against the callers.
		self.current_vol = vol
		self.target_vol = vol
		self.volume = gst.element_factory_make("volume")
		self.volume.set_property("volume", self.current_vol)
		self.add(self.gnomevfssrc, mad, audioconvert,
			audioresample, self.volume)
		gst.element_link_many(self.gnomevfssrc, mad,
			audioconvert, audioresample, self.volume)
		# Expose the volume element's src pad as the bin's output.
		pad = self.volume.get_pad("src")
		ghostpad = gst.GhostPad("src", pad)
		self.add_pad(ghostpad)
コード例 #16
0
ファイル: Gstreamer.py プロジェクト: Happy-Ferret/screenlets
    def __init__(self):
        # NOTE(review): this first playbin-based player (and its bus
        # watch) is immediately discarded when self.player is rebound to
        # a Pipeline below — confirm whether this block is dead code.
        self.player = gst.element_factory_make("playbin", "player")
        fakesink = gst.element_factory_make('fakesink', "my-fakesink")
        self.player.set_property("video-sink", fakesink)
        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.on_message)

        # Manually assembled Ogg/Vorbis playback pipeline.
        self.player = gst.Pipeline("player")
        source = gst.element_factory_make("filesrc", "file-source")
        self.player.add(source)
        demuxer = gst.element_factory_make("oggdemux", "demuxer")
        self.player.add(demuxer)
        # oggdemux pads appear dynamically; demuxer_callback links them.
        demuxer.connect("pad-added", self.demuxer_callback)
        self.audio_decoder = gst.element_factory_make("vorbisdec",
                                                      "vorbis-decoder")
        self.player.add(self.audio_decoder)
        audioconv = gst.element_factory_make("audioconvert", "converter")
        self.player.add(audioconv)
        audiosink = gst.element_factory_make("autoaudiosink", "audio-output")
        self.player.add(audiosink)

        # The demuxer half and the decoder half are linked separately;
        # the demuxer->decoder link happens in the pad-added callback.
        gst.element_link_many(source, demuxer)
        gst.element_link_many(self.audio_decoder, audioconv, audiosink)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)
コード例 #17
0
	def __init__(self, message_callback, sync_message_callback):
		"""Wrap playbin2 with a custom video-sink bin.

		message_callback / sync_message_callback -- stored for use by
		the private bus handlers (_on_message / _on_sync_message).
		"""
		self.player = gst.element_factory_make("playbin2", "player")
		bus = self.player.get_bus()
		bus.add_signal_watch()
		bus.enable_sync_message_emission()
		bus.connect("message", self._on_message)
		bus.connect("sync-message::element", self._on_sync_message)

		# Video chain: timeoverlay -> colorspace -> custom element -> sink.
		timeoverlay = gst.element_factory_make("timeoverlay")
		colorspace = gst.element_factory_make('ffmpegcolorspace', 'color_pis')
		custom = CustomElement()
		autovideosink = gst.element_factory_make("autovideosink")

		my_bin = gst.Bin("my-bin")
		my_bin.add(timeoverlay, colorspace, custom, autovideosink)
		gst.element_link_many(timeoverlay, colorspace, custom, autovideosink)

		# Expose timeoverlay's video sink pad as the bin's single input.
		timeoverlay_video_sink_pad = timeoverlay.get_pad("video_sink")
		ghostpad_sink = gst.GhostPad("sink", timeoverlay_video_sink_pad)
		my_bin.add_pad(ghostpad_sink)


		# Because playbin2 is a pipeline, we set our bin as the video sink
		self.player.set_property("video-sink", my_bin)

		self.message_callback = message_callback
		self.sync_message_callback = sync_message_callback
コード例 #18
0
ファイル: operation.py プロジェクト: ironss/pitivi-tweaking
    def _makeBin(self, *args):
        """Create the audio adapter bin (identity -> audioconvert ->
        audioresample -> audiorate) with active ghost pads on both ends,
        optionally capped by a capsfilter on the output."""
        bin_ = gst.Bin()
        idt = gst.element_factory_make("identity", "single-segment")
        idt.props.single_segment = True
        idt.props.silent = True
        aconv = gst.element_factory_make("audioconvert", "aconv")
        ares = gst.element_factory_make("audioresample", "ares")
        arate = gst.element_factory_make("audiorate", "arate")
        bin_.add(idt, aconv, ares, arate)
        gst.element_link_many(idt, aconv, ares, arate)

        sink_pad = gst.GhostPad("sink", idt.get_pad("sink"))
        sink_pad.set_active(True)
        bin_.add_pad(sink_pad)

        if len(self.output_streams):
            # Constrain the output to the requested stream's caps.
            cfilter = gst.element_factory_make("capsfilter")
            cfilter.props.caps = self.output_streams[0].caps
            bin_.add(cfilter)
            arate.link(cfilter)
            src_pad = gst.GhostPad("src", cfilter.get_pad("src"))
        else:
            src_pad = gst.GhostPad("src", arate.get_pad("src"))
        src_pad.set_active(True)
        bin_.add_pad(src_pad)
        return bin_
コード例 #19
0
	def decoder_pad_added(self, decoder, pad):
		"""Link a newly exposed decoder pad into the transcoder.

		h264 video and mpeg audio are passed through untouched; ac3
		audio gets an ac3parse inserted; everything else is re-encoded
		(ffenc_mpeg4 for video, ffenc_mp2 for audio).
		"""
		caps_string = pad.get_caps().to_string()
		if caps_string.startswith('video'):
			pad.link(self.video_input_queue.get_pad('sink'))
			if caps_string.startswith('video/x-h264'):
				#h264parse = gst.element_factory_make('h264parse', 'h264parse')
				#h264parse.set_property('output-format', 1)
				#self.transcoder.add(h264parse)
				#gst.element_link_many(self.video_input_queue, h264parse, self.video_output_queue, self.muxer)
				gst.element_link_many(self.video_input_queue, self.video_output_queue, self.muxer)
				#h264parse.set_state(gst.STATE_PLAYING)
			else:
				video_encoder = gst.element_factory_make('ffenc_mpeg4', 'video-encoder')
				video_encoder.set_property('bitrate', (2048*1000))
				self.transcoder.add(video_encoder)
				gst.element_link_many(self.video_input_queue, video_encoder, self.video_output_queue, self.muxer)
				# Elements added to an already-running pipeline must be
				# brought to PLAYING by hand.
				video_encoder.set_state(gst.STATE_PLAYING)
		elif caps_string.startswith('audio'):
			pad.link(self.audio_input_queue.get_pad('sink'))
			if caps_string.startswith('audio/x-ac3'):
				ac3parse = gst.element_factory_make('ac3parse', 'ac3parse')
				self.transcoder.add(ac3parse)
				gst.element_link_many(self.audio_input_queue, ac3parse, self.audio_output_queue, self.muxer)
				ac3parse.set_state(gst.STATE_PLAYING)
			elif caps_string.startswith('audio/mpeg'):
				gst.element_link_many(self.audio_input_queue, self.audio_output_queue, self.muxer)
			else:
				audioconvert = gst.element_factory_make('audioconvert', 'audioconvert')
				audio_encoder = gst.element_factory_make('ffenc_mp2', 'audio-encoder')
				self.transcoder.add(audioconvert, audio_encoder)
				gst.element_link_many(self.audio_input_queue, audioconvert, audio_encoder, self.audio_output_queue, self.muxer)
				audioconvert.set_state(gst.STATE_PLAYING)
				audio_encoder.set_state(gst.STATE_PLAYING)
コード例 #20
0
ファイル: operation.py プロジェクト: ironss/pitivi-tweaking
    def _makeBin(self, *args):
        """Create the video adapter bin: identity -> ffmpegcolorspace ->
        videorate -> SmartVideoScale, optionally capped by a capsfilter
        when the first output stream has fixed caps."""
        bin_ = gst.Bin()
        identity = gst.element_factory_make("identity", "single-segment")
        identity.props.single_segment = True
        identity.props.silent = True
        csp = gst.element_factory_make("ffmpegcolorspace", "csp")
        vrate = gst.element_factory_make("videorate", "vrate")

        bin_.add(identity, csp, vrate)
        gst.element_link_many(identity, csp, vrate)

        sink_pad = gst.GhostPad("sink", identity.get_pad("sink"))
        sink_pad.set_active(True)
        bin_.add_pad(sink_pad)

        # The scaler adapts the stream to the first output stream's caps.
        vscale = SmartVideoScale()
        vscale.set_caps(self.output_streams[0].caps)
        bin_.add(vscale)
        vrate.link(vscale)
        self.debug("output_streams:%d", len(self.output_streams))
        if len(self.output_streams) and self.output_streams[0].caps.is_fixed():
            # Fixed caps: enforce them with a capsfilter on the way out.
            # (Renamed from the original's reuse of ``idt``.)
            cfilter = gst.element_factory_make("capsfilter")
            cfilter.props.caps = self.output_streams[0].caps
            bin_.add(cfilter)
            vscale.link(cfilter)
            src_pad = gst.GhostPad("src", cfilter.get_pad("src"))
        else:
            src_pad = gst.GhostPad("src", vscale.get_pad("src"))

        src_pad.set_active(True)
        bin_.add_pad(src_pad)
        return bin_
コード例 #21
0
    def _makeBin(self, *args):
        """Assemble the audio adapter bin (identity -> audioconvert ->
        audioresample -> audiorate) with active ghost pads.

        Unlike the sibling variant, this one does not silence the
        identity element.
        """
        container = gst.Bin()
        identity = gst.element_factory_make("identity", "single-segment")
        identity.props.single_segment = True
        converter = gst.element_factory_make("audioconvert", "aconv")
        resampler = gst.element_factory_make("audioresample", "ares")
        rate = gst.element_factory_make("audiorate", "arate")
        container.add(identity, converter, resampler, rate)
        gst.element_link_many(identity, converter, resampler, rate)

        ghost_sink = gst.GhostPad("sink", identity.get_pad("sink"))
        ghost_sink.set_active(True)
        container.add_pad(ghost_sink)

        # With an output stream specified, constrain caps at the exit.
        if len(self.output_streams):
            caps_filter = gst.element_factory_make("capsfilter")
            caps_filter.props.caps = self.output_streams[0].caps
            container.add(caps_filter)
            rate.link(caps_filter)
            ghost_src = gst.GhostPad("src", caps_filter.get_pad("src"))
        else:
            ghost_src = gst.GhostPad("src", rate.get_pad("src"))
        ghost_src.set_active(True)
        container.add_pad(ghost_src)
        return container
コード例 #22
0
    def add_videofile(self, filepath, alpha_value):
        src = gst.element_factory_make('filesrc')
	#src.set_property('location', '/home/hero/Videos/veejay/test.avi')
	#filepath = ('/home/hero/Videos/veejay/test.avi')
        src.props.location = args[0]
        decodebin = gst.element_factory_make('decodebin2')
        ffcol = gst.element_factory_make("ffmpegcolorspace")

        vscale = gst.element_factory_make('videoscale')

        alpha = gst.element_factory_make('alpha')
        alpha.props.alpha = alpha_value

        def pad_added(elem, pad, target):
            print "pad_added", pad, target
            if str(pad.get_caps()).startswith('video/'):
                elem.link(target)
                src.seek_simple(gst.FORMAT_TIME, gst.SEEK_FLAG_SEGMENT, 0)

        decodebin.connect('pad-added', pad_added, vscale)

        self.add(src, decodebin, ffcol, vscale, alpha)
        src.link(decodebin)
        gst.element_link_many(vscale, ffcol, alpha, self.mixer)

        return (src, alpha)
コード例 #23
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def __init__(self, player, pre_elems=[]):
        """Main output bin: pre_elems -> Postprocessing -> tee, plus a
        queue -> SinkHandler branch hung off the tee; the real audio
        sink is attached by setup_audiosink().

        NOTE(review): ``pre_elems=[]`` is a mutable default; it is
        copied with ``[:]`` and never mutated, so it is harmless here,
        but a None default would be more conventional.
        """
        gst.Bin.__init__(self, name='mainbin-%s' % player._name)

        self.__player = player
        # Copy so the caller's list is never aliased.
        self._elements = pre_elems[:]

        self.pp = Postprocessing(player)
        self._elements.append(self.pp)

        self.tee = gst.element_factory_make("tee")
        self._elements.append(self.tee)

        #self.queue = gst.element_factory_make("queue")
        #self._elements.append(self.queue)

        self.add(*self._elements)
        gst.element_link_many(*self._elements)

        self.audio_sink = None
        # Serializes audio-sink swaps against concurrent callers.
        self.__audio_sink_lock = threading.Lock()
        self.setup_audiosink()

        # Expose the first element's sink pad as the bin's input.
        self.sinkpad = self._elements[0].get_static_pad("sink")
        self.add_pad(gst.GhostPad('sink', self.sinkpad))

        self.sinkqueue = gst.element_factory_make("queue")
        self.sinkhandler = SinkHandler(player, 'playback_audio_sink')
        self.add(self.sinkhandler)
        self.add(self.sinkqueue)
        gst.element_link_many(self.tee, self.sinkqueue, self.sinkhandler)
コード例 #24
0
ファイル: cli_try.py プロジェクト: gsy/gmusic
    def __init__(self):
        """Build a file-playback pipeline:

        filesrc -> queue -> decodebin2 ~> audioconvert -> rgvolume ->
        audioresample -> queue -> volume  (alsasink added to the pipeline
        but linked elsewhere).

        decodebin2's pads appear dynamically, so its output is linked to
        the converter in the onDynamicPad callback, not here.
        """
        gobject.GObject.__init__(self)
        self.state = 0
        self.player = gst.Pipeline("stream-player")

        self.filesrc = gst.element_factory_make("filesrc", "file-source")
        self.preroll_queue = gst.element_factory_make("queue", "preroll-queue")
        self.decoder = gst.element_factory_make("decodebin2", "decoder")
        self.decoder.connect("new-decoded-pad", self.onDynamicPad)
        self.convertor = gst.element_factory_make("audioconvert", "audioconvert")
        self.rgvolume = gst.element_factory_make("rgvolume", "rgvolume")
        self.audioresample = gst.element_factory_make("audioresample", "audioresample")
        self.queue = gst.element_factory_make("queue", "queue")
        self.volume = gst.element_factory_make("volume", "volume")
        self.sink = gst.element_factory_make("alsasink", "sink")

        self.player.add_many(self.filesrc, self.preroll_queue, self.decoder, self.convertor, self.rgvolume, self.audioresample, self.queue, self.volume, self.sink)

        # Static links up to the decoder; the decoder's source pad is
        # linked to self.convertor at runtime (see onDynamicPad).
        gst.element_link_many(self.filesrc, self.preroll_queue,
                              self.decoder)

        gst.element_link_many(self.convertor, self.rgvolume,
                              self.audioresample, self.queue,
                              self.volume)

        # NOTE(review): the ghost pad is created but never added with
        # add_pad() in this snippet -- presumably done by later code.
        self.ghost_src_pad = gst.GhostPad("src", self.volume.get_pad("src"))
コード例 #25
0
ファイル: tee2.py プロジェクト: sbrookfield/soundblizzard
	def __init__(self):
		"""Build a playbin with a goom visualisation fanned out to two
		autovideosink windows via a tee, plus an autoaudiosink for audio.
		"""
		self.pipeline = gst.Pipeline()
		self.player = gst.element_factory_make("playbin", "player")
		self.vis = gst.element_factory_make("goom", "vis")
		# Despite the name, 'videosink' is a tee that duplicates the
		# visualisation stream into two display branches.
		self.videosink = gst.element_factory_make("tee", 'vidtee')
		self.audiosink = gst.element_factory_make("autoaudiosink", 'audiosink')
		self.aqueue = gst.element_factory_make("queue", 'aqueue')
		self.bqueue = gst.element_factory_make("queue", 'bqueue')
		self.avidsink = gst.element_factory_make('autovideosink', 'avidsink')
		self.bvidsink = gst.element_factory_make('autovideosink', 'bvidsink')
		self.acolorspace = gst.element_factory_make("ffmpegcolorspace","acolor")
		self.bcolorspace = gst.element_factory_make("ffmpegcolorspace","bcolor")
		#self.pipeline.add(self.acolorspace, self.bcolorspace, self.player, self.vis, self.videosink, self.audiosink, self.aqueue, self.bqueue, self.avidsink, self.bvidsink)
		self.pipeline.add(self.videosink, self.aqueue, self.bqueue, self.acolorspace, self.bcolorspace, self.avidsink, self.bvidsink)
		# tee -> queue -> colorspace -> autovideosink, once per branch.
		self.videosink.link(self.aqueue)
		self.videosink.link(self.bqueue)
		gst.element_link_many(self.aqueue, self.acolorspace, self.avidsink)
		gst.element_link_many(self.bqueue, self.bcolorspace, self.bvidsink)
		self.player.set_property("vis-plugin", self.vis)
		#self.player.set_property("video-sink", self.pipeline)
		self.player.set_property("audio-sink", self.audiosink)
		self.bus = self.player.get_bus()
		#self.bus.add_signal_watch()
		#self.bus.enable_sync_message_emission()
		#self.bus.connect("message", self.on_message)
		print self.player
コード例 #26
0
ファイル: gstreamer.py プロジェクト: thechronos/freeseer
    def _set_audio_encoder(self):
        '''
        Sets up the audio encoding branch:
        audio_tee -> queue -> audioconvert -> level -> codec ->
        vorbistag -> mux
        '''

        audioenc_queue = gst.element_factory_make('queue',
                                                        'audioenc_queue')
        audioenc_convert = gst.element_factory_make('audioconvert',
                                                        'audioenc_convert')
        audioenc_level = gst.element_factory_make('level', 'audioenc_level')
        # Emit level messages every 20 ms (interval is in nanoseconds).
        audioenc_level.set_property('interval', 20000000)
        audioenc_codec = gst.element_factory_make(self.recording_audio_codec,
                                                        'audioenc_codec')

        # create a VorbisTag element and merge tags from tag list
        audioenc_tags = gst.element_factory_make("vorbistag", "audioenc_tags")

        # set tag merge mode to GST_TAG_MERGE_REPLACE
        # NOTE(review): indexing __enum_values__ positionally is fragile;
        # index 2 is assumed to be REPLACE -- confirm for the gst version
        # in use.
        merge_mode = gst.TagMergeMode.__enum_values__[2]

        audioenc_tags.merge_tags(self.tags, merge_mode)
        audioenc_tags.set_tag_merge_mode(merge_mode)
        self.player.add(audioenc_queue,
                        audioenc_convert,
                        audioenc_level,
                        audioenc_codec,
                        audioenc_tags)

        gst.element_link_many(self.audio_tee,
                              audioenc_queue,
                              audioenc_convert,
                              audioenc_level,
                              audioenc_codec,
                              audioenc_tags,
                              self.mux)
コード例 #27
0
ファイル: PlayingAudio.py プロジェクト: sumchege/vdwll
    def constructPipeline(self):
        """Build the playback pipeline, either from a gst-launch style
        description string or by assembling the elements by hand.

        Either way the chain is:
        filesrc -> decodebin ~> audioconvert -> autoaudiosink
        (decodebin's dynamic pad is linked in self.connect_signals()).
        """
        if self.use_parse_launch:
            myPipelineString = (
            "filesrc location=C:/AudioFiles/my_music.mp3 "
            "! decodebin ! audioconvert ! autoaudiosink")
            self.player = gst.parse_launch(myPipelineString)
        else:
            # Create the pipeline instance
            self.player = gst.Pipeline()

            # Define pipeline elements
            self.filesrc = gst.element_factory_make("filesrc")

            self.filesrc.set_property("location", "C:/AudioFiles/my_music.mp3")

            # Note: the decodebin signal is connected in self.connect_signals()
            self.decodebin = gst.element_factory_make("decodebin","decodebin")

            self.audioconvert = gst.element_factory_make("audioconvert",
                                                        "audioconvert")

            self.audiosink = gst.element_factory_make("autoaudiosink",
                                                      "a_a_sink")

            # Add elements to the pipeline
            self.player.add(self.filesrc, self.decodebin,
                            self.audioconvert, self.audiosink)

            # Link elements in the pipeline.
            # decodebin -> audioconvert is linked later, once decodebin
            # exposes its dynamic source pad.
            gst.element_link_many(self.filesrc, self.decodebin)
            gst.element_link_many(self.audioconvert, self.audiosink)
コード例 #28
0
ファイル: pipeline.py プロジェクト: machrider18/playitslowly
    def __init__(self, sink):
        """Wrap a playbin whose audio-sink is a custom bin:
        pitch -> audioconvert -> *sink*, so playback speed/pitch can be
        changed independently of each other.

        sink -- the gst audio sink element to render to

        Raises SystemExit (after showing an error dialog) when the
        soundtouch 'pitch' element is not installed.
        """
        gst.Pipeline.__init__(self)
        try:
            self.playbin = gst.element_factory_make("playbin2")
        except gst.ElementNotFoundError:
            # Fall back to the older playbin element.
            self.playbin = gst.element_factory_make("playbin")
        self.add(self.playbin)

        bin = gst.Bin("speed-bin")
        try:
            self.speedchanger = gst.element_factory_make("pitch")
        except gst.ElementNotFoundError:
            mygtk.show_error(
                _(u"You need to install the gstreamer soundtouch elements for "
                  "play it slowly to. They are part of gstreamer-plugins-bad. Consult the "
                  "README if you need more information.")).run()
            raise SystemExit()

        bin.add(self.speedchanger)

        self.audiosink = sink

        bin.add(self.audiosink)
        convert = gst.element_factory_make("audioconvert")
        bin.add(convert)
        gst.element_link_many(self.speedchanger, convert)
        gst.element_link_many(convert, self.audiosink)
        # Expose the pitch element's sink pad so playbin can feed the
        # whole bin as its audio sink.
        sink_pad = gst.GhostPad("sink", self.speedchanger.get_pad("sink"))
        bin.add_pad(sink_pad)
        self.playbin.set_property("audio-sink", bin)
        bus = self.playbin.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)

        # Callback invoked on end-of-stream; default is a no-op.
        self.eos = lambda: None
コード例 #29
0
 def _restartSourceBin(self):
     """Tear down and rebuild the source bin after a video underrun.

     Gives up (sys.exit) after more than 3 successive underruns.
     Returns False so the GLib timeout that calls this does not repeat.
     """
     self._successiveUnderruns += 1
     if self._successiveUnderruns > 3:
         self._debugger.ddebug("Too many underruns.")
         sys.exit(1)
     # Detach and destroy the old source bin.
     gst.element_unlink_many(self._sourceBin, self._sinkBin)
     self._sourceBin.set_state(gst.STATE_NULL)
     self._sinkBin.set_state(gst.STATE_READY)
     self._pipeline.remove(self._sourceBin)
     self._sourceBin = None
     self._debug(
         "Attempting to recover from video loss: Stopping source pipeline and waiting 5s..."
     )
     time.sleep(5)
     self._debug("Restarting source pipeline...")
     # Build a fresh source bin and bring everything back to PLAYING.
     self._sourceBin = self._createSourceBin()
     self._pipeline.add(self._sourceBin)
     gst.element_link_many(self._sourceBin, self._sinkBin)
     self._sourceBin.set_state(gst.STATE_PLAYING)
     self._sinkBin.set_state(gst.STATE_PLAYING)
     self._pipeline.set_state(gst.STATE_PLAYING)
     self._startTimestamp = None
     self._debug("Restarted source pipeline")
     self._underrunTimeout.start()
     # stop the timeout from running again
     return False
コード例 #30
0
ファイル: gstreamer.py プロジェクト: evgenyvinnik/freeseer
    def _set_video_encoder(self):
        """Build the video encoding branch: video_tee -> queue -> codec -> mux."""
        queue = gst.element_factory_make("queue", "videoenc_queue")
        codec = gst.element_factory_make(self.recording_video_codec, "videoenc_codec")
        codec.set_property("bitrate", self.recording_video_bitrate)

        self.player.add(queue, codec)
        # Pull raw video off the tee, encode it, and feed the muxer.
        gst.element_link_many(self.video_tee, queue, codec, self.mux)
    def __init__(self):
        """
        Used to play audio received as an RTP (raw L16) stream from the
        network.  Pipeline: udpsrc(:5001) -> capsfilter ->
        rtpjitterbuffer -> rtpL16depay -> audioconvert -> alsasink,
        set to PLAYING immediately.
        """

        self.pipeline = gst.Pipeline("mypipeline")
        udp=gst.element_factory_make("udpsrc", "udp-packet-receiver")
        self.pipeline.add(udp)
        self.pipeline.get_by_name("udp-packet-receiver").set_property("port", 5001)

        # Raw 16-bit mono RTP audio at 44.1 kHz, payload type 96.
        caps = gst.Caps("application/x-rtp, media=(string)audio, clock-rate=44100, width=16, height=16, encoding-name=(string)L16,encoding-params=(string)1, channels=(int)1, channel-position=(int)1, payload=(int)96")
        filter = gst.element_factory_make("capsfilter", "filter")
        filter.set_property("caps", caps)
        self.pipeline.add(filter)

        # Absorb network jitter; report lost packets downstream.
        jitter=gst.element_factory_make("gstrtpjitterbuffer", "jitter")
        self.pipeline.add(jitter)
        self.pipeline.get_by_name("jitter").set_property("do-lost", "true")

        rtpaudiodepayloader=gst.element_factory_make("rtpL16depay", "rtpdeaudio")
        self.pipeline.add(rtpaudiodepayloader)

        conv = gst.element_factory_make("audioconvert", "converter")
        self.pipeline.add(conv)

        # sync=false: render as data arrives rather than on the clock.
        sink=gst.element_factory_make("alsasink", "Sink")
        self.pipeline.add(sink)
        self.pipeline.get_by_name("Sink").set_property("sync", "false")

        gst.element_link_many(udp, filter, jitter, rtpaudiodepayloader,conv, sink)


        self.pipeline.set_state(gst.STATE_PLAYING)
コード例 #32
0
ファイル: gstreamer.py プロジェクト: evgenyvinnik/freeseer
    def _set_audio_encoder(self):
        """
        Sets up the audio encoding branch:
        audio_tee -> queue -> audioconvert -> level -> codec ->
        vorbistag -> mux
        """

        audioenc_queue = gst.element_factory_make("queue", "audioenc_queue")
        audioenc_convert = gst.element_factory_make("audioconvert", "audioenc_convert")
        audioenc_level = gst.element_factory_make("level", "audioenc_level")
        # Emit level messages every 20 ms (interval is in nanoseconds).
        audioenc_level.set_property("interval", 20000000)
        audioenc_codec = gst.element_factory_make(self.recording_audio_codec, "audioenc_codec")

        # create a VorbisTag element and merge tags from tag list
        audioenc_tags = gst.element_factory_make("vorbistag", "audioenc_tags")

        # set tag merge mode to GST_TAG_MERGE_REPLACE
        # NOTE(review): positional indexing of __enum_values__ is fragile;
        # index 2 is assumed to be REPLACE -- confirm for the gst version.
        merge_mode = gst.TagMergeMode.__enum_values__[2]

        audioenc_tags.merge_tags(self.tags, merge_mode)
        audioenc_tags.set_tag_merge_mode(merge_mode)

        self.player.add(audioenc_queue, audioenc_convert, audioenc_level, audioenc_codec, audioenc_tags)

        gst.element_link_many(
            self.audio_tee, audioenc_queue, audioenc_convert, audioenc_level, audioenc_codec, audioenc_tags, self.mux
        )
コード例 #33
0
ファイル: Convert.py プロジェクト: rodbegbie/MetaRipper
def convert(flacfile, mp3file):
    print "Converting %s... " % flacfile
    src = gst.element_factory_make("filesrc", "src")
    src.set_property("location", flacfile)
    src_pad = src.get_pad("src")

    flac = gst.element_factory_make("flacdec", "decoder")
       
    mp3 = gst.element_factory_make("lame", "encoder")
    mp3.set_property("bitrate", 192)
    mp3.set_property("quality", 2)
    
    sink = gst.element_factory_make("filesink", "sink")
    sink.set_property("location", mp3file)

    bin = gst.Pipeline()
    bin.add_many(src,flac,mp3,sink)
    gst.element_link_many(src,flac,mp3,sink)
    bin.connect("error", error_cb)
    
    bin.set_state(gst.STATE_PAUSED)
    
    res = bin.set_state(gst.STATE_PLAYING);
    while bin.iterate():
        pass
    print "Done.\n"
コード例 #34
0
ファイル: Player.py プロジェクト: dwyerj878/tunes
 def __init__(self):
     """Build the playback pipeline:
     filesrc -> decodebin ~> audioconvert -> pitch -> volume ->
     audioresample -> autoaudiosink.

     decodebin's source pad appears dynamically, so it is linked to the
     converter in on_new_decoded_pad, not here.
     """
     self.logger = logging.getLogger('tunes.player')
     self.logger.debug("Creating Player")
     self.pipeline = gst.Pipeline("pipeline")
     # NOTE(review): self.volume is an int level (50); the gst 'volume'
     # element below is a separate local -- presumably intentional.
     self.volume = 50
     self.mf = None

     source = gst.element_factory_make("filesrc", "filesrc")
     decoder = gst.element_factory_make("decodebin", "decodebin")
     conv = gst.element_factory_make("audioconvert", "audioconvert")
     pitch = gst.element_factory_make("pitch", "pitch")
     volume = gst.element_factory_make("volume",  "volume")
     resample = gst.element_factory_make("audioresample", "audioresample")
     sink = gst.element_factory_make("autoaudiosink", "autoaudiosink")


     self.pipeline.add(source, decoder, conv, volume,  pitch,  resample,  sink)
     # Static links only up to the decoder; its output is attached to
     # 'conv' at runtime in on_new_decoded_pad.
     gst.element_link_many(source, decoder);
     gst.element_link_many(conv, pitch,  volume, resample,  sink)
     decoder.connect("new-decoded-pad", self.on_new_decoded_pad)
     self.pipeline.set_state(gst.STATE_PAUSED)

     self.bus = self.pipeline.get_bus()
     self.bus.add_signal_watch()
     self.bus.connect("message", self.on_message)
     self.logger.debug("Player created")
コード例 #35
0
ファイル: glive.py プロジェクト: yashagrawal3/record-activity
    def _create_videobin(self):
        """Build self._videobin, which records video to an Ogg/Theora file:
        queue -> videoscale -> capsfilter(160x120) -> ffmpegcolorspace ->
        theoraenc -> oggmux -> filesink, with a ghost "sink" pad in front.
        """
        queue = gst.element_factory_make("queue", "videoqueue")
        queue.set_property("max-size-time", 5000000000) # 5 seconds
        queue.set_property("max-size-bytes", 33554432) # 32mb
        queue.connect("overrun", self._log_queue_overrun)

        scale = gst.element_factory_make("videoscale", "vbscale")

        scalecapsfilter = gst.element_factory_make("capsfilter", "scalecaps")

        # Downscale the recorded video to 160x120.
        scalecaps = gst.Caps('video/x-raw-yuv,width=160,height=120')
        scalecapsfilter.set_property("caps", scalecaps)

        colorspace = gst.element_factory_make("ffmpegcolorspace", "vbcolorspace")

        enc = gst.element_factory_make("theoraenc", "vbenc")
        enc.set_property("quality", 16)

        mux = gst.element_factory_make("oggmux", "vbmux")

        sink = gst.element_factory_make("filesink", "vbfile")
        sink.set_property("location", os.path.join(Instance.instancePath, "output.ogg"))

        self._videobin = gst.Bin("videobin")
        self._videobin.add(queue, scale, scalecapsfilter, colorspace, enc, mux, sink)

        queue.link(scale)
        scale.link_pads(None, scalecapsfilter, "sink")
        scalecapsfilter.link_pads("src", colorspace, None)
        gst.element_link_many(colorspace, enc, mux, sink)

        # Expose the queue's sink pad as the bin's input.
        pad = queue.get_static_pad("sink")
        self._videobin.add_pad(gst.GhostPad("sink", pad))
コード例 #36
0
ファイル: video_player.py プロジェクト: niavok/perroquet
    def __init__(self):
        """Build a playbin2-based video player.

        When the soundtouch 'pitch' element is available and speed change
        is enabled in the config, audio is routed through a
        pitch -> audioconvert -> autoaudiosink bin so the playback speed
        can be changed.
        """
        self.player = gst.Pipeline()
        self.playbin = gst.element_factory_make("playbin2", "player")

        # Disable the subtitle display if there is embeded subtitles
        # (for example, in mkv files)
        #
        # Flags activates some things
        # (1 << 0) : video
        # (1 << 1) : audio
        # (1 << 4) : software volume
        #
        # The default value is 0, 1, 2, 4. (1 << 2) display the subtitles
        #
        # For more details, see the doc
        # http://www.gstreamer.net/data/doc/gstreamer/head/gst-plugins-base-plugins/html/gst-plugins-base-plugins-playbin2.html#GstPlayFlags
        # http://www.gstreamer.net/data/doc/gstreamer/head/gst-plugins-base-plugins/html/gst-plugins-base-plugins-playbin2.html#GstPlayBin2--flags
        self.playbin.set_property("flags", (1 << 0)|(1 << 1)|(1 << 4))
        self.playbin.set_property("current-audio", 0)


        self.player.add(self.playbin)
        self.logger = logging.Logger("VideoPlayer")
        self.logger.setLevel(defaultLoggingLevel)
        self.logger.addHandler(defaultLoggingHandler)
        #Audio
        audiobin = gst.Bin("audio-speed-bin")
        try:
            self.audiospeedchanger = gst.element_factory_make("pitch")
            self.canChangeSpeed = True
        except gst.ElementNotFoundError:
            self.logger.warn(_(u"You need to install the gstreamer soundtouch elements to "
                             "use slowly play feature."))
            self.canChangeSpeed = False

        #Try to use the pitch element only if it is available
        if self.canChangeSpeed and config.get("interface_use_speed_change"):
            audiobin.add(self.audiospeedchanger)

            self.audiosink = gst.element_factory_make("autoaudiosink")

            audiobin.add(self.audiosink)
            convert = gst.element_factory_make("audioconvert")
            audiobin.add(convert)
            gst.element_link_many(self.audiospeedchanger, convert, self.audiosink)
            # Expose the pitch element's sink pad so playbin can feed the bin.
            sink_pad = gst.GhostPad("sink", self.audiospeedchanger.get_pad("sink"))
            audiobin.add_pad(sink_pad)
            self.playbin.set_property("audio-sink", audiobin)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.enable_sync_message_emission()
        bus.connect("message", self.on_message)
        bus.connect("sync-message::element", self.on_sync_message)

        self.time_format = gst.Format(gst.FORMAT_TIME)
        # Pending seek target (-1 means none) and current playback speed.
        self.timeToSeek = -1
        self.speed = 1.0
        self.nextCallbackTime = -1
    def __init__(self):
        """
        This constructor builds the gstreamer pipeline that receives an
        RTP stream from the server and dumps it to my customized media
        player.  Pipeline: udpsrc(:5001) -> capsfilter ->
        rtpjitterbuffer -> rtpL16depay -> audioconvert ->
        filesink(./temp.mp3); set to PLAYING immediately.
        """

        self.pipeline = gst.Pipeline("mypipeline")

        udp=gst.element_factory_make("udpsrc", "udp-packet-receiver")
        self.pipeline.add(udp)
        self.pipeline.get_by_name("udp-packet-receiver").set_property("port", 5001)
        # Raw 16-bit mono RTP audio at 44.1 kHz, payload type 96.
        caps = gst.Caps("application/x-rtp, media=(string)audio, clock-rate=44100, width=16, height=16, encoding-name=(string)L16,encoding-params=(string)1, channels=(int)1, channel-position=(int)1, payload=(int)96")
        filter = gst.element_factory_make("capsfilter", "filter")
        filter.set_property("caps", caps)
        self.pipeline.add(filter)

        # Absorb network jitter; report lost packets downstream.
        jitter=gst.element_factory_make("gstrtpjitterbuffer", "jitter")
        self.pipeline.add(jitter)
        self.pipeline.get_by_name("jitter").set_property("do-lost", "true")

        rtpaudiodepayloader=gst.element_factory_make("rtpL16depay", "rtpdeaudio")
        self.pipeline.add(rtpaudiodepayloader)

        conv = gst.element_factory_make("audioconvert", "converter")
        self.pipeline.add(conv)

        # NOTE(review): despite the .mp3 name, no encoder precedes the
        # filesink, so raw converted audio is written -- confirm intent.
        sink= gst.element_factory_make("filesink", "file-sink")
        self.pipeline.add(sink)
        uri="./temp.mp3"
        self.pipeline.get_by_name("file-sink").set_property("location", uri)


        gst.element_link_many(udp, filter, jitter, rtpaudiodepayloader,conv, sink)


        self.pipeline.set_state(gst.STATE_PLAYING)
コード例 #38
0
ファイル: __init__.py プロジェクト: maurodx/sshow
def get_preview_backend(config, num_audio_tracks):
    """Build the preview backend bin.

    Video path: capsfilter -> multiqueue -> autovideosink.
    Audio path: num_audio_tracks capsfilters -> input-selector ->
    multiqueue -> autoaudiosink.  Exposes one "video_sink" ghost pad
    plus one "audio_sink<i>" ghost pad per audio track.
    """
    backend = gst.Bin("backend")

    vcaps = gst.element_factory_make("capsfilter")
    vcaps.props.caps = config.get_video_caps("I420", dict(border=0))

    # Buffer up to 10 seconds; byte/buffer limits disabled.
    queue = gst.element_factory_make("multiqueue")
    queue.props.max_size_time = 10 * gst.SECOND
    queue.props.max_size_bytes = 0
    queue.props.max_size_buffers = 0

    vsink = gst.element_factory_make("autovideosink")
    selector = gst.element_factory_make("input-selector")
    asink = gst.element_factory_make("autoaudiosink")

    backend.add(vcaps, queue, vsink, selector, asink)
    gst.element_link_many(vcaps, queue, vsink)
    gst.element_link_many(selector, queue, asink)

    track_caps = []
    for _ in range(num_audio_tracks):
        acaps = gst.element_factory_make("capsfilter")
        acaps.props.caps = config.get_audio_caps()
        backend.add(acaps)
        acaps.link(selector)
        track_caps.append(acaps)

    backend.add_pad(gst.GhostPad("video_sink", vcaps.get_pad("sink")))
    for idx, element in enumerate(track_caps):
        backend.add_pad(
            gst.GhostPad("audio_sink%d" % (idx, ), element.get_pad("sink")))

    return backend
コード例 #39
0
ファイル: player.py プロジェクト: pymander/transcribe
 def __init__(self):
     """Build the playback pipeline:
     filesrc -> decodebin ~> audioconvert -> audioresample -> volume ->
     scaletempo -> audioconvert -> audioresample -> autoaudiosink.

     decodebin is linked to convert1 dynamically (on_new_decoded_pad),
     via the saved self.apad sink pad.
     """
     gobject.GObject.__init__(self)

     # Playback rate multiplier; scaletempo preserves pitch when it changes.
     self._rate = 1

     self.pipeline = gst.Pipeline('pipeline_main')
     self.audiosrc = gst.element_factory_make('filesrc', 'audio')
     self.decoder = gst.element_factory_make('decodebin', 'decoder')
     self.convert1 = gst.element_factory_make('audioconvert', 'convert1')
     self.resample1 = gst.element_factory_make('audioresample', 'resample1')
     self.volume1 = gst.element_factory_make('volume', 'volume')
     self.volume1.set_property('volume', 1)
     self.scaletempo = gst.element_factory_make('scaletempo',
                                                'scaletempo')
     self.convert2 = gst.element_factory_make('audioconvert', 'convert2')
     self.resample2 = gst.element_factory_make('audioresample', 'resample2')
     self.sink = gst.element_factory_make('autoaudiosink', 'sink')

     self.decoder.connect('new-decoded-pad', self.on_new_decoded_pad)

     self.pipeline.add(self.audiosrc, self.decoder, self.convert1,
             self.volume1, self.resample1, self.scaletempo, self.convert2,
             self.resample2, self.sink)
     self.audiosrc.link(self.decoder)
     gst.element_link_many(self.convert1, self.resample1, self.volume1, 
             self.scaletempo, self.convert2, self.resample2, self.sink)

     # Saved so on_new_decoded_pad can link decodebin's new pad here.
     self.apad = self.convert1.get_pad('sink')

     bus = self.pipeline.get_bus()
     bus.add_signal_watch()
     bus.connect('message', self.on_message)
コード例 #40
0
ファイル: __init__.py プロジェクト: maurodx/sshow
def get_preview_backend(config, num_audio_tracks):
    """Build the preview backend bin.

    Video: capsfilter -> multiqueue -> autovideosink.
    Audio: num_audio_tracks capsfilters -> input-selector -> multiqueue
    -> autoaudiosink.  Ghost pads "video_sink" and "audio_sink<i>" are
    exposed for upstream elements.
    """
    backend = gst.Bin("backend")
    #audio_volume = gst.element_factory_make("volume","volume")

    video_caps = gst.element_factory_make("capsfilter")
    video_caps.props.caps = config.get_video_caps("I420", dict(border=0))
    # Buffer up to 10 seconds; byte/buffer limits disabled.
    mqueue = gst.element_factory_make("multiqueue")
    mqueue.props.max_size_time = 10 * gst.SECOND
    mqueue.props.max_size_bytes   = 0
    mqueue.props.max_size_buffers = 0
    video_sink  = gst.element_factory_make("autovideosink")
    audio_sel = gst.element_factory_make("input-selector")
    audio_sink  = gst.element_factory_make("autoaudiosink")
    backend.add(video_caps, mqueue, video_sink, audio_sel, audio_sink)
    gst.element_link_many(video_caps, mqueue, video_sink)
    gst.element_link_many(audio_sel, mqueue, audio_sink)

    # One capsfilter per audio track, all feeding the input-selector.
    audio_caps = []
    for i in range(num_audio_tracks):
        caps = gst.element_factory_make("capsfilter")
        audio_caps.append(caps)
        caps.props.caps = config.get_audio_caps()
        backend.add(caps)
        caps.link(audio_sel)

    backend.add_pad(gst.GhostPad("video_sink", video_caps.get_pad("sink")))
    for i,sink in enumerate(audio_caps):
        backend.add_pad(gst.GhostPad("audio_sink%d" % (i,), sink.get_pad("sink")))

    return backend
コード例 #41
0
ファイル: stbt.py プロジェクト: ekelly30/stb-tester
    def restart_source_bin(self):
        """Tear down and rebuild the source bin after a video underrun.

        Exits the process after more than 3 successive underruns.
        Returns False so the GLib timeout calling this does not repeat.
        """
        self.successive_underruns += 1
        if self.successive_underruns > 3:
            sys.stderr.write("Error: Video loss. Too many underruns.\n")
            sys.exit(1)

        # Detach and destroy the old source bin.
        gst.element_unlink_many(self.source_bin, self.sink_bin)
        self.source_bin.set_state(gst.STATE_NULL)
        self.sink_bin.set_state(gst.STATE_READY)
        self.pipeline.remove(self.source_bin)
        self.source_bin = None
        debug("Attempting to recover from video loss: "
              "Stopping source pipeline and waiting 5s...")
        time.sleep(5)

        debug("Restarting source pipeline...")
        # Build a fresh source bin and bring the pipeline back to PLAYING.
        self.source_bin = self.create_source_bin()
        self.pipeline.add(self.source_bin)
        gst.element_link_many(self.source_bin, self.sink_bin)
        self.source_bin.set_state(gst.STATE_PLAYING)
        self.pipeline.set_state(gst.STATE_PLAYING)
        debug("Restarted source pipeline")

        self.underrun_timeout.start()

        return False  # stop the timeout from running again
コード例 #42
0
ファイル: pipeline.py プロジェクト: andyhelp/playitslowly
    def __init__(self, sink):
        """Wrap a playbin whose audio-sink is a custom bin:
        pitch -> audioconvert -> *sink*, allowing speed/pitch changes.

        sink -- the gst audio sink element to render to
        """
        gst.Pipeline.__init__(self)
        self.playbin = gst.element_factory_make("playbin")
        self.add(self.playbin)

        bin = gst.Bin("speed-bin")

        self.speedchanger = gst.element_factory_make("pitch")
        bin.add(self.speedchanger)

        self.audiosink = sink

        bin.add(self.audiosink)
        convert = gst.element_factory_make("audioconvert")
        bin.add(convert)
        gst.element_link_many(self.speedchanger, convert)
        gst.element_link_many(convert, self.audiosink)
        # Expose the pitch element's sink pad as the bin's input so
        # playbin can feed the whole bin as its audio sink.
        sink_pad = gst.GhostPad("sink", self.speedchanger.get_pad("sink"))
        bin.add_pad(sink_pad)
        self.playbin.set_property("audio-sink", bin)
        bus = self.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_message)

        # End-of-stream callback (no-op by default) plus optional hooks.
        self.eos = lambda: None
        self.on_eos_cb = None
        self.on_error_cb = None
        # Default output encoder; pipeline_encoders is module-level state.
        self.encoder = pipeline_encoders['wav']
コード例 #43
0
    def _makeBin(self, *args):
        """Create the video conversion bin:
        identity(single-segment) -> ffmpegcolorspace -> videorate ->
        SmartVideoScale [-> capsfilter when the output caps are fixed],
        with active ghost "sink"/"src" pads on either end.
        """
        container = gst.Bin()

        identity = gst.element_factory_make("identity", "single-segment")
        identity.props.single_segment = True
        colorspace = gst.element_factory_make("ffmpegcolorspace", "csp")
        rate = gst.element_factory_make("videorate", "vrate")

        container.add(identity, colorspace, rate)
        gst.element_link_many(identity, colorspace, rate)

        ghost_sink = gst.GhostPad("sink", identity.get_pad("sink"))
        ghost_sink.set_active(True)
        container.add_pad(ghost_sink)

        scaler = SmartVideoScale()
        scaler.set_caps(self.output_streams[0].caps)
        container.add(scaler)
        rate.link(scaler)

        self.debug("output_streams:%d", len(self.output_streams))

        # With a fully-fixed output stream, constrain the result through a
        # capsfilter; otherwise expose the scaler's output directly.
        if len(self.output_streams) and self.output_streams[0].caps.is_fixed():
            capsfilter = gst.element_factory_make("capsfilter")
            capsfilter.props.caps = self.output_streams[0].caps
            container.add(capsfilter)
            scaler.link(capsfilter)
            ghost_src = gst.GhostPad("src", capsfilter.get_pad("src"))
        else:
            ghost_src = gst.GhostPad("src", scaler.get_pad("src"))

        ghost_src.set_active(True)
        container.add_pad(ghost_src)
        return container
コード例 #44
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def _add_audiosink(self, audio_sink, buffer_position):
        '''Set up the new audiosink, sync its state with the bin, and
        optionally seek back to *buffer_position* (a get_position result).
        '''

        self.add(audio_sink)
        audio_sink.sync_state_with_parent()
        # Attach the new sink to the tail of the element chain.
        gst.element_link_many(self._elements[-1], audio_sink)

        if buffer_position is not None:
            
            # buffer position is the output from get_position. If set, we
            # seek to that position.
            
            # TODO: this actually seems to skip ahead a tiny bit. why?
            
            # Note! this is super important in paused mode too, because when
            #       we switch the sinks around the new sink never goes into
            #       the paused state because there's no buffer. This forces
            #       a resync of the buffer, so things still work.
            
            seek_event = gst.event_new_seek(1.0, gst.FORMAT_DEFAULT,
                gst.SEEK_FLAG_FLUSH, gst.SEEK_TYPE_SET,
                buffer_position[0],
                gst.SEEK_TYPE_NONE, 0)
            
            self.send_event(seek_event)
        
        self.audio_sink = audio_sink
コード例 #45
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def _setup_finish(self, elem, blocked, state):
        """Swap the tee's secondary sink branches.

        Removes every previously-added queue+sink pair (releasing the
        tee's request pads), then re-adds a fresh queue -> sink chain
        for each entry in self.sinks.  Finally restores *state* and, if
        the sink pad was blocked, unblocks it.
        """
        for sink in self.added_sinks:
            queue = self.queuedict[sink.name]
            pad = queue.get_static_pad("sink").get_peer()
            if pad:
                self.tee.release_request_pad(pad)
            # Best-effort removal: the elements may already be gone.
            try:
                self.remove(queue)
                queue.set_state(gst.STATE_NULL)
            except gst.RemoveError:
                pass
            try:
                self.remove(sink)
                sink.set_state(gst.STATE_NULL)
            except gst.RemoveError:
                pass
        self.added_sinks = []

        for name, sink in self.sinks.iteritems():
            self.add(sink)
            queue = gst.element_factory_make("queue")
            self.add(queue)
            self.queuedict[sink.name] = queue

            gst.element_link_many(self.tee, queue, sink)

            self.added_sinks.append(sink)

        self.set_state(state)
        if blocked:
            self.sinkpad.set_blocked_async(False, lambda *args: False, state)
コード例 #46
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def _setup_finish(self, elem, blocked, state):
        """Rebuild the tee's secondary sink branches.

        Tears down every previously-added queue+sink pair (releasing the
        matching tee request pad), then adds a new queue -> sink chain
        per entry in self.sinks, restores *state*, and unblocks the sink
        pad if it had been blocked for the swap.
        """
        for sink in self.added_sinks:
            queue = self.queuedict[sink.name]
            pad = queue.get_static_pad("sink").get_peer()
            if pad:
                self.tee.release_request_pad(pad)
            # Best-effort removal: ignore elements that are already gone.
            try:
                self.remove(queue)
                queue.set_state(gst.STATE_NULL)
            except gst.RemoveError:
                pass
            try:
                self.remove(sink)
                sink.set_state(gst.STATE_NULL)
            except gst.RemoveError:
                pass
        self.added_sinks = []

        for name, sink in self.sinks.iteritems():
            self.add(sink)
            queue = gst.element_factory_make("queue")
            self.add(queue)
            self.queuedict[sink.name] = queue

            gst.element_link_many(self.tee, queue, sink)

            self.added_sinks.append(sink)

        self.set_state(state)
        if blocked:
            self.sinkpad.set_blocked_async(False, lambda *args: False, state)
コード例 #47
0
ファイル: filesrc.py プロジェクト: zhangzhe1103/gst-python
def main(args):
    if len(args) != 3:
        print 'Usage: %s input output' % (args[0])
        return -1
    
    bin = gst.Pipeline('pipeline')

    filesrc = FileSource('filesource')
    assert filesrc
    filesrc.set_property('location', args[1])
   
    filesink = gst.element_factory_make('filesink', 'sink')
    filesink.set_property('location', args[2])

    bin.add(filesrc, filesink)
    gst.element_link_many(filesrc, filesink)
    
    bin.set_state(gst.STATE_PLAYING);
    mainloop = gobject.MainLoop()

    def bus_event(bus, message):
        t = message.type
        if t == gst.MESSAGE_EOS:
            mainloop.quit()
        elif t == gst.MESSAGE_ERROR:
            err, debug = message.parse_error()
            print "Error: %s" % err, debug
            mainloop.quit()           
        return True
    bin.get_bus().add_watch(bus_event)

    mainloop.run()
    bin.set_state(gst.STATE_NULL)
コード例 #48
0
ファイル: pipe.py プロジェクト: thiblahute/exaile
    def __init__(self, player, pre_elems=None):
        """Build the main audio bin for *player*.

        Chain: [pre_elems...] -> Postprocessing -> tee, with the tee
        additionally feeding a queue -> SinkHandler branch. The first
        element's sink pad is exposed as this bin's "sink" ghost pad.

        :param player: owning player object; its ``_name`` names the bin.
        :param pre_elems: optional list of elements placed at the front
            of the chain. Defaults to an empty list; a ``None`` sentinel
            is used instead of a mutable ``[]`` default, and the caller's
            list is copied, never mutated.
        """
        gst.Bin.__init__(self, name='mainbin-%s' % player._name)

        self.__player = player
        # Copy so neither the caller's list nor a shared default mutates.
        self._elements = list(pre_elems) if pre_elems else []

        self.pp = Postprocessing(player)
        self._elements.append(self.pp)

        self.tee = gst.element_factory_make("tee")
        self._elements.append(self.tee)

        #self.queue = gst.element_factory_make("queue")
        #self._elements.append(self.queue)

        self.add(*self._elements)
        gst.element_link_many(*self._elements)

        self.audio_sink = None
        self.__audio_sink_lock = threading.Lock()
        self.setup_audiosink()

        # Expose the head of the chain as the bin's input pad.
        self.sinkpad = self._elements[0].get_static_pad("sink")
        self.add_pad(gst.GhostPad('sink', self.sinkpad))

        self.sinkqueue = gst.element_factory_make("queue")
        self.sinkhandler = SinkHandler(player, 'playback_audio_sink')
        self.add(self.sinkhandler)
        self.add(self.sinkqueue)
        gst.element_link_many(self.tee, self.sinkqueue, self.sinkhandler)
コード例 #49
0
ファイル: videofade.py プロジェクト: dparker18/Pitivi
    def __init__(self, position=0, duration=2 * gst.SECOND, fadefromblack=True):
        """Video fade bin: ffmpegcolorspace -> alpha -> videomixer ->
        ffmpegcolorspace, with ghost "sink"/"src" pads on the ends.

        A gst.Controller drives the alpha element's "alpha" property
        (linear interpolation) so the video fades starting at *position*
        for *duration*; *fadefromblack* selects fade-in vs fade-out.
        """
        gst.Bin.__init__(self)
        self.incsp = gst.element_factory_make("ffmpegcolorspace", "incsp")
        self.outcsp = gst.element_factory_make("ffmpegcolorspace", "outcsp")
        self.alpha = gst.element_factory_make("alpha", "alpha")
        self.vmix = gst.element_factory_make("videomixer", "videomix")
        # background=1 — presumably the mixer's black background so the
        # faded video mixes against black; verify against videomixer docs.
        self.vmix.set_property("background", 1)
        self.add(self.incsp, self.alpha, self.vmix, self.outcsp)
        gst.element_link_many(self.incsp, self.alpha, self.vmix, self.outcsp)

        # Ghost pads expose the inner chain's endpoints on this bin.
        self._sinkpad = gst.GhostPad("sink", self.incsp.get_pad("sink"))
        self._sinkpad.set_active(True)
        self._srcpad = gst.GhostPad("src", self.outcsp.get_pad("src"))
        self._srcpad.set_active(True)

        self.add_pad(self._sinkpad)
        self.add_pad(self._srcpad)

        self.startposition = position
        self.duration = duration
        self.fadefromblack = fadefromblack

        # Linear interpolation of "alpha" over time produces the fade;
        # the actual keyframes are installed by _resetControllerValues().
        self.alphacontrol = gst.Controller(self.alpha, "alpha")
        self.alphacontrol.set_interpolation_mode("alpha", gst.INTERPOLATE_LINEAR)

        self._resetControllerValues()
コード例 #50
0
    def __init__(self, uri, start, duration, fadein, fadeout, volume):
        """Audio clip bin: gnomevfssrc -> wavparse -> audioconvert ->
        audioresample -> audiopanorama -> volume, exposed via a "src"
        ghost pad.

        A gst.Controller on the volume element ramps 0.0 -> *volume*
        over *fadein* after *start*, holds, then ramps back to 0.0 over
        *fadeout* before *start* + *duration* (linear interpolation).
        Times are in the same units as *start*/*duration* (presumably
        gst nanoseconds — confirm against the caller).
        """
        gst.Bin.__init__(self)
        self.start = start
        self.duration = duration
        self.clip_volume = volume
        self.already_seeked = False
        self.fading = False
        self.gnomevfssrc = gst.element_factory_make("gnomevfssrc")
        self.gnomevfssrc.set_property("location", uri)
        self.wavparse = gst.element_factory_make("wavparse")
        self.audioconvert = gst.element_factory_make("audioconvert")
        self.audioresample = gst.element_factory_make("audioresample")
        self.audiopanorama = gst.element_factory_make("audiopanorama")
        self.volume = gst.element_factory_make("volume")
        # Keyframes: silent until start, fade in, plateau, fade out.
        self.controller = gst.Controller(self.volume, "volume")
        self.controller.set_interpolation_mode(
            "volume", gst.INTERPOLATE_LINEAR)
        self.controller.set("volume", 0, 0.0)
        self.controller.set("volume", start, 0.0)
        self.controller.set("volume", start + fadein, volume)
        self.controller.set("volume", start + duration - fadeout, volume)
        self.controller.set("volume", start + duration, 0.0)
        self.add(self.gnomevfssrc, self.wavparse, self.audioconvert,
            self.audioresample, self.audiopanorama, self.volume)
        # wavparse's src pad appears dynamically, so only link up to it
        # now; the convert chain is joined in on_pad() below.
        gst.element_link_many(self.gnomevfssrc, self.wavparse)
        gst.element_link_many(self.audioconvert, self.audioresample,
            self.audiopanorama, self.volume)

        # Bridge wavparse's dynamic pad into the convert chain.
        def on_pad(comp, pad):
            convpad = self.audioconvert.get_compatible_pad(pad, pad.get_caps())
            pad.link(convpad)
        self.wavparse.connect("pad-added", on_pad)
        self.pad = self.volume.get_pad("src")
        self.ghostpad = gst.GhostPad("src", self.pad)
        self.add_pad(self.ghostpad)
コード例 #51
0
	def __init__(self):
		"""Build an Ogg/Vorbis playback pipeline:
		filesrc -> oggdemux -> vorbisdec -> audioconvert -> autoaudiosink.

		NOTE(review): the original also created and configured a playbin
		(with a fakesink video sink and a bus watch) and then immediately
		overwrote self.player with this Pipeline, discarding all of it;
		that dead setup has been removed.
		"""
		self.player = gst.Pipeline("player")
		source = gst.element_factory_make("filesrc", "file-source")
		self.player.add(source)
		demuxer = gst.element_factory_make("oggdemux", "demuxer")
		self.player.add(demuxer)
		# oggdemux exposes its pads dynamically, so the decoder is
		# linked later from demuxer_callback().
		demuxer.connect("pad-added", self.demuxer_callback)
		self.audio_decoder = gst.element_factory_make("vorbisdec", "vorbis-decoder")
		self.player.add(self.audio_decoder)
		audioconv = gst.element_factory_make("audioconvert", "converter")
		self.player.add(audioconv)
		audiosink = gst.element_factory_make("autoaudiosink", "audio-output")
		self.player.add(audiosink)

		gst.element_link_many(source, demuxer)
		gst.element_link_many(self.audio_decoder, audioconv, audiosink)

		bus = self.player.get_bus()
		bus.add_signal_watch()
		bus.connect("message", self.on_message)
コード例 #52
0
    def __init__(self):
        """Set up the Gtk UI (from lightbox.xml), a GUdev client watching
        video4linux devices, and a v4l2src -> capsfilter -> videoflip ->
        xvimagesink preview pipeline with bus message handlers.
        """
        settings = Gtk.Settings.get_default()
        settings.set_property("gtk-application-prefer-dark-theme", True)

        # UI widgets and signal handlers come from the Glade/Builder file.
        self.builder = Gtk.Builder()
        self.builder.add_from_file("lightbox.xml")
        self.builder.connect_signals(self)
        self.devices = self.builder.get_object("devices")
        self.device_combo = self.builder.get_object("device_combo")
        self.formats = self.builder.get_object("formats")
        # Watches for camera hotplug events on the video4linux subsystem.
        self.udev_client = GUdev.Client.new(['video4linux'])

        self.player = gst.Pipeline(name='player')
        self.source = gst.element_factory_make('v4l2src', 'source')
        self.filter = gst.element_factory_make("capsfilter", "filter")
        self.flipper = gst.element_factory_make("videoflip", "flipper")
        sink = gst.element_factory_make('xvimagesink', 'sink')

        self.player.add(self.source)
        self.player.add(self.filter)
        self.player.add(self.flipper)
        self.player.add(sink)

        gst.element_link_many(self.source, self.filter, self.flipper, sink)

        bus = self.player.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self._on_message)
        # Sync messages are needed to embed the video sink in the UI
        # window (handled in self._on_sync_message).
        bus.enable_sync_message_emission()
        bus.connect("sync-message::element", self._on_sync_message)

        self._pop_dev_timeout = 0
コード例 #53
0
    def constructVideoPipeline(self):
        """
        Define and links elements to build the video portion
        of the main pipeline: queue -> autoconvert -> videobalance ->
        capsfilter (video/x-raw-yuv) -> ffmpegcolorspace -> autovideosink.
        All elements are added to self.player, which must already exist.
        @see: self.constructPipeline()
        """
        # Autoconvert element for video processing
        self.autoconvert = gst.element_factory_make("autoconvert")
        self.videosink = gst.element_factory_make("autovideosink")

        # Set the capsfilter to constrain the stream to raw YUV video.
        videocap = gst.Caps("video/x-raw-yuv")
        self.capsFilter = gst.element_factory_make("capsfilter")
        self.capsFilter.set_property("caps", videocap)

        # Converts the video from one colorspace to another
        self.colorSpace = gst.element_factory_make("ffmpegcolorspace")

        # Fixed color adjustments; saturation 0.0 presumably desaturates
        # the video to grayscale — confirm against videobalance docs.
        self.videobalance = gst.element_factory_make("videobalance")
        self.videobalance.set_property("brightness", 0.5)
        self.videobalance.set_property("contrast", 0.5)
        self.videobalance.set_property("saturation", 0.0)

        self.queue1 = gst.element_factory_make("queue")

        self.player.add(self.queue1, self.autoconvert, self.videobalance,
                        self.capsFilter, self.colorSpace, self.videosink)

        gst.element_link_many(self.queue1, self.autoconvert, self.videobalance,
                              self.capsFilter, self.colorSpace, self.videosink)
コード例 #54
0
ファイル: filesrc.py プロジェクト: rodbegbie/MetaRipper
def main(args):
    """Unfinished example: prints a notice and returns immediately.

    Everything after the early return is unreachable scaffolding for a
    FileSource -> filesink copy pipeline driven by a polling iterate loop.
    """
    print 'This example is not finished yet.'
    return

    # --- unreachable below this point (kept as example scaffolding) ---
    if len(args) != 3:
        print 'Usage: %s input output' % (args[0])
        return -1
    
    bin = gst.Pipeline('pipeline')

    filesrc = FileSource('filesource')
    #filesrc = gst.Element('filesrc', 'src')
    assert filesrc
    filesrc.set_property('location', args[1])
   
    filesink = gst.element_factory_make('filesink', 'sink')
    filesink.set_property('location', args[2])

    bin.add_many(filesrc, filesink)
    gst.element_link_many(filesrc, filesink)
    
    bin.set_state(gst.STATE_PLAYING);

    # Drive the pipeline by polling until iterate() reports completion.
    while bin.iterate():
        pass

    bin.set_state(gst.STATE_NULL)
コード例 #55
0
    def __init__(self, type):
        """Ogg encoder bin: multiplexes audio (vorbisenc) and/or video
        (theoraenc) into an oggmux, depending on which INPUT_TYPE_* bits
        are set in *type*. The inherited audio_pad / video_pad /
        source_pad ghost pads are retargeted onto the inner elements.
        (Note: the *type* parameter shadows the builtin, but renaming it
        would break keyword callers.)
        """
        Encoder.__init__(self, type)
        self.oggmux = gst.element_factory_make("oggmux", "oggmux")
        self.add(self.oggmux)
        self.theoraenc = None
        self.vorbisenc = None

        if type & INPUT_TYPE_AUDIO:
            # audioconvert -> vorbisenc -> queue -> oggmux
            audioconvert = gst.element_factory_make("audioconvert",
                                                    "audioconvert")
            self.add(audioconvert)
            self.vorbisenc = gst.element_factory_make("vorbisenc", "vorbisenc")
            self.add(self.vorbisenc)
            queue_audio = gst.element_factory_make("queue", "queue_audio_enc")
            self.add(queue_audio)
            gst.element_link_many(audioconvert, self.vorbisenc, queue_audio,
                                  self.oggmux)
            # Point the inherited audio ghost pad at the chain's input.
            self.audio_pad.set_target(audioconvert.sink_pads().next())
        if type & INPUT_TYPE_VIDEO:
            # theoraenc -> queue -> oggmux
            self.theoraenc = gst.element_factory_make("theoraenc", "theoraenc")
            self.add(self.theoraenc)
            queue_video = gst.element_factory_make("queue", "queue_video_enc")
            self.add(queue_video)
            gst.element_link_many(self.theoraenc, queue_video, self.oggmux)
            self.video_pad.set_target(self.theoraenc.sink_pads().next())

        # The bin's output is the muxer's src pad.
        self.source_pad.set_target(self.oggmux.src_pads().next())
コード例 #56
0
ファイル: Convert.py プロジェクト: rodbegbie/MetaRipper
def convert(flacfile, mp3file):
    print "Converting %s... " % flacfile
    src = gst.element_factory_make("filesrc", "src")
    src.set_property("location", flacfile)
    src_pad = src.get_pad("src")

    flac = gst.element_factory_make("flacdec", "decoder")

    mp3 = gst.element_factory_make("lame", "encoder")
    mp3.set_property("bitrate", 192)
    mp3.set_property("quality", 2)

    sink = gst.element_factory_make("filesink", "sink")
    sink.set_property("location", mp3file)

    bin = gst.Pipeline()
    bin.add_many(src, flac, mp3, sink)
    gst.element_link_many(src, flac, mp3, sink)
    bin.connect("error", error_cb)

    bin.set_state(gst.STATE_PAUSED)

    res = bin.set_state(gst.STATE_PLAYING)
    while bin.iterate():
        pass
    print "Done.\n"