def start_pipeline(self, channels, samplerate): self.pipeline = gst.parse_launch(self.pipe) # store a pointer to appsrc in our encoder object self.src = self.pipeline.get_by_name('src') # store a pointer to appsink in our encoder object self.app = self.pipeline.get_by_name('app') srccaps = gst.Caps("""audio/x-raw-float, endianness=(int)1234, channels=(int)%s, width=(int)32, rate=(int)%d""" % (int(channels), int(samplerate))) self.src.set_property("caps", srccaps) self.src.set_property('emit-signals', True) self.src.set_property('num-buffers', -1) self.src.set_property('block', True) self.src.set_property('do-timestamp', True) self.bus = self.pipeline.get_bus() self.bus.add_signal_watch() self.bus.connect("message", self._on_message_cb) import threading class MainloopThread(threading.Thread): def __init__(self, mainloop): threading.Thread.__init__(self) self.mainloop = mainloop def run(self): self.mainloop.run() self.mainloop = gobject.MainLoop() self.mainloopthread = MainloopThread(self.mainloop) self.mainloopthread.start() # start pipeline self.pipeline.set_state(gst.STATE_PLAYING)
def setup(self, channels=None, samplerate=None, blocksize=None):
    """Prepare the decoding pipeline and block until the stream is discovered.

    Either replays frames from an in-memory stack (``self.from_stack``),
    or builds a GStreamer pipeline — a Gnonlin ``gnlurisource`` when
    decoding a segment of the media, otherwise a plain ``uridecodebin`` —
    that delivers raw 32-bit float audio to an ``appsink``.  The method
    then waits on a condition variable until the GStreamer thread has
    discovered the input stream properties.

    :param channels:   desired output channel count (None keeps default)
    :param samplerate: desired output sample rate in Hz (None keeps default)
    :param blocksize:  desired output block size (None keeps default)
    :raises ValueError: if the requested segment lies outside the media duration
    :raises IOError:    if no decodable audio stream is found
    """
    self.eod = False
    self.last_buffer = None

    # Replay mode: frames were previously stacked by an earlier run,
    # so no pipeline is needed — just iterate over the stored frames.
    if self.from_stack:
        self._frames_iterator = iter(self.process_pipe.frames_stack)
        return

    # Stack mode: record decoded frames for later replay.
    if self.stack:
        self.process_pipe.frames_stack = []

    if self.uri_duration is None:
        # Set the duration from the length of the file
        self.uri_duration = self.uri_total_duration - self.uri_start

    if self.is_segment:
        # Check start and duration value
        if self.uri_start > self.uri_total_duration:
            raise ValueError(('Segment start time value exceed media ' +
                              'duration'))
        if self.uri_start + self.uri_duration > self.uri_total_duration:
            raise ValueError("""Segment duration value is too large \
given the media duration""")

    # a lock to wait wait for gstreamer thread to be ready
    self.discovered_cond = threading.Condition(threading.Lock())
    self.discovered = False

    # the output data format we want
    if blocksize:
        self.output_blocksize = blocksize
    if samplerate:
        self.output_samplerate = int(samplerate)
    if channels:
        self.output_channels = int(channels)

    if self.is_segment:
        # Create the pipe with Gnonlin gnlurisource
        self.pipe = '''gnlurisource name=src uri={uri}
                       start=0
                       duration={uri_duration}
                       media-start={uri_start}
                       media-duration={uri_duration}
                       ! audioconvert name=audioconvert
                       ! audioresample
                       ! appsink name=sink sync=False async=True
                       '''.format(
            uri=self.uri,
            uri_start=np.uint64(round(self.uri_start * gst.SECOND)),
            uri_duration=np.int64(round(self.uri_duration * gst.SECOND)))
        # convert uri_start and uri_duration to
        # nanoseconds
    else:
        # Create the pipe with standard Gstreamer uridecodebin
        self.pipe = '''uridecodebin name=src uri={uri}
                       ! audioconvert name=audioconvert
                       ! audioresample
                       ! appsink name=sink sync=False async=True
                       '''.format(uri=self.uri)

    self.pipeline = gst.parse_launch(self.pipe)

    # Sink caps: pin the format the caller asked for; leave ranges/sets
    # so GStreamer negotiates freely where nothing was requested.
    if self.output_channels:
        caps_channels = int(self.output_channels)
    else:
        caps_channels = "[ 1, 2 ]"
    if self.output_samplerate:
        caps_samplerate = int(self.output_samplerate)
    else:
        caps_samplerate = "{ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }"
    sink_caps = gst.Caps("""audio/x-raw-float,
        endianness=(int)1234,
        channels=(int)%s,
        width=(int)32,
        rate=(int)%s""" % (caps_channels, caps_samplerate))

    self.src = self.pipeline.get_by_name('src')
    # Hook autoplug-continue on the decodebin so we can inspect/veto
    # stream autoplugging; for segments the uridecodebin is nested
    # inside the gnlurisource.
    if not self.is_segment:
        self.src.connect("autoplug-continue", self._autoplug_cb)
    else:
        uridecodebin = self.src.get_by_name('internal-uridecodebin')
        uridecodebin.connect("autoplug-continue", self._autoplug_cb)

    self.conv = self.pipeline.get_by_name('audioconvert')
    # notify::caps fires once real caps are negotiated -> stream discovered
    self.conv.get_pad("sink").connect("notify::caps", self._notify_caps_cb)

    self.sink = self.pipeline.get_by_name('sink')
    self.sink.set_property("caps", sink_caps)
    self.sink.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
    # drop=False: block the pipeline rather than discard buffers
    self.sink.set_property("drop", False)
    self.sink.set_property('emit-signals', True)
    self.sink.connect("new-buffer", self._on_new_buffer_cb)

    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message', self._on_message_cb)

    # Bounded queue decoded buffers are pushed into by the sink callback.
    self.queue = Queue.Queue(QUEUE_SIZE)

    self.mainloop = gobject.MainLoop()
    self.mainloopthread = MainloopThread(self.mainloop)
    self.mainloopthread.start()
    #self.mainloopthread = get_loop_thread()
    ##self.mainloop = self.mainloopthread.mainloop

    # start pipeline
    self.pipeline.set_state(gst.STATE_PLAYING)

    # Block until the GStreamer thread signals discovery (or error).
    self.discovered_cond.acquire()
    while not self.discovered:
        # print 'waiting'
        self.discovered_cond.wait()
    self.discovered_cond.release()

    # input_samplerate is set by the discovery callbacks; its absence
    # means no audio stream was successfully negotiated.
    if not hasattr(self, 'input_samplerate'):
        if hasattr(self, 'error_msg'):
            raise IOError(self.error_msg)
        else:
            raise IOError('no known audio stream found')
def setup(self, channels=None, samplerate=None, blocksize=None):
    """Prepare a live-source decoding pipeline and block until discovery.

    Builds a pipeline ``<input_src> num-buffers=N ! audioconvert !
    audioresample ! appsink`` delivering raw 32-bit float audio, starts
    it, and waits on a condition variable until the GStreamer thread has
    discovered the input stream properties.

    :param channels:   desired output channel count (None keeps default)
    :param samplerate: desired output sample rate in Hz (None keeps default)
    :param blocksize:  desired output block size (None keeps default)
    :raises IOError: if no decodable audio stream is found
    """
    self.eod = False
    self.last_buffer = None

    # a lock to wait wait for gstreamer thread to be ready
    self.discovered_cond = threading.Condition(threading.Lock())
    self.discovered = False

    # the output data format we want
    if blocksize:
        self.output_blocksize = blocksize
    if samplerate:
        self.output_samplerate = int(samplerate)
    if channels:
        self.output_channels = int(channels)

    # Create the pipe with standard Gstreamer uridecodbin
    # (input_src is the live source element, e.g. set by the caller;
    # num-buffers bounds the capture length)
    self.pipe = '''%s num-buffers=%d name=src
               ! audioconvert name=audioconvert
               ! audioresample
               ! appsink name=sink sync=False async=True
               ''' % (self.input_src, self.num_buffers)

    self.pipeline = gst.parse_launch(self.pipe)

    # Sink caps: pin the format the caller asked for; leave ranges/sets
    # so GStreamer negotiates freely where nothing was requested.
    if self.output_channels:
        caps_channels = int(self.output_channels)
    else:
        caps_channels = "[ 1, 2 ]"
    if self.output_samplerate:
        caps_samplerate = int(self.output_samplerate)
    else:
        caps_samplerate = "{ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }"
    sink_caps = gst.Caps("""audio/x-raw-float,
        endianness=(int)1234,
        channels=(int)%s,
        width=(int)32,
        rate=(int)%s""" % (caps_channels, caps_samplerate))

    self.src = self.pipeline.get_by_name('src')

    self.conv = self.pipeline.get_by_name('audioconvert')
    # notify::caps fires once real caps are negotiated -> stream discovered
    self.conv.get_pad("sink").connect("notify::caps", self._notify_caps_cb)

    self.sink = self.pipeline.get_by_name('sink')
    self.sink.set_property("caps", sink_caps)
    self.sink.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
    # drop=False: block the pipeline rather than discard buffers
    self.sink.set_property("drop", False)
    self.sink.set_property('emit-signals', True)
    self.sink.connect("new-buffer", self._on_new_buffer_cb)

    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message', self._on_message_cb)

    # Bounded queue decoded buffers are pushed into by the sink callback.
    self.queue = Queue.Queue(QUEUE_SIZE)

    self.mainloop = gobject.MainLoop()
    self.mainloopthread = MainloopThread(self.mainloop)
    self.mainloopthread.start()
    #self.mainloopthread = get_loop_thread()
    ##self.mainloop = self.mainloopthread.mainloop

    # start pipeline
    self.pipeline.set_state(gst.STATE_PLAYING)

    # Block until the GStreamer thread signals discovery (or error).
    self.discovered_cond.acquire()
    while not self.discovered:
        # print 'waiting'
        self.discovered_cond.wait()
    self.discovered_cond.release()

    # input_samplerate is set by the discovery callbacks; its absence
    # means no audio stream was successfully negotiated.
    if not hasattr(self, 'input_samplerate'):
        if hasattr(self, 'error_msg'):
            raise IOError(self.error_msg)
        else:
            raise IOError('no known audio stream found')
def setup(self, channels=None, samplerate=None, blocksize=None):
    """Prepare the decoding pipeline and block until the stream is discovered.

    Either replays frames from an in-memory stack (``self.from_stack``),
    or builds a GStreamer pipeline — a Gnonlin ``gnlurisource`` when
    decoding a segment of the media, otherwise a plain ``uridecodebin`` —
    that delivers raw 32-bit float audio to an ``appsink``.  The method
    then waits on a condition variable until the GStreamer thread has
    discovered the input stream properties.

    :param channels:   desired output channel count (None keeps default)
    :param samplerate: desired output sample rate in Hz (None keeps default)
    :param blocksize:  desired output block size (None keeps default)
    :raises ValueError: if the requested segment lies outside the media duration
    :raises IOError:    if no decodable audio stream is found
    """
    self.eod = False
    self.last_buffer = None

    # Replay mode: frames were previously stacked by an earlier run,
    # so no pipeline is needed — just iterate over the stored frames.
    if self.from_stack:
        self._frames_iterator = iter(self.process_pipe.frames_stack)
        return

    # Stack mode: record decoded frames for later replay.
    if self.stack:
        self.process_pipe.frames_stack = []

    if self.uri_duration is None:
        # Set the duration from the length of the file
        self.uri_duration = self.uri_total_duration - self.uri_start

    if self.is_segment:
        # Check start and duration value
        if self.uri_start > self.uri_total_duration:
            raise ValueError(('Segment start time value exceed media ' +
                              'duration'))
        if self.uri_start + self.uri_duration > self.uri_total_duration:
            raise ValueError("""Segment duration value is too large \
given the media duration""")

    # a lock to wait wait for gstreamer thread to be ready
    self.discovered_cond = threading.Condition(threading.Lock())
    self.discovered = False

    # the output data format we want
    if blocksize:
        self.output_blocksize = blocksize
    if samplerate:
        self.output_samplerate = int(samplerate)
    if channels:
        self.output_channels = int(channels)

    if self.is_segment:
        # Create the pipe with Gnonlin gnlurisource
        self.pipe = '''gnlurisource name=src uri={uri}
                       start=0
                       duration={uri_duration}
                       media-start={uri_start}
                       media-duration={uri_duration}
                       ! audioconvert name=audioconvert
                       ! audioresample
                       ! appsink name=sink sync=False async=True
                       '''.format(uri=self.uri,
                                  uri_start=np.uint64(
                                      round(self.uri_start * gst.SECOND)),
                                  uri_duration=np.int64(round(self.uri_duration * gst.SECOND)))
        # convert uri_start and uri_duration to
        # nanoseconds
    else:
        # Create the pipe with standard Gstreamer uridecodebin
        self.pipe = '''uridecodebin name=src uri={uri}
                       ! audioconvert name=audioconvert
                       ! audioresample
                       ! appsink name=sink sync=False async=True
                       '''.format(uri=self.uri)

    self.pipeline = gst.parse_launch(self.pipe)

    # Sink caps: pin the format the caller asked for; leave ranges/sets
    # so GStreamer negotiates freely where nothing was requested.
    if self.output_channels:
        caps_channels = int(self.output_channels)
    else:
        caps_channels = "[ 1, 2 ]"
    if self.output_samplerate:
        caps_samplerate = int(self.output_samplerate)
    else:
        caps_samplerate = "{ 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000 }"
    sink_caps = gst.Caps("""audio/x-raw-float,
        endianness=(int)1234,
        channels=(int)%s,
        width=(int)32,
        rate=(int)%s""" % (caps_channels, caps_samplerate))

    self.src = self.pipeline.get_by_name('src')
    # Hook autoplug-continue on the decodebin so we can inspect/veto
    # stream autoplugging; for segments the uridecodebin is nested
    # inside the gnlurisource.
    if not self.is_segment:
        self.src.connect("autoplug-continue", self._autoplug_cb)
    else:
        uridecodebin = self.src.get_by_name('internal-uridecodebin')
        uridecodebin.connect("autoplug-continue", self._autoplug_cb)

    self.conv = self.pipeline.get_by_name('audioconvert')
    # notify::caps fires once real caps are negotiated -> stream discovered
    self.conv.get_pad("sink").connect("notify::caps", self._notify_caps_cb)

    self.sink = self.pipeline.get_by_name('sink')
    self.sink.set_property("caps", sink_caps)
    self.sink.set_property('max-buffers', GST_APPSINK_MAX_BUFFERS)
    # drop=False: block the pipeline rather than discard buffers
    self.sink.set_property("drop", False)
    self.sink.set_property('emit-signals', True)
    self.sink.connect("new-buffer", self._on_new_buffer_cb)

    self.bus = self.pipeline.get_bus()
    self.bus.add_signal_watch()
    self.bus.connect('message', self._on_message_cb)

    # Bounded queue decoded buffers are pushed into by the sink callback.
    self.queue = Queue.Queue(QUEUE_SIZE)

    self.mainloop = gobject.MainLoop()
    self.mainloopthread = MainloopThread(self.mainloop)
    self.mainloopthread.start()
    #self.mainloopthread = get_loop_thread()
    ##self.mainloop = self.mainloopthread.mainloop

    # start pipeline
    self.pipeline.set_state(gst.STATE_PLAYING)

    # Block until the GStreamer thread signals discovery (or error).
    self.discovered_cond.acquire()
    while not self.discovered:
        # print 'waiting'
        self.discovered_cond.wait()
    self.discovered_cond.release()

    # input_samplerate is set by the discovery callbacks; its absence
    # means no audio stream was successfully negotiated.
    if not hasattr(self, 'input_samplerate'):
        if hasattr(self, 'error_msg'):
            raise IOError(self.error_msg)
        else:
            raise IOError('no known audio stream found')