def __init__(self, wav_file=None):
    Device.__init__(self)
    self.time_between_chunks = config.get_float(config.GENERAL, 'time_between_chunks', 0.08)
    self._queue = queue.Queue()
    self._last_chunk_time = datetime.datetime.utcfromtimestamp(0)
    self.wav_file = wav_file
def __init__(self, **kwargs):
    Device.__init__(self)
    self.pyaudio_instance = pyaudio.PyAudio()
    self.queue = Queue.Queue()
    self.quit_event = threading.Event()
    self.channels = kwargs.get('channels', 1)
    self.sample_rate = kwargs.get('sample_rate', 16000)
    self.audio_format = kwargs.get('audio_format', pyaudio.paInt16)
    self.device_index = None
def __init__(self, client):
    Device.__init__(self)
    self.pyaudio_instance = pyaudio.PyAudio()
    self.queue = Queue.Queue()
    self.quit_event = threading.Event()
    self.channels = config.get_int(config.MIC, 'channels', 1)
    self.sample_rate = config.get_int(config.MIC, 'sample_rate', 16000)
    self.audio_format = config.get_int(config.MIC, 'audio_format', pyaudio.paInt16)
    self.client = client
    self.device_index = None
def stream(voysis_client: Client, audio_device: Device, wakeword_detector: WakewordDetector):
    durations = {}
    recording_stopper = RecordingStopper(audio_device, durations)
    # Stream through the wakeword detector when one is supplied,
    # otherwise stream the audio device directly.
    if wakeword_detector:
        result = audio_device.stream_with_wakeword(voysis_client, recording_stopper, wakeword_detector)
    else:
        result = audio_device.stream(voysis_client, recording_stopper)
    log.info('Durations: %s', json.dumps(durations))
    if result:
        voysis_client.send_feedback(result['id'], durations=durations)
        return result, result['id'], result['conversationId']
    return None, None, None
def stream(voysis_client: Client, audio_device: Device):
    durations = {}
    recording_stopper = RecordingStopper(audio_device, durations)
    result = audio_device.stream(voysis_client, recording_stopper)
    print('Durations: ' + json.dumps(durations))
    # Report the recorded durations back for the completed query.
    voysis_client.send_feedback(result['id'], durations=durations)
    return result, result['id'], result['conversationId']
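# Hypothetical usage sketch (not in the original source): `voysis_client` and
# `audio_device` stand in for instances constructed elsewhere. The helper returns
# the raw query result plus its id and conversation id.
result, query_id, conversation_id = stream(voysis_client, audio_device)
print('Query %s in conversation %s completed' % (query_id, conversation_id))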
def __init__(self, audio_file=None, **kwargs):
    Device.__init__(self, **kwargs)
    self.time_between_chunks = kwargs.get('time_between_chunks', 0.08)
    self._queue = queue.Queue()
    self._last_chunk_time = datetime.datetime.utcfromtimestamp(0)
    self.audio_file = AudioFile(audio_file)
    if self.audio_file.header is not None:
        # Take the audio parameters from the file's header when one is present.
        self.encoding = self.audio_file.header.encoding
        self.sample_rate = self.audio_file.header.sample_rate
        self.bits_per_sample = self.audio_file.header.bits_per_sample
        self.channels = self.audio_file.header.channels
        self.big_endian = self.audio_file.header.big_endian
    else:
        self.encoding = None
        self.sample_rate = None
        self.bits_per_sample = None
        self.channels = None
        self.big_endian = None
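# Hypothetical usage sketch (not in the original source): FileDevice is an assumed
# class name for the constructor above. The keyword arguments match what the
# constructor reads; audio parameters come from the file's header when present.
device = FileDevice(audio_file='query.wav', time_between_chunks=0.08)
print(device.sample_rate, device.channels, device.bits_per_sample)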
def __init__(self, **kwargs):
    Device.__init__(self, **kwargs)
    self.pyaudio_instance = pyaudio.PyAudio()
    self.queue = Queue.Queue()
    self.quit_event = threading.Event()
    self.channels = kwargs.get('channels', 1)
    self.sample_rate = kwargs.get('sample_rate')
    if self.sample_rate is None:
        # Fall back to the default input device's native sample rate.
        dev_info = self.pyaudio_instance.get_default_input_device_info()
        self.sample_rate = int(dev_info['defaultSampleRate'])
    else:
        self.sample_rate = int(self.sample_rate)
    encoding = kwargs.get('encoding')
    if encoding is None or encoding == 'signed-int':
        self.encoding = pyaudio.paInt16
    elif encoding == 'float':
        self.encoding = pyaudio.paFloat32
    else:
        raise ValueError('Unsupported encoding: ' + str(encoding))
    self.big_endian = kwargs.get('big_endian', False)
    self.device_index = None
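# Hypothetical usage sketch (not in the original source): MicDevice is an assumed
# class name for the constructor above. Omitting sample_rate picks up the default
# input device's rate; 'signed-int' maps to pyaudio.paInt16, 'float' to paFloat32.
mic = MicDevice(channels=1, encoding='signed-int', big_endian=False)
print('Recording at %d Hz' % mic.sample_rate)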
def __init__(self, audio_file=None, **kwargs):
    Device.__init__(self)
    self.time_between_chunks = kwargs.get('time_between_chunks', 0.08)
    self._queue = queue.Queue()
    self._last_chunk_time = datetime.datetime.utcfromtimestamp(0)
    self.audio_file = AudioFile(audio_file)