Example #1
    def __init__(self, input_device_index: int = None):
        """Initiates Porcupine object for hot word detection.

        Args:
            input_device_index: Index of Input Device to use.

        See Also:
            - Instantiates a Porcupine object and monitors the audio stream for occurrences of keywords.
            - A higher sensitivity results in fewer misses at the cost of a higher false-alarm rate.
            - sensitivity: Tolerance/sensitivity level. Taken from the argument or the env var ``sensitivity``, defaulting to ``0.5``.

        References:
            - `Audio Overflow <https://people.csail.mit.edu/hubert/pyaudio/docs/#pyaudio.Stream.read>`__ handling.
        """
        logger.info(f"Initiating hot-word detector with sensitivity: {env.sensitivity}")
        keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in [pathlib.PurePath(__file__).stem]]
        self.input_device_index = input_device_index
        self.recorded_frames = []

        self.py_audio = PyAudio()
        self.detector = pvporcupine.create(
            library_path=pvporcupine.LIBRARY_PATH,
            model_path=pvporcupine.MODEL_PATH,
            keyword_paths=keyword_paths,
            sensitivities=[env.sensitivity]
        )
        self.audio_stream = None
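Example #1 only constructs the detector and leaves self.audio_stream unset; the stream is opened and polled elsewhere in that class. As a rough sketch of how the attributes created above might be used, the loop below reads one frame per iteration and passes exception_on_overflow=False to PyAudio's Stream.read, which is the behaviour the "Audio Overflow" reference in the docstring points at. The start method name, the loop body, and the module-level struct and paInt16 imports are assumptions for illustration, not part of the original project.

    def start(self) -> None:
        # Hypothetical companion to __init__ above: open the input stream and
        # poll it one frame at a time until a keyword is detected.
        self.audio_stream = self.py_audio.open(
            rate=self.detector.sample_rate,
            channels=1,
            format=paInt16,
            input=True,
            frames_per_buffer=self.detector.frame_length,
            input_device_index=self.input_device_index)
        while True:
            # dropping overflowed buffers instead of raising keeps the loop alive
            pcm = self.audio_stream.read(self.detector.frame_length,
                                         exception_on_overflow=False)
            pcm = struct.unpack_from("h" * self.detector.frame_length, pcm)
            if self.detector.process(pcm) >= 0:
                logger.info("Hot word detected")
                break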
Example #2
File: main.py Project: zfork/porcupine
    def run(self):
        ppn = None
        recorder = None

        try:
            ppn = pvporcupine.create(access_key=self._access_key,
                                     keywords=KEYWORDS,
                                     sensitivities=[0.75] * len(KEYWORDS))

            recorder = PvRecorder(device_index=self._device_index,
                                  frame_length=ppn.frame_length)
            recorder.start()

            self._is_ready = True

            while not self._stop:
                pcm = recorder.read()
                keyword_index = ppn.process(pcm)
                if keyword_index >= 0:
                    self._keyword_var.set(KEYWORDS[keyword_index])
                    print(self._keyword_var.get())
        finally:
            if recorder is not None:
                recorder.delete()

            if ppn is not None:
                ppn.delete()

        self._is_stopped = True
Example #3
def wake():
    handle = pvporcupine.create(keywords=['bumblebee'], sensitivities=[0.1])
    pa = pyaudio.PyAudio()
    audio_stream = pa.open(rate=handle.sample_rate,
                           channels=1,
                           format=pyaudio.paInt16,
                           input=True,
                           frames_per_buffer=handle.frame_length)
    while True:
        pcm = audio_stream.read(handle.frame_length)
        pcm = struct.unpack_from("h" * handle.frame_length, pcm)
        if handle.process(pcm):
            # answer “我在” ("I'm here")
            q_speak.put_nowait("我在")
            try:
                asr_txt = one_asr()
                # asr_txt = ASR_WAKE_WORD
                q_hmi_you.put_nowait(asr_txt)
                # intent analysis
                intent_txt = intent_for_send(asr_txt)
                print(f'The intent is: {intent_txt}')
                q_hmi_sva.put_nowait(intent_txt)
                if intent_txt is None:
                    # answer “想好说什么再喊我” ("think about what you want to say before calling me")
                    q_speak.put_nowait("想好说什么再喊我")
                else:
                    # answer “好的” ("okay")
                    q_speak.put_nowait("好的")
                    q_opc.put_nowait(intent_txt)
            except sr.WaitTimeoutError:
                # answer “没别的事,我休息一下” ("nothing else, I'll rest for a while")
                q_speak.put_nowait('没别的事,我休息一下')
Example #4
    def run(self, sentensivity):
        porcupine = None
        try:
            keywords = ["jarvis"]
            porcupine = pvporcupine.create(keywords=keywords,
                                           sensitivities=[sentensivity])
            pa = pyaudio.PyAudio()
            audio_stream = pa.open(rate=porcupine.sample_rate,
                                   channels=1,
                                   format=pyaudio.paInt16,
                                   input=True,
                                   frames_per_buffer=porcupine.frame_length)

            logging.info('\nListening {%s}' % keywords)

            while not self.stopped:
                pcm = audio_stream.read(porcupine.frame_length)
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                keyword_index = porcupine.process(pcm)
                if keyword_index >= 0 and not self.recording:
                    self.recording = True
                    logging.info(
                        f'[ACTION] Detected {keywords[keyword_index]} at '
                        f'{datetime.now().hour}:{datetime.now().minute}')
                    self.recognize_input()

        except MemoryError:
            porcupine.delete()
            self.start(self.sentensivity)
        except:
            logging.error(f"[ERROR] {traceback.print_exc()}")
Example #5
def is_wake_up_word_said(input_device_index=13,
                         sensitivity=0.5,
                         keyword="hey pico",
                         timeout=10):
    keyword_file_path = [pvporcupine.KEYWORD_FILE_PATHS[keyword]]
    num_keywords = len(keyword_file_path)

    porcupine = pvporcupine.create(library_path=pvporcupine.LIBRARY_PATH,
                                   model_file_path=pvporcupine.MODEL_FILE_PATH,
                                   keyword_file_paths=keyword_file_path,
                                   sensitivities=[sensitivity] * num_keywords)

    pa = pyaudio.PyAudio()
    audio_stream = pa.open(rate=porcupine.sample_rate,
                           channels=1,
                           format=pyaudio.paInt16,
                           input=True,
                           frames_per_buffer=porcupine.frame_length,
                           input_device_index=input_device_index)

    start = time()
    keyword_said = False
    while not keyword_said and time() - start < timeout:
        pcm = audio_stream.read(porcupine.frame_length)
        pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)

        if porcupine.process(pcm):
            keyword_said = True
    audio_stream.close()
    porcupine.delete()
    return keyword_said
Example #6
    def __init__(self, sensitivity):
        super(PorcupineDemo, self).__init__()

        self._keywords = list(KEYWORDS_COLOR.keys())
        self._porcupine = pvporcupine.create(keywords=self._keywords,
                                             sensitivities=[sensitivity] *
                                             len(KEYWORDS_COLOR))
Example #7
    def __init__(self, key_phrase="hey mycroft", config=None, lang="en-us"):
        super().__init__(key_phrase, config, lang)
        keyword_file_paths = [
            expanduser(x.strip()) for x in self.config.get(
                "keyword_file_path", "hey_mycroft.ppn").split(',')
        ]
        sensitivities = self.config.get("sensitivities", 0.5)
        access_key = self.config.get("access_key", None)

        try:
            import pvporcupine
            from pvporcupine.util import (pv_library_path, pv_model_path)
        except ImportError as err:
            raise Exception(
                "Python bindings for Porcupine not found. "
                "Please run \"mycroft-pip install pvporcupine\"") from err

        if isinstance(sensitivities, float):
            sensitivities = [sensitivities] * len(keyword_file_paths)
        else:
            sensitivities = [float(x) for x in sensitivities.split(',')]

        self.audio_buffer = []
        self.has_found = False
        self.num_keywords = len(keyword_file_paths)

        LOG.info(
            'Loading Porcupine using keyword path {} and sensitivities {}'.
            format(keyword_file_paths, sensitivities))
        self.porcupine = pvporcupine.create(access_key,
                                            keyword_paths=keyword_file_paths,
                                            sensitivities=sensitivities)

        LOG.info('Loaded Porcupine')
Example #8
    def on_start(self):
       self.running = True
       print("\nStarting Hotword detection...")

       self.keywords = ["caspar"]
       keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in self.keywords]
       self.sensitivities = [0.5] * len(keyword_paths)

       self.keywords = list()
       for x in keyword_paths:
           self.keywords.append(os.path.basename(x).replace('.ppn', '').split('_')[0])

       self.porcupine = pvporcupine.create(
           library_path=pvporcupine.LIBRARY_PATH,
           model_path=pvporcupine.MODEL_PATH,
           keyword_paths=keyword_paths,
           sensitivities=self.sensitivities)

       self.pa = pyaudio.PyAudio()

       self.audio_stream = self.pa.open(
           rate=self.porcupine.sample_rate,
           channels=1,
           format=pyaudio.paInt16,
           input=True,
           frames_per_buffer=self.porcupine.frame_length,
           input_device_index=None)
Example #9
def listen(keywords=["blueberry"], sensitivity=0.5, action=None):

    # define callback for action to take when wake words are detected
    def _audio_callback(in_data, frame_count, time_info, status):
        if frame_count >= porcupine.frame_length:
            pcm = struct.unpack_from("h" * porcupine.frame_length, in_data)
            result = porcupine.process(pcm)
            index = result if len(keywords) > 1 else int(result) - 1
            if index >= 0:
                if action is None:
                    print("I heard {}!".format(keywords[index]))
                else:
                    action(result)

        return None, pyaudio.paContinue

    porcupine = None
    pa = None
    audio_stream = None

    try:
        # initialize wake word detection
        sensitivities = [float(sensitivity)] * len(keywords)
        porcupine = pvporcupine.create(keywords=keywords,
                                       sensitivities=sensitivities)

        # create input audio stream
        pa = pyaudio.PyAudio()
        audio_stream = pa.open(
            rate=porcupine.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=porcupine.frame_length,
            input_device_index=None,
            stream_callback=_audio_callback,
        )

        # start monitoring stream
        audio_stream.start_stream()
        print("Listening for keywords ...")

        while True:
            time.sleep(0.1)

    except KeyboardInterrupt:
        print("Stopping ...")

    finally:
        if audio_stream is not None:
            audio_stream.stop_stream()
            audio_stream.close()

        if pa is not None:
            pa.terminate()

        # delete Porcupine last to avoid segfault in callback.
        if porcupine is not None:
            porcupine.delete()
Example #10
    def __init__(self, *args, wakewords='jarvis', **kwargs):
        super().__init__(*args, **kwargs)

        if type(wakewords) is not list:
            wakewords = [wakewords]
        self.wakewords = wakewords

        self.handle = pvporcupine.create(keywords=wakewords)
Example #11
 def install(self, modules):
     modules = super().install(modules)
     self.__audio_recoder = modules['AudioRecorder']
     self.__detect_handler = pvporcupine.create(
         #library_path=self.__default_library_path(),
         #model_path=self.__model_file_path(),
         keyword_paths=[self.__keyword_file_path(self.__keyword_filename)],
         sensitivities=[self.__sensitivity])
Example #12
    def __init__(self, access_key, device_index, sensitivity):
        super(PorcupineDemo, self).__init__()

        self._device_index = device_index
        self._keywords = list(KEYWORDS_COLOR.keys())
        self._porcupine = pvporcupine.create(access_key=access_key,
                                             keywords=self._keywords,
                                             sensitivities=[sensitivity] *
                                             len(KEYWORDS_COLOR))
Example #13
    def __init__(self):
        self._porcupine = pvporcupine.create(keywords=["picovoice"])

        self._sound = pyaudio.PyAudio()

        self._audio_stream = self._sound.open(
            rate=self._porcupine.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=self._porcupine.frame_length)
Example #14
    def __init__(self):
        # todo: hardcoded parameters
        sensitivity = [0.5]

        self.handle = pvporcupine.create(keywords=['jarvis'], sensitivities=sensitivity)
        self.pa = pyaudio.PyAudio()
        self.audio_stream = self.pa.open(
            rate=self.handle.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=self.handle.frame_length)
Example #15
    def run(self):
        keywords = list()
        for x in self._keyword_paths:
            keywords.append(
                os.path.basename(x).replace('.ppn', '').split('_')[0])

        porcupine = None
        audio_stream_mic = None

        try:
            porcupine = pvporcupine.create(library_path=self._library_path,
                                           model_path=self._model_path,
                                           keyword_paths=self._keyword_paths,
                                           sensitivities=self._sensitivities)

            self._pa = pyaudio.PyAudio()
            self.play_audio(WAV_READY_FILENAME)

            audio_stream_mic = self._pa.open(
                rate=porcupine.sample_rate,
                channels=1,
                format=pyaudio.paInt16,
                input=True,
                frames_per_buffer=porcupine.frame_length)

            print('Listening {')
            for keyword, sensitivity in zip(keywords, self._sensitivities):
                print('  %s (%.2f)' % (keyword, sensitivity))
            print('}')

            while True:
                pcm = audio_stream_mic.read(porcupine.frame_length)
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                result = porcupine.process(pcm)

                if result >= 0:
                    print('[%s] Detected %s' %
                          (str(datetime.now()), keywords[result]))

                    self.play_audio(WAV_DETECTED_FILENAME)

        except KeyboardInterrupt:
            print('Stopping ...')

        finally:
            if porcupine is not None:
                porcupine.delete()

            if audio_stream_mic is not None:
                audio_stream_mic.close()

            if self._pa is not None:
                self._pa.terminate()
Example #16
	def __init__(self):
		super().__init__()
		self._working = self.ThreadManager.newEvent('ListenForWakeword')
		self._buffer = queue.Queue()
		self._hotwordThread = None

		try:
			self._handler = pvporcupine.create(keywords=['porcupine', 'bumblebee', 'terminator', 'blueberry'])
			with self.Commons.shutUpAlsaFFS():
				self._audio = pyaudio.PyAudio()
		except:
			self._enabled = False
Example #17
def picovoice():
    try:
        porcupine = pvporcupine.create(keywords=["picovoice", "blueberry"])

        pa = pyaudio.PyAudio()

        audio_stream = pa.open(rate=porcupine.sample_rate,
                               channels=1,
                               format=pyaudio.paInt16,
                               input=True,
                               frames_per_buffer=porcupine.frame_length)

        while True:
            pcm = audio_stream.read(porcupine.frame_length)
            pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)

            keyword_index = porcupine.process(pcm)

            if keyword_index >= 0:
                print("Hotword Detected")
                audio_stream.close()
                main()
                print("Done")
    except KeyboardInterrupt:
        if porcupine is not None:
            porcupine.delete()
            print("deleting porc")

        if audio_stream is not None:
            audio_stream.close()
            print("closing stream")

        if pa is not None:
            pa.terminate()
            print("terminating pa")

            exit(0)

    finally:
        if porcupine is not None:
            porcupine.delete()
            print("deleting porc")

        if audio_stream is not None:
            audio_stream.close()
            print("closing stream")

        if pa is not None:
            pa.terminate()
            print("terminating pa")

        picovoice()
Example #18
    def main(self):
        print("ready")
        self.porcupine = None
        pa = None
        audio_stream = None

        self.porcupine = pvporcupine.create(keywords=["jarvis"])

        pa = pyaudio.PyAudio()

        audio_stream = pa.open(rate=self.porcupine.sample_rate,
                               channels=1,
                               format=pyaudio.paInt16,
                               input=True,
                               frames_per_buffer=self.porcupine.frame_length)

        while True:

            try:
                pcm = audio_stream.read(self.porcupine.frame_length)
                pcm = struct.unpack_from("h" * self.porcupine.frame_length,
                                         pcm)
            except:
                # reopen the input stream after a read error and skip this frame,
                # otherwise pcm below would be stale or undefined
                audio_stream = pa.open(
                    rate=self.porcupine.sample_rate,
                    channels=1,
                    format=pyaudio.paInt16,
                    input=True,
                    frames_per_buffer=self.porcupine.frame_length)
                continue

            keyword_index = self.porcupine.process(pcm)

            if keyword_index >= 0:
                print("Hotword Detected")

                try:  #Tries terminating an action if it exists
                    action.terminate()
                except:
                    pass

                if audio_stream is not None:
                    audio_stream.close()
                said = speak_listen.listen()  #Listens for user input
                print(said)

                #action = multiprocessing.Process(target=

                self.reply(said)
Example #19
    def __init__(self,
                 decoder_model,
                 resource=RESOURCE_FILE,
                 sensitivity=0.75,
                 audio_gain=1,
                 continue_recording=False,
                 output_dir=".",
                 delete_active_recording=False):

        self.is_running = False
        self.is_interrupted = False
        self.is_recording = False
        self.is_terminated = False

        self._start_recording_callback = None
        self._continue_recording_callback = None
        self._stop_recording_callback = None

        # Setup Porcupine

        self.detector = pvporcupine.create(keywords=['alexa'],
                                           sensitivities=[sensitivity])

        # create detector buffer
        self.detector_buffer = DetectorRingBuffer(self.detector.sample_rate *
                                                  5)
        self.backward_buffer = None

        # connect to the PyAudio stream
        self.audio = pyaudio.PyAudio()
        self.stream_in = self.audio.open(
            input=True,
            output=False,
            format=pyaudio.paInt16,
            channels=1,
            rate=self.detector.sample_rate,
            frames_per_buffer=self.detector.frame_length,
            stream_callback=self._audio_callback)

        # listen for interrupts (SIGINT triggers self.stop)
        signal.signal(signal.SIGINT, self.stop)

        self._enable_continue_recording = continue_recording
        self._output_dir = output_dir
        self._delete_active_recording = delete_active_recording

        Log.debug(self._tag, "AudioHandler created")
Example #20
    def sense(self):

        keywords = ["caspar"]
        keyword_paths = [pvporcupine.KEYWORD_PATHS[x] for x in keywords]
        sensitivities = [0.5] * len(keyword_paths)

        keywords = list()
        for x in keyword_paths:
            keywords.append(os.path.basename(x).replace('.ppn', '').split('_')[0])

        porcupine = pvporcupine.create(
            library_path=pvporcupine.LIBRARY_PATH,
            model_path=pvporcupine.MODEL_PATH,
            keyword_paths=keyword_paths,
            sensitivities=sensitivities)

        pa = pyaudio.PyAudio()

        audio_stream = pa.open(
            rate=porcupine.sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=porcupine.frame_length,
            input_device_index=None)

        print('\nListening {')
        for keyword, sensitivity in zip(keywords, sensitivities):
            print('  %s (%.2f)' % (keyword, sensitivity))
        print('}')

        while self.running:

            pcm = audio_stream.read(porcupine.frame_length)
            pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)

            result = porcupine.process(pcm)
            if result >= 0:
                print('[%s] Detected %s' % (str(datetime.now()), keywords[result]))
                self.assert_belief(HOTWORD_DETECTED("ON"))
                self.running = False
                break
        audio_stream.close()
        pa.terminate()
        porcupine.delete()
Example #21
def esperaSaludo():  # TODO: add a timeout so the function exits with an error result after a maximum wait
    print("entra en esperaSaludo")

    porcupine = None
    pa = None
    audio_stream = None

    try:
        porcupine = pvporcupine.create(keywords=['terminator'], sensitivities=[1.0])

        pa = pyaudio.PyAudio()

        audio_stream = pa.open(
                        rate=porcupine.sample_rate,
                        channels=1,
                        format=pyaudio.paInt16,
                        input=True,
                        frames_per_buffer=porcupine.frame_length,
                        input_device_index=11)

        print("Microfono listo")
        while True:
            pcm = audio_stream.read(porcupine.frame_length)
            pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)

            keyword_index = porcupine.process(pcm)

            if keyword_index >= 0:
                print("DETECTADA!")
                # pa.close()
                break
        
    finally:
        if porcupine is not None:
            porcupine.delete()

        if audio_stream is not None:
            audio_stream.close()

        if pa is not None:
            pa.terminate()

    print("sale de esperaSaludo")
Example #22
def _run(input_audio_file_path, library_path, model_file_path,
         keyword_file_paths, sensitivity):
    """
    Monitors an input audio file for occurrences of keywords for which keyword files are provided and prints their
    occurrence time (in seconds from start of file).

    :param input_audio_file_path: Absolute path to input audio file. The file should have a sample rate of 16000 and
    be single-channel.
    :param library_path: Absolute path to Porcupine's dynamic library.
    :param model_file_path: Absolute path to the model parameter file.
    :param keyword_file_paths: List of absolute paths to keyword files.
    :param sensitivity: Sensitivity parameter. For more information refer to 'include/pv_porcupine.h'. It uses the
    same sensitivity value for all keywords.
    :return:
    """

    num_keywords = len(keyword_file_paths)

    porcupine = pvporcupine.create(library_path=library_path,
                                   model_file_path=model_file_path,
                                   keyword_file_paths=keyword_file_paths,
                                   sensitivities=[sensitivity] * num_keywords)

    def _frame_index_to_sec(frame_index):
        return float(frame_index * porcupine.frame_length) / float(
            porcupine.sample_rate)

    audio, sample_rate = soundfile.read(input_audio_file_path, dtype='int16')
    assert sample_rate == porcupine.sample_rate

    num_frames = len(audio) // porcupine.frame_length
    for i in range(num_frames):
        frame = audio[i * porcupine.frame_length:(i + 1) *
                      porcupine.frame_length]
        result = porcupine.process(frame)
        if num_keywords == 1 and result:
            print('detected keyword at time %f' % _frame_index_to_sec(i))
        elif num_keywords > 1 and result >= 0:
            print('detected keyword index %d at time %f' %
                  (result, _frame_index_to_sec(i)))

    porcupine.delete()
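The _frame_index_to_sec helper above converts a frame counter into a timestamp by multiplying by the frame length and dividing by the sample rate. A quick standalone check of that arithmetic, assuming the 512-sample frames and 16 kHz rate that Porcupine handles typically report through frame_length and sample_rate (the real code reads both values from the created handle):

# seconds covered by one frame: frame_length / sample_rate
frame_length = 512       # samples per Porcupine frame (typical value)
sample_rate = 16000      # Hz; Porcupine expects 16 kHz single-channel audio
seconds_per_frame = frame_length / sample_rate   # 0.032 s

# frame index 100 therefore corresponds to 3.2 seconds into the file,
# matching _frame_index_to_sec(100) above
print(100 * seconds_per_frame)   # 3.2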
Example #23
def setup_voice_control():
    try:
        global porcupine
        global pa
        global audio_stream
        porcupine = pvporcupine.create(keyword_paths=[
            'wake_word\\pause_game_windows_3_30_2021_v1.9.0\\pause_game_windows_2021-03-30-utc_v1_9_0.ppn',
            'wake_word\\play_game_windows_3_30_2021_v1.9.0\\play_game_windows_2021-03-30-utc_v1_9_0.ppn'
        ],
                                       keywords=["blueberry", "grapefruit"])

        pa = pyaudio.PyAudio()
        print("loading complete")
        audio_stream = pa.open(rate=porcupine.sample_rate,
                               channels=1,
                               format=pyaudio.paInt16,
                               input=True,
                               frames_per_buffer=porcupine.frame_length)
        return True
    except Exception as e:
        return False
Example #24
    def __init__(self, parent, keywords, user):
        self.parent = parent
        self.user = user
        self.channel = parent.client.channel
        self.pcp = create(keywords=keywords, sensitivities=[0.5] * len(keywords))
        self.unpacker = Struct(f'{self.pcp.frame_length}h')
        self.welcome_response = None
        self.wuw_input = asyncio.Queue()
        self.input_queue = self.wuw_input

        self.what = parent.what
        self.that = parent.that
        self.ticktock = parent.ticktock
        self.keywords = parent.keywords
        self.play = parent.play
        self.play_loop = parent.play_loop
        self.play_stream = parent.play_stream
        self.play_interruptible = parent.play_interruptible
        self.say = parent.say
        self.listen_task = BackgroundTask()
        self.listen_task.start(self.listen_loop())
        logger.info(f"Started {self!r}")
Example #25
    def run(self):
        porcupine = py_audio_instance = audio_stream = None
        try:
            porcupine = pvporcupine.create(
                keyword_paths=[
                    self.keyword_directory + "/" + f
                    for f in listdir(self.keyword_directory)
                ],
                sensitivities=[self.sensitivity] *
                len(listdir(self.keyword_directory)))
            py_audio_instance = pyaudio.PyAudio()
            audio_stream = py_audio_instance.open(
                rate=porcupine.sample_rate,
                channels=1,
                format=pyaudio.paInt16,
                input=True,
                frames_per_buffer=porcupine.frame_length,
                input_device_index=self.input_device_index)

            print("Listening for keyword.. ")

            while True:
                audio_frame = audio_stream.read(porcupine.frame_length)
                pcm = struct.unpack_from("h" * porcupine.frame_length,
                                         audio_frame)
                if porcupine.process(pcm) >= 0:
                    self.run_func()
                    print("Listening for keyword.. ")

        except KeyboardInterrupt:
            print("Stopping ...")

        finally:
            if porcupine is not None:
                porcupine.delete()
            if audio_stream is not None:
                audio_stream.close()
            if py_audio_instance is not None:
                py_audio_instance.terminate()
Example #26
 def __init__(self,
              pv_model='./hey_diego_linux_2021-05-23-utc_v1_9_0.ppn',
              speech_url='http://127.0.0.1:5000/speech',
              status_url='http://127.0.0.1:5000/status',
              nlp_url='http://127.0.0.1:5000/nlp'):
     self.handle = pvporcupine.create(keyword_paths=[pv_model])
     # Hey computer for the time being
     #self.handle = pvporcupine.create(keywords=['computer'])
     # Need to update this with an actual address
     self.url = speech_url
     self.status_url = status_url
     self.nlp_url = nlp_url
     self.pa = pyaudio.PyAudio()
     self.audio_stream = self.pa.open(
         rate=self.handle.sample_rate,
         channels=1,
         format=pyaudio.paInt16,
         input=True,
         frames_per_buffer=self.handle.frame_length)
     # Download dependencies
     self.synthesizer = torch.hub.load('coqui-ai/TTS:dev',
                                       'tts',
                                       source='github')
Example #27
    def run(self):
        """
        Creates an input audio stream, instantiates an instance of Porcupine object, and monitors the audio stream for
        occurrences of the wake word(s). It prints the time of detection for each occurrence and the wake word.
        """
        keywords = list()
        for x in self._keyword_paths:
            keywords.append(
                os.path.basename(x).replace('.ppn', '').split('_')[0])

        porcupine = None
        try:
            porcupine = pvporcupine.create(
                library_path=pvporcupine.LIBRARY_PATH,
                model_path=pvporcupine.MODEL_PATH,
                keyword_paths=self._keyword_paths,
                sensitivities=self._sensitivities)

            my_log.debug(f"listening on keywords {keywords}")

            with MicrophoneStream(self._audio_interface, porcupine.sample_rate,
                                  porcupine.frame_length) as stream:
                audio_generator = stream.generator()
                for x in audio_generator:
                    if self.should_stop:
                        raise SystemExit("should_stop")
                    pcm = struct.unpack_from("h" * porcupine.frame_length, x)
                    result = porcupine.process(pcm)
                    if result >= 0:
                        my_log.debug(f"Detected {keywords[result]}")
                        porcupine_heard(keywords[result])
        except Exception as e:
            if type(e) != SystemExit:
                my_log.exception(e)
        finally:
            if porcupine is not None:
                porcupine.delete()
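Example #27 depends on a MicrophoneStream helper that is not shown on this page. The sketch below is only a guess at the contract its run() loop relies on: a context manager wrapping an existing pyaudio.PyAudio instance whose generator() yields raw frame_length-sized byte buffers. It is an assumption for illustration, not the project's actual implementation.

import pyaudio


class MicrophoneStream:
    """Hypothetical stand-in for the MicrophoneStream used in Example #27."""

    def __init__(self, audio_interface, sample_rate, frame_length):
        self._audio_interface = audio_interface   # an existing pyaudio.PyAudio()
        self._sample_rate = sample_rate
        self._frame_length = frame_length
        self._stream = None

    def __enter__(self):
        self._stream = self._audio_interface.open(
            rate=self._sample_rate,
            channels=1,
            format=pyaudio.paInt16,
            input=True,
            frames_per_buffer=self._frame_length)
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._stream.stop_stream()
        self._stream.close()

    def generator(self):
        # yield one raw frame per iteration; the caller unpacks it with
        # struct.unpack_from("h" * frame_length, ...) before process()
        while True:
            yield self._stream.read(self._frame_length,
                                    exception_on_overflow=False)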
Example #28
def wake_word():
    porcupine = None
    pa = None
    audio_stream = None

    try:
        porcupine = pvporcupine.create(keywords=["blueberry"])

        pa = pyaudio.PyAudio()

        audio_stream = pa.open(rate=porcupine.sample_rate,
                               channels=1,
                               format=pyaudio.paInt16,
                               input=True,
                               frames_per_buffer=porcupine.frame_length)

        while True:
            pcm = audio_stream.read(porcupine.frame_length)
            pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)

            keyword_index = porcupine.process(pcm)

            if keyword_index >= 0:
                print("Hotword Detected")
                wake_word_detected = True
                break
            else:
                print('not detected')
    finally:
        if porcupine is not None:
            porcupine.delete()

        if audio_stream is not None:
            audio_stream.close()

        if pa is not None:
            pa.terminate()
Example #29
    def porcupine_wait_for_hotword(self, source, kywds, timeout=None):
        self._waiting_for_hotword_callback()
        # porcupine stuff
        num_keywords = len(kywds)
        porcupine = pvporcupine.create(keywords=kywds)
        print(kywds)

        elapsed_time = 0
        seconds_per_buffer = float(source.CHUNK) / source.SAMPLE_RATE
        resampling_state = None

        # buffers capable of holding 5 seconds of original audio
        five_seconds_buffer_count = int(math.ceil(5 / seconds_per_buffer))
        # buffers capable of holding 0.5 seconds of resampled audio
        half_second_buffer_count = int(math.ceil(0.5 / seconds_per_buffer))
        frames = collections.deque(maxlen=five_seconds_buffer_count)

        while True:
            elapsed_time += seconds_per_buffer
            if timeout and elapsed_time > timeout:
                raise WaitTimeoutError(
                    "listening timed out while waiting for hotword to be said")

            buffer = source.stream.read(source.CHUNK)
            if len(buffer) == 0: break  # reached end of the stream
            frames.append(buffer)

            pcm = struct.unpack_from("h" * porcupine.frame_length, buffer)
            porcupine_result = porcupine.process(pcm)

            if num_keywords == 1 and porcupine_result:
                break
            elif num_keywords > 1 and porcupine_result >= 0:
                break

        return b"".join(frames), elapsed_time
Example #30
import os

import pyaudio
import pvporcupine
import wit

from gtts import gTTS
from logmmse import logmmse_from_file
from mpyg321.mpyg321 import MPyg321Player

from responder import Responder

KEYWORDS = ["jarvis", "bumblebee"]

rp = Responder()
pa = pyaudio.PyAudio()
pl = MPyg321Player()
ai = wit.Wit(os.getenv("WITAI_TOKEN"))
porcupine = pvporcupine.create(keywords=KEYWORDS)

sample_rate = porcupine.sample_rate
frames_per_buffer = porcupine.frame_length
DURATION = 4.5

audio_stream = pa.open(
    rate=sample_rate,
    channels=1,
    format=pyaudio.paInt16,
    input=True,
    frames_per_buffer=frames_per_buffer,
)


def get_command_recording():