Пример #1
0
 def on_message(client, userdata, msg):
     # MQTT subscription callback (closure over the enclosing instance):
     # log the incoming message as a conversation turn, then forward
     # server-issued mic text to the brain for processing.
     payload_text = msg.payload.decode('utf8')
     self._logger.info("从服务器监听到MQTT消息: " + msg.topic + " message" + str(msg.payload))
     logger.send_conversation_log(iot_client=self.mic.iot_client,
                                  mic='server',
                                  content=payload_text,
                                  speaker='user')
     if 'mic_text_from_server' in msg.topic:
         # Only topics carrying server-originated mic text are forwarded.
         self.send_to_brain(payload_text)
Пример #2
0
 def active_listen(self):
     """Read one line of user input from stdin and log it as a conversation turn.

     :return: the text the user typed
     """
     typed = input("我: ")
     logger.send_conversation_log(iot_client=self.iot_client,
                                  mic=mic_name,
                                  content='我:' + typed,
                                  speaker='user')
     return typed
Пример #3
0
 def say(self, phrase):
     """Print *phrase* prefixed with the assistant's name and log it as a device turn.

     :param phrase: text to output
     :return: None
     """
     line = profile.myname + ": " + phrase
     print(line)
     logger.send_conversation_log(iot_client=self.iot_client, mic=mic_name,
                                  content=line, speaker='device')
Пример #4
0
 def say(self, phrase):
     """Speak *phrase* via the TTS engine.

     The phrase is logged, then looked up in the TTS speech cache (fetching
     the wave on a miss).  If audio is available it is played; otherwise the
     text is printed to the console as a fallback.

     :param phrase: text to speak
     :return: None
     """
     logger.send_conversation_log(self.iot_client, mic_name, '(TTS)' + phrase, speaker='device')
     is_tts_cached, cache_file_path = self._tts_engine.get_speech_cache(phrase, fetch_wave_on_no_cache=True)
     if is_tts_cached:
         self._logger.info('Saying %s', phrase)
         self.play(cache_file_path)
     else:
         # BUG FIX: original was `print("%s,%s" % profile.myname, phrase)`,
         # which applies % to profile.myname alone and raises
         # "TypeError: not enough arguments for format string".
         print("%s,%s" % (profile.myname, phrase))
Пример #5
0
 def say(self, phrase):
     """Log and output *phrase*, mirroring it to the peer mic when one is attached.

     :param phrase: text to output
     :return: None
     """
     logger.send_conversation_log(iot_client=self.iot_client,
                                  mic=mic_name,
                                  content=phrase,
                                  speaker='device')
     self._logger.info(phrase)
     self._logger.info('send mic server message.')
     if self._peer_mic is None:
         return
     # Relay on a background thread so the peer's playback does not block us.
     threading.Thread(target=self._peer_mic.say, args=(phrase, )).start()
Пример #6
0
    def active_listen(self):
        """Record from the microphone until roughly one second of silence
        (after speech has been detected) or a 12 s hard timeout, write the
        audio to a cache wave file, run ASR on it, log the transcript, and
        return it.

        :return: the ASR transcription of the recorded audio
        """
        threshold = None            # running peak of recent volume scores
        print('Listen Instructions...')
        chunk = 1024                # frames per buffer read
        wave_format = pyaudio.paInt16
        channels = 1
        rate = 16000                # 16 kHz mono
        record_seconds = 12         # hard cap on recording duration

        stream = self._audio.open(format=wave_format,
                                  channels=channels,
                                  input_device_index=1,
                                  rate=rate,
                                  input=True,
                                  frames_per_buffer=chunk)
        self._logger.info("active listen recording")

        frames = []

        # stores the lastN score values (sliding window of volume scores;
        # seeded with 0..19 so the initial average/threshold are small)
        last_n = [i for i in range(20)]
        low_volume_count = 0        # consecutive quiet windows observed
        for i in range(0, int(rate / chunk * record_seconds)):
            data = stream.read(chunk)
            frames.append(data)   # accumulate raw audio for the wave file

            last_n.pop(0)       # save this data point as a score
            last_n.append(self._get_score(data))
            if threshold is None or max(last_n) > threshold:        # track the loudest window seen so far
                threshold = max(last_n)

            average = sum(last_n) / len(last_n)             # mean score over the sliding window
            # self._logger.info('average:%s, threshold:%s', average, threshold)

            # Silence detection only starts once the peak score has exceeded
            # 100, i.e. the user has actually begun speaking.
            if threshold > 100:
                if average < 80:
                    low_volume_count = low_volume_count + 1
                if average > 100:               # speech resumed: reset the silence counter
                    low_volume_count = 0
                if low_volume_count >= 20:      # ~20 quiet chunks (~1.3 s at 16 kHz / 1024 frames) -> stop
                    break

        self.play(WAVE_DONG)
        self._logger.info("active listen done recording")

        stream.stop_stream()
        stream.close()
        wf = wave.open(CACHE_WAVE_RECORDED, 'wb')
        wf.setnchannels(channels)
        wf.setsampwidth(self._audio.get_sample_size(wave_format))
        wf.setframerate(rate)
        wf.writeframes(b''.join(frames))
        wf.close()
        # NOTE(review): the recording is written to CACHE_WAVE_RECORDED but
        # read back as path.CACHE_WAVE_RECORDED -- confirm both names resolve
        # to the same file path.
        asr_result = self.listen(path.CACHE_WAVE_RECORDED)
        logger.send_conversation_log(self.iot_client, mic_name, '(ASR)'+asr_result, speaker='user')
        return asr_result