Exemplo n.º 1
0
def detected_callback():
    """Hotword callback: beep, raise the detected flag, stop any playback."""
    global detected, mp3_player
    snowboydecoder.play_audio_file()
    detected = True
    # If an mp3 is currently playing, halt it and drop the player reference.
    playback_active = mp3_player is not None and mp3_player.is_playing()
    if playback_active:
        mp3_player.stop()
        mp3_player = None
Exemplo n.º 2
0
def detect_callback():
    """Hotword callback: pause detection, run one assistant turn, resume.

    The detector is terminated first so the microphone is free while
    `assistant.assist()` runs; detection is then restarted with this
    same callback.
    """
    global assistant
    detector.terminate()
    # 'ding' signals that the assistant is now listening.
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    assistant.assist()
    # 'dong' signals that the assistant turn has finished.
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    detector.start(detected_callback=detect_callback, interrupt_check=interrupt_callback, sleep_time=0.03)
Exemplo n.º 3
0
def say(text):
    """Synthesize *text* via the rabbit TTS HTTP API and play it aloud.

    The synthesized audio is written to a temporary wav file, played,
    and always removed afterwards.

    :param text: the sentence to speak.
    """
    source = 'http://rabbit:5002/api/tts'
    r = requests.get(source, params={'text': text})
    filename = 'say_file.wav'
    # FIX: use a context manager so the file handle is flushed and closed
    # before playback (the original open(...).write(...) leaked the handle).
    with open(filename, 'wb') as f:
        f.write(r.content)
    try:
        snowboydecoder.play_audio_file(filename)
    finally:
        # Remove the temp file even when playback raises.
        os.remove(filename)
Exemplo n.º 4
0
def detected_callback():
    """Hotword callback: play the wake beep and stop in-progress playback.

    Does nothing when outside the allowed response hours.
    """
    global player
    if not utils.is_proper_time():
        return
    snowboydecoder.play_audio_file(constants.getData('beep_hi.wav'))
    # Halt and discard the current player, if one is active.
    playback_active = player is not None and player.is_playing()
    if playback_active:
        player.stop()
        player = None
Exemplo n.º 5
0
def do_not_bother_callback():
    """Toggle do-not-bother mode and announce the new state with a sound."""
    utils.do_not_bother = not utils.do_not_bother
    if not utils.do_not_bother:
        # Mode just turned off.
        snowboydecoder.play_audio_file(constants.getData('on.wav'))
        logger.info('勿扰模式关闭')
    else:
        # Mode just turned on.
        snowboydecoder.play_audio_file(constants.getData('off.wav'))
        logger.info('勿扰模式打开')
Exemplo n.º 6
0
 def activeListen(self):
     """Proactively listen for one utterance (for multi-turn dialogue).

     Plays the hi-beep, records via snowboy's ActiveListener, plays the
     lo-beep, transcribes the recording, deletes the temp file, and
     returns the transcript.
     """
     snowboydecoder.play_audio_file(constants.getData('beep_hi.wav'))
     active_listener = snowboydecoder.ActiveListener([constants.getHotwordModel(config.get('hotword', 'wukong.pmdl'))])
     recording = active_listener.listen()
     snowboydecoder.play_audio_file(constants.getData('beep_lo.wav'))
     transcript = self.asr.transcribe(recording)
     utils.check_and_delete(recording)
     return transcript
Exemplo n.º 7
0
def start_listening():
    """Record one utterance from the microphone and return the transcript.

    Plays a 'ding' prompt, listens for speech, then runs Google speech
    recognition through the `recognize` helper.

    :returns: the recognized text.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
        print("Say something!")
        audio = r.listen(source)
        # sphinxRecognizer = lambda: r.recognize_sphinx(audio)
        heard = recognize(
            lambda: r.recognize_google(audio))  # , recognize(sphinxRecognizer)
        # FIX: `print heard` was Python 2 statement syntax; the rest of
        # this function already uses Python 3 print() calls.
        print(heard)
        return heard
Exemplo n.º 8
0
 def converse(self, fp):
     """Core conversation logic: transcribe the recording *fp* and respond.

     Any failure is logged as critical and temp files are cleaned up.
     """
     try:
         # Stop whatever is currently playing before handling the query.
         self.interrupt()
         snowboydecoder.play_audio_file(constants.getData('beep_lo.wav'))
         query = self.asr.transcribe(fp)
         # The recording file is no longer needed once transcribed.
         utils.check_and_delete(fp)
         self._doResponse(query)
     except Exception as e:
         logger.critical(e)
         utils.clean()
Exemplo n.º 9
0
def startSentence():
    """Listen for one sentence and forward the transcript over the socket.

    On recognition failure an error 'dong' is played; when the audio was
    simply not understood, keyword listening is restarted as well.
    """
    try:
        heard = webListener.start_listening()
        socketManager.send(heard)
    except sr.UnknownValueError:
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
        startKeyword()
        print("Speech Recognition could not understand audio")
    except sr.RequestError as e:
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
        print("Could not request results from Speech Recognition service; {0}".
              format(e))
Exemplo n.º 10
0
 def detect_callback():
     """Hotword callback: hand control to the assistant, then wait until
     snowboy is allowed to resume and restart detection.

     NOTE(review): the `while True` loop has no break, so every hotword
     detection nests another `detector.start()` call inside this one —
     confirm this recursion is intended.
     """
     detector.terminate()
     snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
     # Signal the assistant thread to run and mark the hotword as consumed.
     globalmodule.assistantcontroller = True
     globalmodule.hotworddetected = False
     while True:
         # Busy-wait until another component re-enables snowboy.
         if globalmodule.snowboycontroller:
             globalmodule.snowboycontroller = False
             logger.info('Listening... Press Ctrl+C to exit')
             detector.start(detected_callback=detect_callback,
                            interrupt_check=interrupt_callback,
                            sleep_time=0.03)
Exemplo n.º 11
0
def conversation(fp):
    """One-shot dialogue: transcribe *fp*, fetch an AI reply, speak it.

    Errors of type ValueError are logged as critical and temp files are
    cleaned up.
    """
    global player, asr, ai, tts
    try:
        snowboydecoder.play_audio_file(constants.getData('beep_lo.wav'))
        print("converting audio to text")
        user_text = asr.transcribe(fp)
        utils.check_and_delete(fp)
        reply = ai.chat(user_text)
        speech_file = tts.get_speech(reply)
        player = Player.getPlayerByFileName(speech_file)
        player.play(speech_file)
    except ValueError as e:
        logger.critical(e)
        utils.clean()
Exemplo n.º 12
0
def ding():
    """Play the standard snowboy 'ding' acknowledgement sound."""
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)

# def handle():
#     with open(raw_recording,'rb') as raw:
#         directives = alexa_query(raw, mp3_response, http_log)
#         if 'speak' in directives:
#             play_music(mp3_response,60000)
#         return directives

# def start2():
#     while True:
#         ding()
#         if record_to_file(raw_recording):
#             directives = handle()

def handle_alexa():
Exemplo n.º 13
0
def conversation(fp):
    """Transcribe the recording *fp*, chat with the Tuling bot, speak it.

    All engines are constructed anew on every call.
    """
    global detected, mp3_player
    detected = False
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    print("converting audio to text")
    # SECURITY: API ids/keys/secrets are hard-coded below; move them to
    # configuration or environment variables before publishing this code.
    asr_engine = asr.BaiduASR('9670645', 'qg4haN8b2bGvFtCbBGqhrmZy',
                              '585d4eccb50d306c401d7df138bb02e7')
    #asr_engine = asr.TencentRealTimeASR('1253537070', 'AKID7C7JK9QomcWJUjcsKbK8iLQjhju8fC3z', '2vhKRVSn4mXQ9PiT7eOtBqQhR5Z6IvPn')
    query = asr_engine.transcribe(fp)
    # Recording file is no longer needed after transcription.
    utils.check_and_delete(fp)
    ai_engine = ai.TulingRobot('4d6eec9d9a9148bca73236bac6f35824')
    msg = ai_engine.chat(query)
    tts_engine = tts.BaiduTTS('9670645', 'qg4haN8b2bGvFtCbBGqhrmZy',
                              '585d4eccb50d306c401d7df138bb02e7')
    voice = tts_engine.get_speech(msg)
    mp3_player = player.SoxPlayer()
    mp3_player.play(voice)
def text_run(text):
    """Match voice-command keywords in *text* and run the mapped action.

    Returns True when a command was recognized (and a confirmation sound
    is played), False otherwise.
    """
    matched = False

    # Radio: play one of two fixed streams, or kill the player.
    if 'radio' in text:
        if 'play' in text:
            if '1' in text or 'one' in text:
                run('ls')
                run("mplayer -ao pulse 'http://lhttp.qingting.fm/live/5022340/64k.mp3'")
                matched = True
            elif '2' in text or 'two' in text:
                run("mplayer -ao pulse 'http://lhttp.qingting.fm/live/275/64k.mp3'")
                matched = True
        elif 'stop' in text:
            run('killall mplayer')
            matched = True

    # Speaker routing via pulseaudio.
    if 'speaker' in text or 'speak' in text:
        if 'open' in text:
            subprocess.call(['pacmd', 'set-default-sink', 'combined'])
            matched = True
        elif 'stop' in text:
            # Recognized but no action wired up yet.
            pass

    # Local music via mocp.
    if 'music' in text:
        if 'play' in text:
            run('mocp -p')
            matched = True
        elif 'stop' in text:
            run('mocp -P')
            matched = True

    if matched:
        snowboydecoder.play_audio_file(audio_file)
    return matched
    def assist(self):
        """Run one Google Assistant Converse session over local audio.

        Builds the audio source/sink and conversation stream, streams the
        user's speech to the gRPC Converse API, plays the assistant's audio
        reply, and loops while the assistant expects a follow-on query.
        If the recognized request text matches a local command
        (`text_run`), playback is cut short.  On any error the assistant
        client is recreated and the session restarts once the stream is
        closed.
        """
        # Configure audio source and sink.
        self.audio_device = None
        # audio_device starts as None, so the first `or` always builds a
        # fresh SoundDeviceStream; the second assignment then reuses it,
        # making source and sink the same device.
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
        restart = False
        # continue_dialog stays True as long as the assistant signals a
        # follow-on query (DIALOG_FOLLOW_ON).
        continue_dialog = True
        try:
            while continue_dialog:
                continue_dialog = False
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                def iter_converse_requests():
                    # Yield all request chunks, then switch the stream to
                    # playback mode once the request is fully sent.
                    for c in self.gen_converse_requests():
                        assistant_helpers.log_assist_request_without_audio(c)
                        yield c
                    self.conversation_stream.start_playback()

                # This generator yields ConverseResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Converse(iter_converse_requests(),
                                                    self.grpc_deadline):
                    assistant_helpers.log_assist_response_without_audio(resp)
                    if resp.error.code != code_pb2.OK:
                        self.logger.error('server error: %s',
                                          resp.error.message)
                        break
                    if resp.event_type == END_OF_UTTERANCE:
                        self.logger.info('End of audio request detected')
                        self.conversation_stream.stop_recording()
                    if resp.result.spoken_request_text:
                        self.logger.info('Transcript of user request: "%s".',
                                         resp.result.spoken_request_text)
                        srtxt = resp.result.spoken_request_text
                        # Local command shortcut: if the transcript maps to
                        # a local action, skip the assistant's own response.
                        if text_run(srtxt):
                            self.logger.info(
                                'Got commnad and run from follow text')
                            self.conversation_stream.stop_playback()
                            break
                        self.logger.info('Playing assistant response.')
                    if len(resp.audio_out.audio_data) > 0:
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.result.spoken_response_text:
                        self.logger.info(
                            'Transcript of TTS response '
                            '(only populated from IFTTT): "%s".',
                            resp.result.spoken_response_text)
                    if resp.result.conversation_state:
                        # Persist state so the next turn continues this
                        # conversation.
                        self.conversation_state_bytes = resp.result.conversation_state
                    if resp.result.volume_percentage != 0:
                        volume_percentage = resp.result.volume_percentage
                        self.logger.info('Volume should be set to %s%%',
                                         volume_percentage)
                    if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                        continue_dialog = True
                        self.logger.info(
                            'Expecting follow-on query from user.')
                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception as e:
            # Any failure (e.g. connection reset): signal with a ding,
            # rebuild the assistant client, and mark for restart.
            snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
            self._create_assistant()
            self.logger.exception('Skipping because of connection reset')
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                # Re-enter assist() only after the old stream is closed.
                self.assist()
        except Exception:
            self.logger.error('Failed to close conversation_stream.')
Exemplo n.º 16
0
 def _do_not_bother_off_callback(self):
     """Disable do-not-bother mode and play the 'on' confirmation sound."""
     utils.do_not_bother = False
     snowboydecoder.play_audio_file(constants.getData('on.wav'))
     # Log message: "do-not-bother mode disabled".
     logger.info('勿扰模式关闭')
Exemplo n.º 17
0
 def _do_not_bother_on_callback(self):
     """Enable do-not-bother mode and play the 'off' confirmation sound."""
     utils.do_not_bother = True
     snowboydecoder.play_audio_file(constants.getData('off.wav'))
     # Log message: "do-not-bother mode enabled".
     logger.info('勿扰模式打开')
Exemplo n.º 18
0
 def _detected_callback(self):
     """Hotword callback: beep and interrupt the current conversation.

     Skipped (with a warning) while do-not-bother mode is active.
     """
     if not utils.is_proper_time():
         # Log message: "do-not-bother mode is active".
         logger.warning('勿扰模式开启中')
         return
     snowboydecoder.play_audio_file(constants.getData('beep_hi.wav'))
     self._conversation.interrupt()
Exemplo n.º 19
0
 def converse(self, fp, callback=None):
     """Core conversation logic: play the lo-beep, then handle *fp*."""
     snowboydecoder.play_audio_file(constants.getData('beep_lo.wav'))
     self.doConverse(fp, callback)
Exemplo n.º 20
0
def interrupt_callback():
    """Return the global `interrupted` flag (polled by the detector loop)."""
    global interrupted
    return interrupted


# Demo entry point: load two hotword models from the command line and run
# the detector until interrupted with Ctrl+C.
if len(sys.argv) != 3:
    print("Error: need to specify 2 model names")
    print("Usage: python demo.py 1st.model 2nd.model")
    sys.exit(-1)

models = sys.argv[1:]

# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)

# One sensitivity value per model.
sensitivity = [0.5] * len(models)
detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
# One callback per model: the first model dings, the second dongs.
callbacks = [
    lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),
    lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
]
print('Listening... Press Ctrl+C to exit')

# main loop
# make sure you have the same numbers of callbacks and models
detector.start(detected_callback=callbacks,
               interrupt_check=interrupt_callback,
               sleep_time=0.03)

detector.terminate()
Exemplo n.º 21
0
def detectedCallback():
    """Hotword callback: play the default chime and announce recording."""
    snowboydecoder.play_audio_file()
    # No newline + flush so a later message can complete this line.
    print('recording audio...', end='', flush=True)
Exemplo n.º 22
0
def assistant_detect_controller():
    """Busy-wait for the assistant flag, then run one assistant session.

    Loops forever: each time `globalmodule.assistantcontroller` goes True
    the flag is consumed, `assistant.assist()` runs, and a 'dong' marks
    the end of the session.
    """
    while True:
        if not globalmodule.assistantcontroller:
            continue
        globalmodule.assistantcontroller = False
        assistant.assist()
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
Exemplo n.º 23
0
    def gg(self):
        """Hotword handler for the Korean voice-assistant GUI.

        Stops hotword detection, records and transcribes one utterance,
        dispatches on Korean keywords to build a spoken reply (weather,
        shuttle times, notices, cafeteria menu, voice selection, small
        talk), shows the reply in the UI, speaks it, plays a ring sound,
        and restarts hotword detection.
        """
        self.detector.terminate()
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
        # audio_buffer = self.rc.record_audio()
        text = self.stt.get_str(self.label_stt.setText)
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)

        # text = transcribe_streaming.transcribe_streaming(audio_buffer)

        self.label_stt.setText(text)

        # Nothing recognized: reset the reply label and resume detection.
        if text is None:
            self.label_tts.setText('...')
            self.detector.start(detected_callback=self.gg, sleep_time=0.03)
            return

        # "날씨" = weather.
        elif "날씨" in text:
            self.wd = weather.get_weather(self.lat, self.lon)
            spch = self.wd['str']
            self.pushButton_weather.click()

        # "꺼져" = "go away" — playful rebuke reply.
        elif "꺼져" in text:
            spch = "꺼지라고요? 말씀이 심하시네요.."

        # "안녕" = hello.
        elif "안녕" in text:
            spch = "반가워요!"

        # "셔틀" = shuttle bus: look up the next departure for the
        # requested route.
        elif "셔틀" in text:
            shuttle = Shuttle()
            if '정왕역' in text:
                self.s_list = shuttle.get_shuttle('산기대', '정왕역')
            elif '오이도' in text:
                self.s_list = shuttle.get_shuttle('산기대', '오이도')
            elif '학교' in text:
                self.s_list = shuttle.get_shuttle('정왕역', '산기대')
            else:
                # Unknown destination: ask for a precise location and
                # resume detection immediately.
                spch = '위치를 정확히 말해주세요.'
                self.label_tts.setText(spch)
                self.tts.play_tts(spch, self.speaker)
                self.detector.start(detected_callback=self.gg, sleep_time=0.03)
                return
            if len(self.s_list) == 0:
                spch = "탑승 가능한 셔틀버스가 존재하지 않습니다!"
            else:
                spch = "탑승 가능한 가장 빠른 셔틀은 " + self.s_list[0][
                    2] + "이고 출발까지 " + str(self.s_list[0][3]) + "시간 " + str(
                        self.s_list[0][4]) + "분 남았습니다."
            self.pushButton_shuttle.click()

        # "공지" = notices; "읽" = read them aloud from notice.txt.
        elif "공지" in text:
            self.pushButton_notice.click()
            if "읽" in text:
                with open('notice.txt', 'r') as f:
                    loaded = loads(f.read())
                spch = '학사공지, ' + loaded['학사']['0']['sub'] + ', 취업공지, ' + loaded['취업']['0']['sub'] + ', 일반공지, ' +\
                       loaded['일반']['0']['sub']
            else:
                spch = '공지사항을 보여드릴게요.'

        # "학식"/"메뉴" = cafeteria menu.
        elif "학식" in text or "메뉴" in text:
            self.pushButton_food.click()
            spch = '메뉴를 추천해드릴게요!'

        # Voice selection: "유리"/"일본" picks the yuri voice.
        elif "유리" in text or '일본' in text:
            self.speaker = 'yuri'
            spch = '저는 한국말을 잘 못해요!'

        # "미진"/"한국" picks the mijin voice.
        elif "미진" in text or '한국' in text:
            self.speaker = 'mijin'
            spch = '저를 찾으셨나요?'

        elif "교수님" in text:
            spch = '교수님, 저희는 A+가 받고 싶습니다!'

        elif "이놈아" in text:
            spch = '저의 이름을 불러주시다니 정말 기쁘네요~'

        elif "아빠" in text and "누구" in text:
            spch = '저는 아빠가 두명이에요~ 헤헤헤...'

        elif "나쁜 놈" in text:
            spch = '으어어어엉.... ㅠㅠ 저 상처받았어요...'

        # Fallback: "I didn't catch that".
        else:
            spch = '잘 알아듣지 못했습니다? 정확하게 말해 주세요'

        self.label_tts.setText(spch)
        self.tts.play_tts(spch, self.speaker)
        # Blocking ring playback marks the end of the interaction.
        subprocess.Popen(['mpg123', '-q', "snowboy/resources/ring.mp3"]).wait()
        self.detector.start(detected_callback=self.gg, sleep_time=0.03)