コード例 #1
0
ファイル: chatrobot.py プロジェクト: yuch1a/robotQA
def task(quit_event):
    """Wake-word loop: on 'watson', record speech, transcribe it with Bing,
    then either play music or speak a chatbot reply via gTTS + madplay.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(key=BING_KEY)
    while not quit_event.is_set():
        if mic.wakeup('watson'):
            print('Wake up')
            # Drop any stale synthesized audio from the previous turn.
            os.system('rm temp.mp3')
            data = mic.listen()
            try:
                text = bing.recognize(data)
                if text:
                    print('\n> %s' % text)

                    if 'play music' in text:
                        tts = gTTS(text='I will play music!', lang='en-us')
                        tts.save("temp.mp3")
                        os.system('madplay temp.mp3')
                        os.system('madplay ~/Tchaikovsky_Concerto_No.1p.mp3')
                    else:
                        output = chat(text)
                        print('\n>> %s' % output)
                        tts = gTTS(text=output, lang='en-us')
                        tts.save("temp.mp3")
                        os.system('madplay temp.mp3')

            except Exception as e:
                # BaseException.message was removed in Python 3; print the
                # exception object itself so this works on both versions.
                print(e)
コード例 #2
0
ファイル: main.py プロジェクト: sylcastaing/Olaf-voice
def task(quit_event):
    """Wake-word loop for the 'olaf' assistant: light the pixel ring,
    record speech, send the transcription to the bot, and play back the
    synthesized answer.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    player = Player(mic.pyaudio_instance)

    # Brief startup color so the user sees the device is booting.
    pixel_ring.set_color(rgb=0x505000)
    time.sleep(3)

    speech = Speech()
    myBot = Bot()

    while not quit_event.is_set():
        if mic.wakeup(keyword='olaf'):
            pixel_ring.listen()
            data = mic.listen()
            pixel_ring.wait()
            text = speech.recognize(data)
            if text:
                logger.debug('Recognized : %s', text)
                result = myBot.request(text)
                pixel_ring.speak(4, 0)
                audio = speech.synthetize(result)

                # PEP 8: compare against None with 'is not', not '!='.
                if audio is not None:
                    player.play_raw(audio)

            pixel_ring.off()

    mic.close()
    pixel_ring.off()
コード例 #3
0
def task(quit_event):
    """Listen for the 'respeaker' wake word and print each recognized
    utterance until *quit_event* is set."""
    mic = Microphone(quit_event=quit_event)

    while not quit_event.is_set():
        # Guard clause: go straight back to waiting unless woken up.
        if not mic.wakeup('respeaker'):
            continue
        print('Wake up')
        audio = mic.listen()
        result = mic.recognize(audio)
        if result:
            print(('Recognized %s' % result))
コード例 #4
0
def task(quit_event):
    """Wake on 'respeaker', record speech, and print Bing's transcription.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(key=BING_KEY)

    while not quit_event.is_set():
        if mic.wakeup('respeaker'):
            print('Wake up')
            data = mic.listen()
            try:
                text = bing.recognize(data)
                if text:
                    print(('Recognized %s' % text))
            except Exception as e:
                # BaseException.message does not exist in Python 3;
                # printing the exception itself works on both versions.
                print(e)
コード例 #5
0
def task(quit_event):
    """Wake on 'respeaker', record speech, and print the Google Speech
    Recognition transcription (via the speech_recognition package).

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    r = sr.Recognizer()

    while not quit_event.is_set():
        if mic.wakeup('respeaker'):
            print('Wake up')
            data = mic.listen()
            try:
                # convert() presumably adapts raw mic data to an AudioData
                # object accepted by recognize_google -- defined elsewhere.
                text = r.recognize_google(convert(data), language='en-US')
                if text:
                    print('Recognized %s' % text)
            except Exception as e:
                # BaseException.message does not exist in Python 3.
                print(e)
コード例 #6
0
def task(quit_event):
    """Wake on 'respeaker', record speech, and print the first Baidu Voice
    recognition result.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    baidu = BaiduVoiceApi(appkey=APP_KEY, secretkey=SECRET_KEY)

    while not quit_event.is_set():
        if mic.wakeup('respeaker'):
            print('Wake up')
            data = mic.listen()
            try:
                text = baidu.server_api(data)
                if text:
                    # server_api returns a JSON string whose 'result' field
                    # is a list of candidate transcriptions.
                    text = json.loads(text)
                    print('Recognized %s' % text['result'][0])
            except Exception as e:
                # BaseException.message does not exist in Python 3.
                print(e)
コード例 #7
0
def task(quit_event):
    """Wake on 'respeaker', transcribe speech with Bing, and start music
    playback when the phrase 'play music' is heard.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(key=BING_KEY)

    while not quit_event.is_set():
        if mic.wakeup('respeaker'):
            print('Wake up')
            data = mic.listen()
            try:
                text = bing.recognize(data)
                if text:
                    print('Recognized %s' % text)
                    if 'play music' in text:
                        print('I will play music!')
                        os.system('madplay Tchaikovsky_Concerto_No.1p.mp3')
            except Exception as e:
                # BaseException.message does not exist in Python 3.
                print(e)
コード例 #8
0
def task(quit_event):
    """Voice control loop for a robot: wake on 'respeaker', transcribe the
    command with Bing, and dispatch simple phrase patterns to robot
    gestures (hand shakes, head nods, greetings).

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(BING_KEY)

    while not quit_event.is_set():
        if mic.wakeup('respeaker'):
            print('Wake up')
            pixel_ring.listen()
            robot("WakeUp")

            # Short pause so the wake-up gesture starts before recording.
            time.sleep(.1)
            data = mic.listen()

            try:
                pixel_ring.wait()
                text = bing.recognize(data, language='en-US')
                # spi.write('answer\n')
                # NOTE(review): str + text.encode('utf-8') only works on
                # Python 2 (on Python 3 this is str + bytes) -- confirm
                # the target interpreter.
                print('\nBing:' + text.encode('utf-8'))
                # Phrase dispatch: order matters -- the more specific
                # patterns (left/right hand, shake head) must be tested
                # before the generic 'head'/'hand' fallbacks below.
                if re.search(r'shake', text) and re.search(r'left hand', text):
                    robot("LeftHand")
                    print("Shake Left hand")
                elif re.search(r'shake', text) and re.search(
                        r'right hand', text):
                    robot("RightHand")
                    print("Shake right hand")
                elif re.search(r'shake.*(head).*', text):
                    robot("ShakeHead")
                    print("Shake head")
                # 'had' is accepted as a common misrecognition of 'head'.
                elif re.search(r'head', text) or re.search(r'had', text):
                    robot("NodHead")
                    print("Nod head")
                elif re.search(r'hand', text):
                    robot("ShakeHand")
                    print("Shake hand")
                elif re.search(r'hello', text):
                    robot("RightHand")
                    print("Hello")
                else:
                    print("Other")
            except Exception as e:
                print(
                    "\nCould not request results from Microsoft Bing Voice Recognition service; {0}"
                    .format(e))
            # if text:
            # print('Recognized %s' % text)
            pixel_ring.off()
コード例 #9
0
ファイル: alexa.py プロジェクト: vlinhd11/respeaker-feed
def main():
    """Entry point for an Alexa client: run the OAuth login flow if needed,
    install a SIGINT handler, then loop on the 'alexa' wake word and stream
    recorded audio to the Alexa service.
    """
    import signal

    thread = None
    # No cached refresh token yet: run the browser-based login flow in a
    # background daemon thread while the main loop keeps running.
    if not get_refresh_token():
        thread = Thread(target=login)
        thread.daemon = True
        thread.start()

    quit_event = Event()

    def handler(signum, frame):
        """SIGINT handler: stop the main loop and the login web server."""
        quit_event.set()
        if thread:
            tornado.ioloop.IOLoop.instance().stop()

    signal.signal(signal.SIGINT, handler)

    mic = Microphone(quit_event=quit_event)
    alexa = Alexa()

    while not quit_event.is_set():
        if mic.wakeup(keyword='alexa'):
            logging.debug('wakeup')
            if not get_refresh_token():
                # Not logged in yet: play a hint sound instead of streaming.
                # MIPS targets (e.g. the ReSpeaker board) use madplay|aplay
                # since ffplay is unavailable there.
                if platform.machine() == 'mips':
                    command = 'madplay -o wave:- {} | aplay -M'.format(
                        hint_file)
                else:
                    command = 'ffplay -autoexit -nodisp {}'.format(hint_file)

                subprocess.Popen(command, shell=True).wait()
                continue

            data = mic.listen()
            try:
                alexa.recognize(data)
            except Exception as e:
                # logging.warn is a deprecated alias of logging.warning, and
                # e.message is Python-2-only; log the exception itself.
                logging.warning(e)

    mic.close()
    logging.debug('Mission completed')
コード例 #10
0
ファイル: alexa.py プロジェクト: respeaker/respeaker-feed
def main():
    """Entry point for an Alexa client (respeaker-feed variant): run the
    OAuth login flow if needed, install a SIGINT handler, then loop on the
    'alexa' wake word and stream recorded audio to the Alexa service.
    """
    import signal

    thread = None
    # Launch the login web flow in a background daemon thread when no
    # refresh token has been cached yet.
    if not get_refresh_token():
        thread = Thread(target=login)
        thread.daemon = True
        thread.start()

    quit_event = Event()

    def handler(signum, frame):
        """SIGINT handler: stop the main loop and the login web server."""
        quit_event.set()
        if thread:
            tornado.ioloop.IOLoop.instance().stop()

    signal.signal(signal.SIGINT, handler)

    mic = Microphone(quit_event=quit_event)
    alexa = Alexa()

    while not quit_event.is_set():
        if mic.wakeup(keyword='alexa'):
            logging.debug('wakeup')
            if not get_refresh_token():
                # Still not logged in: play a hint sound and keep waiting.
                # MIPS boards lack ffplay, hence the madplay|aplay pipe.
                if platform.machine() == 'mips':
                    command = 'madplay -o wave:- {} | aplay -M'.format(hint_file)
                else:
                    command = 'ffplay -autoexit -nodisp {}'.format(hint_file)

                subprocess.Popen(command, shell=True).wait()
                continue

            data = mic.listen()
            try:
                alexa.recognize(data)
            except Exception as e:
                # logging.warn is deprecated; e.message is Python-2-only.
                logging.warning(e)

    mic.close()
    logging.debug('Mission completed')
コード例 #11
0
def main():
    """Minimal Alexa client entry point: wire SIGINT to a quit event, then
    loop on the 'alexa' wake word and stream recorded audio to Alexa."""
    quit_event = Event()
    mic = Microphone(quit_event=quit_event)
    alexa = Alexa(mic)

    def on_quit(signum, frame):
        # Signal the wake/listen loop to stop at the next opportunity.
        quit_event.set()

    signal.signal(signal.SIGINT, on_quit)

    while not quit_event.is_set():
        if mic.wakeup(keyword='alexa'):
            logging.debug('wakeup')
            data = mic.listen()
            try:
                alexa.recognize(data)
            except Exception as e:
                # logging.warn is deprecated; e.message is Python-2-only.
                logging.warning(e)

    mic.close()
    logging.debug('Mission completed')
コード例 #12
0
ファイル: main.py プロジェクト: jerry-0824/073_avs
class Audio(object):
    """Thin adapter that exposes the ReSpeaker microphone through a simple
    wakeup/start/iterate/stop protocol plus context-manager support."""

    def __init__(self):
        # Imported here rather than at module level -- presumably so the
        # module can be imported without the respeaker package installed.
        from respeaker import Microphone

        self.mic = Microphone()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def wakeup(self):
        """Block until the 'alexa' wake word is detected."""
        return self.mic.wakeup('alexa')

    def __iter__(self):
        # Delegate iteration straight to the microphone's listen() result.
        return self.mic.listen()

    def start(self):
        # No explicit start needed; kept for interface compatibility.
        pass

    def stop(self):
        # No explicit stop needed; kept for interface compatibility.
        pass
コード例 #13
0
def task(quit_event):
    """Vietnamese voice assistant loop: wake on one of several hotwords,
    play a random acknowledgement wave, record an utterance, send it to a
    remote recognizer, and speak the TTS response.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    while not quit_event.is_set():
        pixels.off()
        print("Waiting for wakeup word!")
        if mic.wakeup(['sen ơi','senoi','maioi','mai ơi']):
            print('waked up')
            # Play one of eight pre-rendered acknowledgement waves at random.
            f_wav = random.randint(1, 8)
            mic.stop()
            os.system('aplay /dev/shm/waves/' + str(f_wav)+'.wav')
            print("Speaking something ...")
            data = mic.listen(duration=6, timeout=1.5)
            mic.stop()
            out = json.loads(send_raw(data))
            trans = out['hypotheses'][0]['utterance'].encode('utf-8')
            print('Recognized output: ' + trans)
            if trans:
                try:
                    # NOTE(review): 'msg' is not defined in this function --
                    # presumably a module-level JSON template; verify.
                    jmsg = json.loads(msg)
                    jmsg['texts'] = trans
                    playtts(jmsg)
                except Exception:
                    # Was a bare 'except:', which also swallowed
                    # KeyboardInterrupt/SystemExit; narrowed to Exception.
                    print("TTS has some problem")
コード例 #14
0
    print "terminating..."
    pixel_ring.off()
    mission_completed = True
    mic.close()
    player.close()
    myworker.stop()


# Top-level wiring (Python 2: note the bare 'print' statements below).
# Install the SIGINT handler so Ctrl-C performs the cleanup defined above.
signal.signal(signal.SIGINT, handle_int)

myworker.start()

# Main loop: wait for the wake word, record, recognize with Bing, and hand
# the text to the worker, until handle_int sets mission_completed.
while not mission_completed:
    print
    print "*********** wake me up with \"respeaker\" ***************"
    if mic.wakeup('respeaker'):
        data = mic.listen()
        time.sleep(0.5)
        if data:
            try:
                pixel_ring.wait()
                text = bing.recognize(data, language='en-US')
                print
                print('BING recognize:', text.encode('utf-8'))
                myworker.push_cmd(text)
                # Block until the worker has processed this command.
                myworker.wait_done()
            except Exception as e:
                # NOTE(review): e.message is Python-2-only; acceptable here
                # since the bare 'print' statements already require Python 2.
                print(e.message)
        pixel_ring.off()

# Give the worker a moment to finish before the script exits.
time.sleep(2)
コード例 #15
0
def task(quit_event):
    """Vocabulary-test voice assistant: wakes on 'teresa' and dispatches on
    the recognized phrase to start a spelling test, play music, enter or
    leave chat mode, chat, or answer test questions.

    Mutates module-level session state (isStart, isChat, vocabulary, ans,
    score, ansNum) so other code can observe the test progress.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    global ansNum
    global isStart
    global isChat
    global ans
    global vocabulary
    global score
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(key=BING_KEY)

    def say(message):
        """Synthesize *message* with gTTS and play it with madplay."""
        tts = gTTS(text=message, lang='en-us')
        tts.save("temp.mp3")
        os.system('madplay temp.mp3')

    while not quit_event.is_set():
        if mic.wakeup('teresa'):
            print('Wake up')
            # Drop stale synthesized audio from the previous turn.
            os.system('rm temp.mp3')
            data = mic.listen()
            try:
                text = bing.recognize(data)
                if text:
                    print('\n> %s' % text)

                    if not isStart and 'test start' in text:
                        # Fetch the word list and expected answers, then
                        # ask the first question.
                        response = requests.get(END_POINT + '/1/vocabulary')
                        qaObj = response.json()
                        vocabulary = qaObj["question"]
                        ans = qaObj["answer"]
                        score = [4] * len(vocabulary)

                        isStart = True
                        vocNo = 0
                        say("How to spell " + vocabulary[vocNo])
                    elif not isStart and 'play music' in text:
                        say('I will play music!')
                        os.system('madplay ~/Tchaikovsky_Concerto_No.1p.mp3')
                    elif not isStart and not isChat and 'chat' in text:
                        isChat = True
                        say('OK! Let’s Chat !')
                    elif not isStart and isChat and 'Stop Chat' in text:
                        isChat = False
                        say('So, let’s talk again soon!')
                    elif not isStart and isChat:
                        # Free chat mode: relay the utterance to the bot.
                        output = chat(text)
                        print('\n>> %s' % output)
                        say(output)
                    elif not isStart:
                        say("Please say test start to start the test.")
                    else:
                        # Test in progress: grade the spoken answer.
                        output = vocTest(text)
                        print('\n>> %s' % output)
                        say(output)

            except Exception as e:
                # BaseException.message does not exist in Python 3.
                print(e)
コード例 #16
0
def task(quit_event):
    """Print 'Wake up' each time the 'respeaker' hotword is detected,
    until *quit_event* is set."""
    mic = Microphone(quit_event=quit_event)

    while not quit_event.is_set():
        # Guard clause: keep waiting unless the hotword fired.
        if not mic.wakeup('respeaker'):
            continue
        print('Wake up')
コード例 #17
0
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # The '(audio_device or SoundDeviceStream(...))' pattern below lets the
    # source and sink share one device stream when neither an input nor an
    # output file is given: audio_device starts as None and, once assigned
    # in the first branch taken, is reused by the second.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        # Interactive mode: wait for the 'respeaker' hotword before each
        # conversation, and keep converse()-ing while the assistant signals
        # a follow-up turn (continue_conversation is True).
        mic = Microphone()
        continue_conversation = False
        while True:
            if continue_conversation or mic.wakeup('respeaker'):
                continue_conversation = assistant.converse()
コード例 #18
0
def task(quit_event):
    """Voice front-end for a MIM (Microsoft Identity Manager) query bot.

    Wake-word loop: on 'lithnet', record a question, transcribe it with
    Bing Speech, POST the text to an Azure Function that queries MIM,
    speak the response via the Azure Cognitive Services TTS endpoint, and
    send an audit event to an Azure IoT Hub.

    NOTE(review): this block is Python-2 specific (httplib,
    urllib.urlencode, str.decode on a requests response) -- confirm the
    target runtime before porting.

    :param quit_event: threading.Event; the loop exits once it is set.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(key=BING_KEY)

    while not quit_event.is_set():
        if mic.wakeup('lithnet'):
            print('Wake up')
            # Audible acknowledgement that the wake word was heard.
            os.system('madplay yes-female.mp3')
            data = mic.listen()
            try:
                text = bing.recognize(data)
                if text:
                    print('Converted query from speech: %s' % text)
                    os.system('madplay LetmehavealookFemale.mp3')

                    try:
                        # -----------------------------
                        # Search MIM for Object
                        # -----------------------------
                        params = {}
                        params['query'] = str(text)
                        params['staging'] = "true"
                        headers = {"Content-Type" : "application/x-www-form-urlencoded"}

                        # Connect to server to get the Access Token
                        print ("Connect to Azure Function to get Object from MIM")
                        textresp = httplib.HTTPSConnection("yourAzureFunctionApp.azurewebsites.net")
                        code = {"code" : "yourAzureFunctionKey=="}
                        functioncode = urllib.urlencode(code)
                        textresp.request("POST", "/api/yourAzureFunction?" + functioncode, json.dumps(params), headers)

                        response = textresp.getresponse()
                        data = response.read()
                        print(response.status, response.reason)

                        # Remove CRLF and leading "
                        # NOTE(review): assumes a fixed 5-byte trailer and a
                        # leading quote in the payload -- fragile; verify
                        # against the actual Azure Function output.
                        data = data[:-5]
                        data = data[1:]
                        print('MIM Returned: ' + data)
                        returnedResults = json.loads(data)

                        # -----------------------------
                        # Convert Repsonse to Speech
                        # -----------------------------
                        if data:
                            mimResponse = returnedResults['MIMResponse']
                            tokenurl = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
                            ttskey = 'yourAzureTextToSpeechAPIKey'
                            params = ""
                            ttstokenheaders = {"Ocp-Apim-Subscription-Key": ttskey}

                            print("Getting AccessToken from westus.tts.speech.microsoft.com for Text to Speech Conversion")

                            resp = requests.post(tokenurl, params=params, headers=ttstokenheaders)
                            token = resp.text
                            # NOTE(review): resp.text is already unicode;
                            # .decode() only works on Python 2 str here.
                            accesstoken = token.decode("UTF-8")

                            ttsurl = 'https://westus.tts.speech.microsoft.com/cognitiveservices/v1'

                            ttsheaders = {}
                            ttsheaders['Ocp-Apim-Subscription-Key'] = ttskey
                            ttsheaders['Content-Type'] = "application/ssml+xml"
                            ttsheaders['X-Microsoft-OutputFormat'] = "audio-16khz-32kbitrate-mono-mp3"
                            ttsheaders['User-Agent'] = "MIMText2Speech"
                            ttsheaders['Authorization'] = "Bearer " + accesstoken

                            # Target SSML shape (from the service docs):
                            # <speak version='1.0' xmlns="http://www.w3.org/2001/10/synthesis" xml:lang='en-US'><voice  name='Microsoft Server Speech Text to Speech Voice (en-AU, HayleyRUS)'><prosody volume="+20.00%">Welcome to use Microsoft Cognitive Services Text-to-Speech API.</prosody></voice> </speak>
                            # NOTE(review): below, 'prosody' is attached to
                            # 'body' (a sibling of 'voice'), and the spoken
                            # text is set on 'voice' directly -- this
                            # diverges from the sample above; verify the
                            # prosody settings actually take effect.
                            body = ElementTree.Element('speak', version='1.0')
                            body.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-us')
                            voice = ElementTree.SubElement(body, 'voice')
                            voice.set('{http://www.w3.org/XML/1998/namespace}lang', 'en-AU')
                            voice.set('{http://www.w3.org/XML/1998/namespace}gender', 'Female')
                            voice.set('name', 'Microsoft Server Speech Text to Speech Voice (en-AU, HayleyRUS)')
                            prosody = ElementTree.SubElement(body, 'prosody')
                            prosody.set('volume', '-50.00%')
                            prosody.set('rate', '-50.00%')
                            voice.text = data

                            print("Calling westus.tts.speech.microsoft.com to convert response to audio")
                            audioresp = httplib.HTTPSConnection("westus.tts.speech.microsoft.com")
                            audioresp.request("POST", "/cognitiveservices/v1", ElementTree.tostring(body), ttsheaders)

                            response = audioresp.getresponse()
                            data = response.read()
                            print(response.status, response.reason)

                            # 'file' shadows the builtin; response audio is
                            # written out as MP3 for madplay below.
                            file = open("audioout.mp3", "wb")
                            file.write(data)
                            file.close()

                            # Play Response
                            os.system('madplay audioout.mp3')

                            # -----------------------------
                            # Reporting and Auditing
                            # -----------------------------
                            datetimenow = strftime("%m-%d-%Y %H:%M:%S", gmtime())
                            logparams = {}
                            logparams['deviceId'] = "MIMVoice"
                            logparams['messageId'] = str(datetimenow)
                            logparams['messageString'] = "MIMVoice-to-Cloud-" +str(datetimenow)
                            logparams['MIMQuery'] = str(text)
                            logparams['MIMResponse'] = mimResponse
                            logparams['entity'] = returnedResults['fullname']
                            logparams['entitlement'] = returnedResults['entitlement']
                            logparams['date'] = strftime("%m-%d-%Y", gmtime())

                            logheaders = {}
                            logheaders['Authorization'] = SASToken
                            logheaders['Content-Type'] = "application/json"

                            # Send Event to IoT Hub
                            print ("Sending Event Summary to IoT Hub - " + IoTHubName + ".azure-devices.net from deviceID " + deviceID)
                            logresp = httplib.HTTPSConnection(IoTHubName + ".azure-devices.net")
                            logresp.request("POST", "/devices/" + deviceID + "/messages/events?api-version=" + iotHubAPIVer, json.dumps(logparams), logheaders)
                            logresponse = logresp.getresponse()
                            logdata = logresponse.read()

                            # A non-empty body from the IoT Hub indicates
                            # the event was rejected; surface it for debug.
                            if logdata:
                                print(logresponse.status, logresponse.reason)
                                logdata = logdata[:-5]
                                print("DEBUG:Event Summary send to IoT Hub failed: " + logdata)

                    except Exception as e:
                        # NOTE(review): e.message is Python-2-only.
                        print(e.message)

            except Exception as e:
                # NOTE(review): e.message is Python-2-only.
                print(e.message)