Example #1
    def __init__(self):
        self.api_endpoint = ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
        # Setup logging.
        logging.basicConfig()
        # Alternatively: logging.basicConfig(filename='assistant.log',
        #     level=logging.DEBUG if self.verbose else logging.INFO)
        self.logger = logging.getLogger("assistant")
        self.logger.setLevel(logging.DEBUG)
        self.custom_command = False
        self.once = True
        # Load OAuth 2.0 credentials.
        try:
            with open(self.credentials, 'r') as f:
                self.credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                self.http_request = google.auth.transport.requests.Request()
                self.credentials.refresh(self.http_request)
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error('Run google-oauthlib-tool to initialize '
                          'new OAuth 2.0 credentials.')
            return

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)
        logging.info('Connecting to %s', self.api_endpoint)
        self.audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = DEFAULT_GRPC_DEADLINE
        self.device_id = "roghecv2assistant-roghecv2-59lv9s"
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)

        # Opaque blob provided in AssistResponse that,
        # when provided in a follow-up AssistRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Assist()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None
        # Force reset of first conversation.
        self.is_new_conversation = True
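        # In the request generator these two fields are typically copied into
        # AssistConfig's DialogStateIn (as in the upstream pushtotalk sample):
        #   embedded_assistant_pb2.DialogStateIn(
        #       language_code=self.language_code,
        #       conversation_state=self.conversation_state,
        #       is_new_conversation=self.is_new_conversation)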

        self.device_handler = device_helpers.DeviceRequestHandler(
            "roghecv2assistant-roghecv2-59lv9s")

        # Stores the current volume percentage.
        # Note: no volume change is currently implemented in this sample.
        self.volume_percentage = 50
        self.display = True
Example #2
    def __init__(self):
        root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
        self.language_code = 'en-US'
        self.device_config_path = os.path.join(root_dir, 'device_config.json')
        self.device_credentials_path = os.path.join(root_dir,
                                                    'credentials.json')
        self._set_credentials()
        self._load_device_config()
        self._create_conversation_stream()
        self.display = False
        self._set_http_request()
        self._create_gprc_channel()
        self.conversation_state = None
        self.is_new_conversation = True
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.channel)
        self.deadline = 60 * 3 + 5
        self.device_handler = device_helpers.DeviceRequestHandler(
            self.device_id)
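
The private helpers called here are not part of this snippet. A sketch of
what _create_gprc_channel (spelling as in the source) presumably does,
mirroring the channel setup in the other examples:

    def _create_gprc_channel(self):
        self.channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, ASSISTANT_API_ENDPOINT)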
Example #3
def deviceRequestHandler(device_id):
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.minsoft.actions.PlayMusic')
    def playMusic(artist, album):
        logging.info('Received request to play music by %s, album %s.', artist, album)
        # Alternative JSON-RPC calls kept for reference:
        #s.send(b"{\"jsonrpc\": \"2.0\", \"method\": \"JSONRPC.Introspect\", \"id\": 1}")
        #s.send(b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.SetPartymode\", \"id\": 1, \"params\": { \"playerid\": 0, \"partymode\": true } }")
        #s.send(b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.GetPlayers\", \"id\": 1, \"params\": { \"media\": \"all\" } }")
        sendCommand(
            b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.SetPartymode\", \"id\": 1, \"params\": { \"playerid\": 0, \"partymode\": true } }"
        )

    @device_handler.command('com.minsoft.actions.Stop')
    def stop():
        sendCommand(
            b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.Stop\", \"id\": 1, \"params\": { \"playerid\": 0 } }"
        )

    @device_handler.command('com.minsoft.actions.Pause')
    def pause():
        sendCommand(
            b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.PlayPause\", \"id\": 1, \"params\": { \"playerid\": 0, \"play\": false } }"
        )

    @device_handler.command('com.minsoft.actions.Resume')
    def resume():
        sendCommand(
            b"{\"jsonrpc\": \"2.0\", \"method\": \"Player.PlayPause\", \"id\": 1, \"params\": { \"playerid\": 0, \"play\": true } }"
        )

    return device_handler
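
The handlers above call sendCommand, which this snippet never defines. A
minimal sketch, assuming the payloads target Kodi's raw TCP JSON-RPC
interface (the host and port 9090 here are placeholder assumptions):

import socket

def sendCommand(payload):
    # Send one raw JSON-RPC request over a short-lived TCP connection.
    with socket.create_connection(('127.0.0.1', 9090), timeout=5) as s:
        s.sendall(payload)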
Example #4
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    # Text to speech: convert text to speech and play it through the speaker.
    def tts(text, lang='ko'):
        if lang is None:
            speech = gTTS(text=text)
        else:
            speech = gTTS(text=text, lang=lang)
        speech.save('tmp.mp3')
        os.system("omxplayer tmp.mp3")
        os.remove('tmp.mp3')

    # Speech to text: convert speech to text and return it.
    def stt(commands=None, is_respon=False):
        # Voice recognition/response.
        continue_conversation, stt_tmp = assistant.assist(commands=commands,
                                                          is_respon=is_respon)

        wait_for_user_trigger = not continue_conversation

        #if once and (not continue_conversation):
        #    break

        text = stt_tmp

        return text

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        wait_for_user_trigger = not once

        # Commands the user can speak.
        select = ['컨트롤', '교육']
        control = ['초음파', '추적', '불빛', '명령', '꺼']
        yn = ['네', '아니']
        move = ['앞으로', '뒤로', '오른쪽', '왼쪽']

        first = True
        more = False

        # DC motors, servo, ultrasonic sensor, LED.
        module = m.mode()

        while True:
            if first:
                tts("컨트롤모드와 교육모드 중에 선택해주세요 ,.,.,.")
                first = False

            text = stt(select, is_respon=True)
            print("[INFO] answer : ", text)

            if select[0] in text:
                print('동작 모드 ')
                tts('동작모드 입니다....   ')

                while True:

                    if more:
                        tts("동작모드를 더 하실껀가요   ")

                        text = stt(is_respon=False)

                        if yn[0] in text:
                            more = False
                            tts('다시 시작할게요   ')
                        if yn[1] in text:
                            more = False
                            first = True
                            break

                    text = stt(is_respon=False)
                    print("[INFO] answer : ", text)

                    if control[0] in text:
                        print("초음파 모드")
                        tts('초음파 모드 입니다   ')
                        sel = random.randrange(2)

                        if sel == 0:
                            print('1')
                            module.avoid()
                        else:
                            print('2')
                            module.avoid2()

                        tts("초음파 모드가 끝났어요   ")

                        more = True

                    elif control[1] in text:
                        print('추적 모드')
                        tts('추적 모드 입니다   ')
                        module.tracking()

                        tts("추적 모드가 끝났어요   ")

                        more = True

                    elif control[2] in text:
                        print('불빛 모드')
                        tts('불빛 모드 입니다   ')
                        module.servo_led()

                        tts("불빛 모드가 끝났어요 ")

                        more = True

                    elif control[3] in text:
                        print('명령모드')
                        tts('명령 모드 입니다   ')

                        try:
                            start = time.time()
                            while True:
                                if time.time() - start > 60:
                                    break
                                text = stt(commands=move, is_respon=False)
                                print(text)
                                if move[0] in text:
                                    #if module.distance() > 80:
                                    module.go(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[1] in text:
                                    module.back(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[2] in text:
                                    module.spin_right(100, 100)
                                    sleep(3)
                                    module.stop()
                                elif move[3] in text:
                                    module.spin_left(100, 100)
                                    sleep(3)
                                    module.stop()

                        except KeyboardInterrupt:
                            module.stop()

                        module.stop()
                        tts("명령모드가 끝났어요    ")

                        more = True

                    elif control[4] in text:
                        tts("끝낼게요    ")
                        first = True
                        break

            elif select[1] in text:
                #DB Setting
                host = '192.168.0.8'
                user = '******'
                dbname = 'Education'
                password = '******'
                ser = 1
                name = 'LEE'
                e = Education(host, user, dbname, password, ser, name)
                conn, cur = e.connection()
                e.dbclean(conn, cur)
                tts("교육모드 입니다.   ")

                count = 0
                stage = 0

                while True:
                    distance = module.distance()

                    if distance < 50:
                        count += 1

                    if count > 100:
                        count = 0

                        tts("반가워요 제가 동화를 들려드릴게요  ")
                        sleep(1)
                        os.system(
                            "omxplayer ~/workspace/Raspi_google_robot/stt/ka_01.mp3"
                        )

                        tts("동화가 끝났어요 재미있으셨나요  ")
                        sleep(1)

                        while True:
                            text = stt(is_respon=False)
                            e.isfun(conn, cur, text)
                            if yn[0] in text:
                                tts("고마워요 다음에 또 들려줄게요  ")
                                break
                            elif yn[1] in text:
                                tts("나중에는 더 재미있는 이야기를 들려줄게요  ")
                                break
                        e.search_isfun(conn, cur)
                        tts("우리 같이 숫자 공부해요  ")
                        sleep(1)
                        tts("일 더하기 이는 무엇일까요  ")

                        life = 5
                        math_pt = 100
                        while True:
                            text = stt(is_respon=False)
                            print(text)

                            if '3' in text:
                                tts("정답이에요 축하해요  ")
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요  ")
                                life -= 1
                                math_pt -= 5
                                if life == 0:
                                    tts("코인을 다썼어요 아쉽네요 다음 기회에 또 봐요  ")
                                    break
                        e.math(conn, cur, math_pt)
                        e.search_math(conn, cur)
                        time.sleep(1)

                        tts("우리 같이 발음을 맞춰봐요  ")

                        life = 5
                        english_pt = 100

                        quiz = 'orange'

                        tts(quiz, lang=None)

                        while True:
                            text = stt(is_respon=False)
                            print(text)

                            if '오렌지' in text:
                                tts("정답 이에요")
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요    ")
                                life -= 1
                                english_pt -= 5
                                if life == 0:
                                    tts("아쉽네요 다음 기회에 또 봐요     ")
                                    break
                        e.english(conn, cur, english_pt)
                        e.search_english(conn, cur)
                        time.sleep(1)

                        tts("교육모드가 끝났습니다.    ")
                        e.dbclose(conn, cur)
                        first = True
                        break
            else:
                first = True
        m.clean()
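
The tts helper above renders speech to a fixed tmp.mp3 in the working
directory, so an interrupted run or a concurrent call can clobber the file. A
sketch of a variant using a unique temporary file (same gTTS and omxplayer
assumptions as the original):

import os
import subprocess
import tempfile

from gtts import gTTS

def tts(text, lang='ko'):
    # Render speech with gTTS; lang=None falls back to gTTS's default.
    speech = gTTS(text=text) if lang is None else gTTS(text=text, lang=lang)
    fd, path = tempfile.mkstemp(suffix='.mp3')
    os.close(fd)
    try:
        speech.save(path)
        subprocess.run(['omxplayer', path])
    finally:
        os.remove(path)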
Example #5
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)
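    # Note: BCM pin 12 is configured here but never driven below; the OnOff
    # handler only logs. A real handler would presumably call
    # GPIO.output(12, GPIO.HIGH) / GPIO.output(12, GPIO.LOW).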

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once

        porcupine = PorcupineDemo(library_path=pvporcupine.LIBRARY_PATH,
                                  model_path=pvporcupine.MODEL_PATH,
                                  keyword_paths=[
                                      pvporcupine.KEYWORD_PATHS[x]
                                      for x in ['terminator']
                                  ],
                                  sensitivities=[0.95],
                                  output_path=None,
                                  input_device_index=None)

        porcupine_thread = threading.Thread(target=porcupine.run)
        porcupine_thread.start()

        bill = Billy()

        while True:
            if wait_for_user_trigger:
                while not porcupine.detected:
                    time.sleep(0.1)
                #click.pause(info='Press Enter to send a new request...')
            bill.eye_on()
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
            bill.eye_off()
Example #6
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    if not device_id or not device_model_id:
        logging.error(
            'No device_id or no device_model_id found. Please check config.py')
        sys.exit(0)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #7
def main():
    # Configuration
    api_endpoint = ASSISTANT_API_ENDPOINT
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    project_id = os.getenv("PROJECT_ID")
    device_model_id = os.getenv("DEVICE_MODEL_ID")
    device_id = os.getenv("DEVICE_ID")

    device_config = os.path.join(click.get_app_dir('googlesamples-assistant'),
                                 'device_config.json')
    lang = "en-US"

    display = False
    verbose = False
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    grpc_deadline = DEFAULT_GRPC_DEADLINE

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None

    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))

    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # Keep recording voice requests using the microphone
        # and playing back assistant responses through the speaker.

        # Initialise car share app.
        assistant.assist(text_query="talk to car share")
        while True:
            continue_conversation = assistant.assist()
Example #8
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, audio_sample_rate,
         audio_sample_width, audio_iter_size, audio_block_size,
         audio_flush_size, grpc_deadline, *args, **kwargs):

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with Fish() as fish:
        # Configure audio source and sink.
        audio_sink = FishDeviceStream(fish=fish,
                                      sample_rate=audio_sample_rate,
                                      sample_width=audio_sample_width,
                                      block_size=audio_block_size,
                                      flush_size=audio_flush_size)
        audio_source = audio_sink

        # Create conversation stream with the given audio source and sink.
        conversation_stream = FishConversationStream(
            fish=fish,
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )

        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream, display, grpc_channel,
                             grpc_deadline, device_handler) as assistant:
            fish.setIndicator(True)
            continue_conversation = False
            while True:
                if not continue_conversation:
                    logging.info('Waiting for button press')
                    while not fish.getTriggerButton():
                        time.sleep(0.05)
                continue_conversation = assistant.assist()
Example #9
def main(api_endpoint, credentials, project_id, device_model_id, device_config,
         lang, display, verbose, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once):

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    input_audio_file = False
    output_audio_file = False

    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))

    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    try:
        with open(device_config) as f:
            device = json.load(f)
            device_id = device['id']
            device_model_id = device['model_id']
            logging.info("Using device model %s and device id %s",
                         device_model_id, device_id)
    except Exception as e:
        logging.warning('Device config not found: %s' % e)
        logging.info('Registering device')
        if not device_model_id:
            logging.error('Option --device-model-id required '
                          'when registering a device instance.')
            sys.exit(-1)
        if not project_id:
            logging.error('Option --project-id required '
                          'when registering a device instance.')
            sys.exit(-1)
        device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                           (api_endpoint, project_id))
        device_id = str(uuid.uuid1())
        payload = {
            'id': device_id,
            'model_id': device_model_id,
            'client_type': 'SDK_SERVICE'
        }
        session = google.auth.transport.requests.AuthorizedSession(credentials)
        r = session.post(device_base_url, data=json.dumps(payload))
        if r.status_code != 200:
            logging.error('Failed to register device: %s', r.text)
            sys.exit(-1)
        logging.info('Device registered: %s', device_id)
        pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
        with open(device_config, 'w') as f:
            json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            print('Turning device on')
        else:
            print('Turning device off')

    @device_handler.command('Extend')
    def extend(number):
        global ser
        ser.write(b'f')
        print("        Extending ...          ")

    @device_handler.command('Flex')
    def flex(number):
        global ser
        ser.write(b'b')
        print("        Flexing ...          ")

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        def signal_handler(signal, frame):
            detector.terminate()

        def interrupt_callback():
            global continue_conversation
            if continue_conversation:
                continue_conversation = assistant.assist()
            print('listening..')

        def det_call():
            print('yes master')
            global continue_conversation
            continue_conversation = assistant.assist()
            print('done================')
            return 0

        signal.signal(signal.SIGINT, signal_handler)
        detector = snowboydecoder.HotwordDetector('saaedy.pmdl',
                                                  sensitivity=0.6)

        detector.start(detected_callback=det_call,
                       interrupt_check=interrupt_callback,
                       sleep_time=0.1)
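
The Extend and Flex handlers above write to a global ser that this snippet
never defines. A sketch assuming a pySerial link to a microcontroller (the
port and baud rate are placeholders):

import serial

# Serial handle consumed by the Extend/Flex device handlers above.
ser = serial.Serial('/dev/ttyUSB0', 9600, timeout=1)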
Example #10
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None,
         device_model_id=None,
         device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples_assistant'),
             'device_config.json'),
         lang="en_GB",
         display=True,
         verbose=False,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False,
         *args,
         **kwargs):

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    #|=============================================|
    #|                                             |
    #| Handle commands for Google Assistant Stuff  |
    #|                                             |
    #|=============================================|
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.SetVolume')
    def changeVolume(volumeLevel, isPercentage):
        if isPercentage:
            os.system(
                'pactl set-sink-volume "alsa_output.usb-Generic_USB2.0_Device_20130100ph0-00.analog-stereo" '
                + str(volumeLevel) + '%')

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                input("PRESS ENTER TO SPEAK")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #11
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config,
         lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, hotword_model, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            pin13.write(1)
            logging.info('Turning device on')
        else:
            pin13.write(0)
            logging.info('Turning device off')
            
    @device_handler.command('action.devices.commands.BrightnessAbsolute')
    def brightnessCheck(brightness):
        pin13.write(brightness / 100)  # brightness arrives as 0-100; the pin expects 0-1
        logging.info('OK, brightness is %s', brightness)

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        import snowboydecoder

        def listening():
            detector = snowboydecoder.HotwordDetector(hotword_model, sensitivity=0.397, audio_gain=1)
            print("Say dalilaa .....or, Press Ctrl+C to exit")

            detector.start(detected_callback=detectedCallback, sleep_time=0.01)
            detector.terminate()

        def detectedCallback():
            continue_conversation = assistant.assist()
            print("Say dalilaa ...... or  Press Ctrl+C to exit")
            # listening()

        listening()
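The `pin13` object used by the OnOff and BrightnessAbsolute handlers above is not defined in this snippet; given the 0-1 write range, it is presumably a PWM-capable pin from a board library such as pyFirmata. A hedged sketch of the setup this sample appears to assume (port and pin mode are guesses):

# Assumption: pin13 comes from pyFirmata on an Arduino-style board
# whose pin 13 supports PWM; adjust the serial port for your system.
from pyfirmata import Arduino

board = Arduino('/dev/ttyACM0')
pin13 = board.get_pin('d:13:p')  # digital pin 13 in PWM mode
pin13.write(0.5)                 # pyFirmata PWM takes values in [0, 1]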
Example #12
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        Now = datetime.datetime.now()
        DateTimeStamp = "{:%d/%m/%Y}".format(Now)
        print(DateTimeStamp)
        cached_list = cached.find_one({'ref': DateTimeStamp})['cached']
        print(cached_list)
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        #wait_for_user_trigger = not once
        while True:
            if assistant.MODE == 0 or assistant.MODE == 1:
                query = ''
                while True:
                    name = current_detected.get('Name')
                    if name:  # memcache.get returns None when the key is unset
                        query = name
                        assistant.switch_mode(1)
                        if name not in cached_list:
                            cached_list.append(name)
                            cached.update({'ref': DateTimeStamp},
                                          {'$push': {
                                              'cached': name
                                          }})
                            query = name + "first"
                            assistant.switch_mode(0)
                        break

                if assistant.MODE == 0:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                    #print(text)
                elif assistant.MODE == 1:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                    #print(text)
                    assistant.switch_mode(2)
            elif assistant.MODE == 2:
                text, continue_conversation = assistant.assist(text_query=None)
                print(text)
                if text == 'Please report activity again.':
                    print('recording again')
                    assistant.switch_mode(2)
                    #text, continue_conversation = assistant.assist(text_query=None)
                else:
                    print('why??')
                    assistant.switch_mode(1)
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            #wait_for_user_trigger = not continue_conversation

            # If the conversation ended, re-open the test app for the next turn.
            if not continue_conversation:
                assistant.assist(text_query='Talk to my test app')

            time.sleep(1)
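The inner loop above busy-waits until some other process stores a detected name under the 'Name' key in memcached (and `cached` is presumably a pymongo collection opened elsewhere in the script). A hypothetical producer side, e.g. a face-recognition process, might publish detections like this:

import memcache

# Hypothetical producer: publishes the latest detection for the assistant
# loop to pick up; '127.0.0.1:11211' matches the consumer above.
mc = memcache.Client(['127.0.0.1:11211'], debug=0)

def publish_detection(name):
    mc.set('Name', name)

def clear_detection():
    mc.set('Name', '')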
Example #13
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    context = zmq.Context.instance()
    socket = context.socket(zmq.REP)
    socket.bind('tcp://127.0.0.1:5555')

    while True:
        print('receiving socket message...')
        msg = socket.recv_string()
        if msg == 'stop':
            socket.send_string('stopping')
            context.destroy()
            break
        if msg != 'start':
            socket.send_string('invalid message')
            continue
        print('received start message')
        # Configure audio source and sink.
        audio_device = None
        audio_source = audio_helpers.WaveSource(
            open('in.wav', 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
        if output_audio_file:
            audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_sink = audio_device = (audio_device
                                         or audio_helpers.SoundDeviceStream(
                                             sample_rate=audio_sample_rate,
                                             sample_width=audio_sample_width,
                                             block_size=audio_block_size,
                                             flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )

        device_handler = device_helpers.DeviceRequestHandler(device_id)

        @device_handler.command('action.devices.commands.OnOff')
        def onoff(on):
            if on:
                logging.info('Turning device on')
            else:
                logging.info('Turning device off')

        if not device_id or not device_model_id:
            try:
                with open(device_config) as f:
                    device = json.load(f)
                    device_id = device['id']
                    device_model_id = device['model_id']
            except Exception as e:
                logging.warning('Device config not found: %s' % e)
                logging.info('Registering device')
                if not device_model_id:
                    logging.error('Option --device-model-id required '
                                  'when registering a device instance.')
                    sys.exit(-1)
                if not project_id:
                    logging.error('Option --project-id required '
                                  'when registering a device instance.')
                    sys.exit(-1)
                device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                                   (api_endpoint, project_id))
                device_id = str(uuid.uuid1())
                payload = {'id': device_id, 'model_id': device_model_id}
                session = google.auth.transport.requests.AuthorizedSession(
                    credentials)
                r = session.post(device_base_url, data=json.dumps(payload))
                if r.status_code != 200:
                    logging.error('Failed to register device: %s', r.text)
                    sys.exit(-1)
                logging.info('Device registered: %s', device_id)
                os.makedirs(os.path.dirname(device_config), exist_ok=True)
                with open(device_config, 'w') as f:
                    json.dump(payload, f)

        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream, grpc_channel, grpc_deadline,
                             device_handler) as assistant:
            # If file arguments are supplied:
            # exit after the first turn of the conversation.
            #if input_audio_file or output_audio_file:
            assistant.assist()
            print("accepting another request...")

        socket.send_string('done')
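A matching client for the REP socket above would connect a REQ socket to tcp://127.0.0.1:5555 and strictly alternate send/receive. A minimal sketch:

import zmq

context = zmq.Context.instance()
socket = context.socket(zmq.REQ)
socket.connect('tcp://127.0.0.1:5555')

# Ask the server to process in.wav, then shut it down.
socket.send_string('start')
print(socket.recv_string())   # 'done' once the Assistant turn finishes
socket.send_string('stop')
print(socket.recv_string())   # 'stopping'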
Example #14
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         device_config=os.path.join(
             click.get_app_dir('googlesamples-assistant'),
             'device_config.json'),
         device_id=None,
         project_id=None,
         device_model_id=None,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         lang='ko-KR',
         verbose=False,
         once=False,
         grpc_deadline=DEFAULT_GRPC_DEADLINE):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            print('JCH MP3 Play')
            #launch subprocess
            #python cannot play mp3
            #subprocess.call(['lxterminal', '-e', 'python runMP3.py'])
            subprocess.call(['lxterminal', '-e', './runMP3.sh'])
            print('JCH fork process is run. parent process is still running')
        else:
            logging.info('Turning device off')
            killMP3Pid()
            print('JCH turn off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    # JCH: find and kill the MP3 player processes by scanning `ps -ef`.
    def killMP3Pid():
        count = 1
        cmd = ['ps', '-ef']

        fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
        for line in fd_popen:
            line = line.decode('utf-8', errors='replace')  # stdout yields bytes
            if line.find('runMP3') != -1:
                pid = line.split()[1]
                print('bash pid:' + str(pid))
                os.kill(int(pid), signal.SIGTERM)  # or signal.SIGKILL
                break
        fd_popen.close()

        fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
        for line in fd_popen:
            line = line.decode('utf-8', errors='replace')
            if line.find('omxplayer') != -1:
                pid = line.split()[1]
                print('pid:' + str(pid))
                os.kill(int(pid), signal.SIGTERM)
                if count == 2:
                    break
                count = count + 1
        fd_popen.close()


    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        while assistant.assist():
            if once:
                break
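Scanning `ps -ef` output by hand, as killMP3Pid does, is fragile. Where the standard `pkill` utility is available, the same cleanup can be a sketch like this (a suggested alternative, not part of the original sample):

import subprocess

def kill_mp3_processes():
    # -f matches against the full command line; a return code of 1
    # simply means no matching process was found.
    subprocess.run(['pkill', '-f', 'runMP3'], check=False)
    subprocess.run(['pkill', '-f', 'omxplayer'], check=False)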
Example #15
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config,
         lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Porcupine setup
    library_path = "lib/linux/x86_64/libpv_porcupine.so" # Path to Porcupine's C library available under lib/${SYSTEM}/${MACHINE}/
    model_file_path = "lib/common/porcupine_params.pv" # It is available at lib/common/porcupine_params.pv
    keyword_file_paths = ['picovoice_linux.ppn', 'ok_google_linux_2020-04-28_v1.7.0.ppn', 'hey_google_linux_2020-04-28_v1.7.0.ppn']
    sensitivities = [0.8, 0.9, 0.9]
    porcupine = None
    pa = None
    audio_stream = None

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        wait_for_user_trigger = not once

        try:
            porcupine = Porcupine(
                library_path,
                model_file_path,
                keyword_file_paths=keyword_file_paths,
                sensitivities=sensitivities)
            pa = pyaudio.PyAudio()
            audio_stream = pa.open(
                rate=porcupine.sample_rate,
                channels=1,
                format=pyaudio.paInt16,
                input=True,
                frames_per_buffer=porcupine.frame_length,
                input_device_index=2)
            while True:
                #print('listening')
                pcm = audio_stream.read(porcupine.frame_length)
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                #print('test')
                result = porcupine.process(pcm)
                if result >= 0:
                    print('detected keyword')
                    continue_conversation = assistant.assist()
                    wait_for_user_trigger = not continue_conversation
        except KeyboardInterrupt:
            print('stopping ...')
        finally:
            if porcupine is not None:
                porcupine.delete()
            if audio_stream is not None:
                audio_stream.close()
            if pa is not None:
                pa.terminate()
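Since porcupine.process() returns the index of the matched keyword within keyword_file_paths (or -1 for no detection), the detection branch above can also report which of the three hotwords fired before invoking the Assistant:

result = porcupine.process(pcm)
if result >= 0:
    # result indexes into keyword_file_paths / sensitivities.
    print('detected keyword: %s' % keyword_file_paths[result])
    continue_conversation = assistant.assist()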
Example #16
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        query = ''
        cached = []
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        #wait_for_user_trigger = not once
        while True:

            while True:
                name = current_detected.get('Name')
                if name:  # memcache.get returns None when the key is unset
                    query = name
                    break

            click.echo('<you> %s' % query)

            #always set MODE = TRUE for text input for detection input
            #if not assistant.MODE:
            #    assistant.switch_mode()

            #first request made to Dialogflow to notify the user detected
            continue_conversation = assistant.assist(text_query=query)

            #decision fork, whether it is first detection for check in
            #or subsequent detections for action tracking
            if query not in cached:
                cached.append(query)
                continue_conversation = assistant.assist(text_query='start')
            else:
                assistant.switch_mode()
                continue_conversation = assistant.assist(text_query=None)
                assistant.switch_mode()
Example #17
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    credentials, http_request = create_credentials_and_http_request(
        credentials)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))

    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        device_id, device_model_id = configure_device_id_and_model_id(
            device_config)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        while True:
            signal.signal(signal.SIGUSR2, sigusr2_handler)
            global SHOULD_ASSIST
            if not SHOULD_ASSIST:
                time.sleep(0.5)
                continue  # keep looping
            # temp disabling sighandler
            signal.signal(signal.SIGUSR2, sigusr2_handler_disabled)
            continue_conversation = assistant.assist()
            if not continue_conversation:
                SHOULD_ASSIST = False
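SHOULD_ASSIST, sigusr2_handler and sigusr2_handler_disabled are defined outside this snippet; presumably the handler flips the flag so that another process can start a conversation with `kill -USR2 <pid>`. A hedged sketch of what they might look like:

import signal

SHOULD_ASSIST = False  # assumption: module-level flag polled by the main loop

def sigusr2_handler(signum, frame):
    # Another process sends SIGUSR2 to trigger one Assistant turn.
    global SHOULD_ASSIST
    SHOULD_ASSIST = True

def sigusr2_handler_disabled(signum, frame):
    # Ignore the signal while a conversation is already in progress.
    pass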
Example #18
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.acme.commands.play_kkbox')
    def play_music(songName):  # You must match the parameters from the Action Package.
        logging.info('play %s ' % songName)
        # url = 'https://widget.kkbox.com/v1/?id=4kxvr3wPWkaL9_y3o_&type=song&terr=TW&lang=TC&autoplay=true&loop=true'
        # result = subprocess.Popen(['firefox', url], stdout=subprocess.PIPE)
        # print(result.stdout)

        from kkbox_partner_sdk.auth_flow import KKBOXOAuth

        CLIENT_ID = 'cea7cb81a731b46caeb9b8c0e25abd22'
        CLIENT_SECRET = '6317f7914dcc9e1fb50d01f744b3f1fb'

        auth = KKBOXOAuth(CLIENT_ID, CLIENT_SECRET)
        token = auth.fetch_access_token_by_client_credentials()
        print(token)

        from kkbox_partner_sdk.api import KKBOXAPI

        kkboxapi = KKBOXAPI(token)

        keyword = '女武神'  # NOTE: the demo searches this fixed title and ignores songName
        types = ['track']
        result = kkboxapi.search_fetcher.search(keyword, types)

        tracks = result['tracks']['data']
        # print('Search results: {}'.format(tracks))

        track_id = result['tracks']['data'][0]['id']
        track_info = kkboxapi.track_fetcher.fetch_track(track_id)
        url = track_info['url']
        print('Track info URL: {}'.format(url))
        send(url)

        tickets = kkboxapi.ticket_fetcher.fetch_media_provision(track_id)
        url = tickets['url']
        print('Media download URL: {}'.format(url))

        print('Playback info follows')
        import subprocess
        subprocess.run(['ffplay', '-nodisp', '-autoexit', url])

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        once = True  # hard-coded: skip the Enter prompt and exit after one conversation

        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
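Since play_music re-authenticates on every invocation, fetching the client-credentials token once at module scope would cut per-command latency. A sketch under the same kkbox_partner_sdk assumptions, with the sample's CLIENT_ID/CLIENT_SECRET moved to module level:

from kkbox_partner_sdk.auth_flow import KKBOXOAuth
from kkbox_partner_sdk.api import KKBOXAPI

# Fetch one token at startup and reuse it across commands. Tokens do
# expire, so production code would refresh on authorization errors.
_auth = KKBOXOAuth(CLIENT_ID, CLIENT_SECRET)
_token = _auth.fetch_access_token_by_client_credentials()
kkboxapi = KKBOXAPI(_token)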
Example #19
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('com.example.commands.SearchCar')
    def search_cars(filter, param):
        print('SEARCH CARS\n' + 'FILTER: ' + filter + ', PARAM: ' + param)

        try:
            response = requests.get(
                'http://localhost:5000/api/cars?{}={}'.format(filter, param))
            data = json.loads(response.text)
            cars = data['cars']
        except Exception:
            print("Problem communicating with server")
            cars = []

        print('%-2s | %-10s | %-10s | %-8s | %-9s | %-9s | %s' %
              ("ID", "Make", "Body Type", "Colour", "No. Seats", "Cost/Hour",
               "Location"))
        print(
            '---+------------+------------+----------+-----------+-----------+----------------------'
        )

        for car in cars:
            print('%-2d | %-10s | %-10s | %-8s | %-9d | $%-8d | %s' %
                  (car['id'], car['make'], car['body_type'], car['colour'],
                   car['no_seats'], car['cost_per_hour'], car['location']))
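
    # For reference, a minimal sketch (an assumption, not part of the sample)
    # of the JSON the local cars API above is expected to return, based on the
    # keys search_cars() consumes:
    #
    #   {"cars": [{"id": 1, "make": "Toyota", "body_type": "Sedan",
    #              "colour": "Blue", "no_seats": 5, "cost_per_hour": 10,
    #              "location": "City"}]}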

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #20
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Activation via wake word
        porcupine = None
        pa = None
        audio_stream = None
        try:
            # Create the Porcupine handle
            porcupine = pvporcupine.create(keywords=['jarvis', 'snowboy'])
            # Record audio with pyaudio
            pa = pyaudio.PyAudio()
            audio_stream = pa.open(rate=porcupine.sample_rate,
                                   channels=1,
                                   format=pyaudio.paInt16,
                                   input=True,
                                   frames_per_buffer=porcupine.frame_length)
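
            # Note: porcupine.process() expects 16-bit PCM frames of exactly
            # porcupine.frame_length samples at porcupine.sample_rate (16 kHz
            # for Porcupine), which is why the stream is opened with those
            # values above.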

            # Wait loop: poll frames until the wake word is detected
            def get_next_audio_frame():
                pcm = audio_stream.read(porcupine.frame_length,
                                        exception_on_overflow=False)
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                return pcm

            while True:
                keyword_index = porcupine.process(get_next_audio_frame())
                if keyword_index >= 0:
                    # detection event logic/callback
                    continue_conversation = assistant.assist()
                    while audio_stream.get_read_available() > 0:
                        get_next_audio_frame()

        finally:
            if porcupine is not None:
                porcupine.delete()

            if audio_stream is not None:
                audio_stream.close()

            if pa is not None:
                pa.terminate()

        # NOTE: as written, the code below is unreachable: the wake-word loop
        # above only exits via an exception, which propagates after the
        # finally block. It is kept from the original push-to-talk flow:
        # keep recording voice requests using the microphone
        # and playing back assistant responses using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #21
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    ############################################################################
    global updt_time, query, resp_text, mute, startmouth, TezHead, beep, faceFound, name2, onceface, facerec_en, keyboard_on

    # Head-tracking control constants.
    Kpx = 1
    Kpy = 1
    Ksp = 40

    time.sleep(5)

    ## Head X and Y angle limits
    Xmax = 725
    Xmin = 290
    Ymax = 550
    Ymin = 420
    keyboard_on = False
    ## Initial Head position

    Xcoor = 511
    Ycoor = 450
    Facedet = 0

    ## Time (seconds) the head waits while turned
    touch_wait = 2

    no_face_tm = time.time()
    face_det_tm = time.time()
    last_face_det_tm = time.time()
    touch_tm = 0
    touch_samp = time.time()
    qbo_touch = 0
    touch_det = False
    face_not_found_idx = 0
    mutex_wait_touch = False
    faceFound = False
    onceface = False
    dist = 100
    audio_response1 = '/home/pi/Reebo_Python/up.wav'
    wavep = wave.open(audio_response1, 'rb')
    audio_response2 = '/home/pi/Reebo_Python/HiTej.wav'
    wavep2 = wave.open(audio_response2, 'rb')
    facerec_en = False

    ############################################################################

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    #~ def findquery():

    #############################   FACEREC THREAD   ##################################################
    def facerec():

        global name2

        f = open("/home/pi/Reebo_Python/face_features.pkl", 'rb')
        details = pickle.load(f)

        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        name2 = []
        unknown_picture = fr.load_image_file("/home/pi/Reebo_Python/test.jpg")

        # Grab a single frame of video
        # frame = unknown_picture

        # Resize frame of video to 1/4 size for faster face recognition processing
        # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        # rgb_small_frame = small_frame[:, :, ::-1]

        # Find all the faces and face encodings in the current frame of video
        face_locations = fr.face_locations(unknown_picture)
        face_encodings = fr.face_encodings(unknown_picture, face_locations)

        print("{0} persons identified".format(len(face_locations)))

        face_names = []
        for face_encoding in face_encodings:
            matches = fr.compare_faces(details['encodings'], face_encoding,
                                       0.45)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = details["name"][first_match_index]

            face_names.append(name)

        print(face_names)
        for i in range(0, len(face_names)):

            name_temp = str(face_names[i]).replace('photos/', "")
            name_temp = str(name_temp).replace(']\'', "")
            name2.append(str(name_temp))
        print(name2)
        with open("/home/pi/Reebo_Python/names.txt", 'w') as n:
            for i in face_names:
                n.write(i + "\n")

        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            if not name:
                continue
            if name == "warner":
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (255, 0, 0), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (255, 0, 0), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
            else:
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (0, 0, 255), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (0, 0, 255), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
            cv2.imwrite("/home/pi/Reebo_Python/result.png", unknown_picture)

    def findFace():
        global name2, faceFound, onceface, facerec_en, updt_time
        found_tm = time.time()
        onceface = False
        touch_samp = time.time()
        Xmax = 725
        Xmin = 290
        Ymax = 550
        Ymin = 420
        qbo_touch = 0

        while True:
            #print("find face " + str(time.time()))
            try:

                faceFound = False
                #    while not faceFound :
                # This variable is set to true if, on THIS loop, a face has already been found.
                # We search for a face in two different ways, and if we have found one already
                # there is no reason to keep looking.
                #thread.start_new_thread(WaitForSpeech, ())
                #	WaitForSpeech()
                #    ServoHome()
                Cface = [0, 0]
                t_ini = time.time()
                while time.time() - t_ini < 0.01:  # wait for a fresh frame
                    t_ini = time.time()
                    aframe = webcam.read()[1]
                    # print("t: " + str(time.time() - t_ini))

                fface = frontalface.detectMultiScale(
                    aframe, 1.3, 4, (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH), (60, 60))
                pfacer = profileface.detectMultiScale(
                    aframe, 1.3, 4, (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH), (80, 80))
                if len(fface) != 0:  # if we found a frontal face...
                    for f in fface:  # f in fface is an array with a rectangle representing a face
                        faceFound = True
                        face = f

                elif len(pfacer) != 0:  # if we found a profile face...
                    for f in pfacer:
                        faceFound = True
                        face = f

                if faceFound:
                    updt_time = time.time()
                    #facerec()
                    if onceface == False:
                        cv2.imwrite("/home/pi/Reebo_Python/test.jpg", aframe)

                        onceface = True
                    found_tm = time.time()
                    x, y, w, h = face
                    Cface = [
                        (w / 2 + x), (h / 2 + y)
                    ]  # we are given an x,y corner point and a width and height, we need the center
                    TezHead.SetNoseColor(4)
                    #print "face ccord: " + str(Cface[0]) + "," + str(Cface[1])
                    faceOffset_X = 160 - Cface[0]
                    if faceOffset_X > 20 or faceOffset_X < -20:
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(1, faceOffset_X >> 1)
                        # release mutex
                        #wait for move
                        time.sleep(0.05)
                        #print "MOVE REL X: " + str(faceOffset_X >> 1)
                    faceOffset_Y = Cface[1] - 120
                    if faceOffset_Y > 20 or faceOffset_Y < -20:
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(2, faceOffset_Y >> 1)
                        # release mutex
                        #wait for move
                        time.sleep(0.05)
                if time.time() - found_tm > 0.5:
                    TezHead.SetNoseColor(0)

            except Exception as e:
                print(e)
                pass
            try:
                current_touched = cap.touched()
                #last_touched = cap.touched()
                cap.set_thresholds(10, 6)
                # Check each pin's last and current state to see if it was pressed or released.
                i = 0
                for i in [1, 11]:
                    pin_bit = 1 << i
                    # Each pin is represented by a bit in the touched value;
                    # a bit value of 1 means the pin is currently touched.
                    # First check if transitioned from not touched to touched.
                    if current_touched & pin_bit:  #and not last_touched & pin_bit:
                        print('{0} touched!'.format(i))
                        qbo_touch = int(i)
            ##            # Next check if transitioned from touched to not touched.
            ##            if not current_touched & pin_bit and last_touched & pin_bit:
            ##                print('{0} released!'.format(i))
            ##        # Update last state and wait a short period before repeating.
            ##        last_touched = current_touched
            #time.sleep(0.1)
            except:
                #print sys.exc_info()
                #print "error"
                pass

            if (time.time() - touch_samp >
                    0.5):  # & (time.time() - last_face_det_tm > 3):
                touch_samp = time.time()
                #~ time.sleep(0.002)
                if qbo_touch in [1, 11]:
                    if qbo_touch == 1:
                        print("right")
                        TezHead.SetServo(1, Xmax - 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        #thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0
                    elif qbo_touch == 2:
                        #~ time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0

                    elif qbo_touch == 11:
                        print("left")
                        TezHead.SetServo(1, Xmin + 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        #thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0

    def distance():
        # set Trigger to HIGH
        GPIO.output(GPIO_TRIGGER, True)

        # set Trigger after 0.01ms to LOW
        time.sleep(0.00001)
        GPIO.output(GPIO_TRIGGER, False)

        StartTime = time.time()
        StopTime = time.time()

        # save StartTime
        while GPIO.input(GPIO_ECHO) == 0:
            StartTime = time.time()

        # save time of arrival
        while GPIO.input(GPIO_ECHO) == 1:
            StopTime = time.time()

        # time difference between start and arrival
        TimeElapsed = StopTime - StartTime
        # multiply with the sonic speed (34300 cm/s)
        # and divide by 2, because there and back
        distance = (TimeElapsed * 34300) / 2

        return distance
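
    # distance() assumes an HC-SR04-style ultrasonic sensor whose trigger and
    # echo pins were configured earlier in the program. A minimal sketch of
    # that setup (pin numbers are placeholders, not from the original):
    #
    #   GPIO.setmode(GPIO.BCM)
    #   GPIO_TRIGGER, GPIO_ECHO = 18, 24
    #   GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
    #   GPIO.setup(GPIO_ECHO, GPIO.IN)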
    ##################################  SOCKET THREAD   ######################################################
    def socket_thread(conn):

        print('Socket.IO Thread Started.')

        def empid_received():
            socket.emit('event-ask-cardno')
            print "ASK CARD NO"

        def cardno_received():
            print "Card No received"
            conn.send(False)

        socket.on('event-empid-received', empid_received)
        socket.on('event-cardno-received', cardno_received)
        socket.wait()

    def findquery(parent_conn):
        global resp_text, mute, query, beep
        keyboard_on = False
        if resp_text == "Sorry, I can't help.":
            query = "Talk to Reebo"
            mute = True
        elif resp_text == "Alright! Say Cheese!":
            print "camera"
            aframe = webcam.read()[1]
            cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
            socket.emit('event-take-selfie')
            #mute=False

        elif resp_text.startswith("Can you please smile for the camera?"):
            mute = False
            beep = False
            print "BEEP"
            time.sleep(5)
            aframe = webcam.read()[1]
            cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
            socket.emit('event-take-selfie')
            query = "Say@#$: Thank you. Please enter your employee ID and card number"
            assistant.assist()
            socket.emit('event-ask-empid')
            keyboard_on = True
            print "KEYBOARD in findquery: ", keyboard_on
            keyboard_on = parent_conn.recv()
            query = "Say@#$: Thank You. You will be granted access shortly"
            mute = False
            beep = False

    if len(sys.argv) > 1:
        port = sys.argv[1]
    else:
        port = '/dev/serial0'

    try:
        # Open serial port
        ser = serial.Serial(port,
                            baudrate=115200,
                            bytesize=serial.EIGHTBITS,
                            stopbits=serial.STOPBITS_ONE,
                            parity=serial.PARITY_NONE,
                            rtscts=False,
                            dsrdtr=False,
                            timeout=0)
        print "Open serial port sucessfully."
        print(ser.name)
    except Exception as e:
        print e
        print "Error opening serial port."
        sys.exit()

    try:
        cap = MPR121.MPR121()
        time.sleep(3)
        #
        if not cap.begin():
            print('Error initializing MPR121.  Check your wiring!')
    except Exception as e:
        print(e)
        pass

    TezHead = TezCmd.Controller(ser)
    TezHead.SetMouth(0x110E00)

    time.sleep(1)
    #TezHead.SetPid(1, 26, 12, 16)
    TezHead.SetPid(1, 26, 2, 16)

    #TezHead.SetPid(2, 26, 12, 16)
    TezHead.SetPid(2, 26, 2, 16)
    time.sleep(1)
    TezHead.SetServo(1, Xcoor, 100)
    TezHead.SetServo(2, Ycoor, 100)
    time.sleep(1)
    TezHead.SetNoseColor(0)

    # Get ready to start grabbing images from the webcam.
    webcam = cv2.VideoCapture(-1)
    # 320x240 is about the highest resolution you'll want to attempt on the Pi.
    webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
    webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
    #webcam.set(cv2.CV_CAP_PROP_BUFFERSIZE, 2)		# frame buffer storage

    if not webcam.isOpened():
        print("Error opening WebCAM")
        sys.exit(1)

    #open = False

    frontalface = cv2.CascadeClassifier(
        "/home/pi/Documents/Python projects/haarcascade_frontalface_alt2.xml"
    )  # frontal face pattern detection
    profileface = cv2.CascadeClassifier(
        "/home/pi/Documents/Python projects/haarcascade_profileface.xml"
    )  # side face pattern detection
    #parent_conn, child_conn = Pipe()

    t1 = Thread(target=findFace)
    t1.start()
    t3 = Thread(target=facerec)
    parent_conn, child_conn = Pipe()
    socket_thd = Thread(target=socket_thread, args=(child_conn, ))
    socket_thd.start()

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        button_once = False
        #playsound('/home/pi/env/HiTej.wav')
        print "playsound"
        mute = True
        query = "Talk to Reebo"
        print query
        #################################################################################
        #~ query,mute=findquery()
        ##################################### FIND QUERY AND MUTE #####################
        assistant.assist()
        mute = False
        query = "audio"
        time.sleep(1)
        updt_time = time.time()
        stream = conversation_stream
        num_frames = wavep.getnframes()  # frames in first audio response file
        resp_samples = wavep.readframes(num_frames)  # get frames from wav file
        num_frames2 = wavep2.getnframes()  # frames in second audio response file
        resp_samples2 = wavep2.readframes(num_frames2)  # get frames from wav file
        name = ""
        while True:
            #if wait_for_user_trigger:

            #logging.info('Press key')
            #x=raw_input()
            #~ stream.start_recording() # unelegant method to access private methods..

            findquery(parent_conn)
            if not mute or beep:
                print("beep")
                stream.start_playback()
                #~ stream.stop_recording()
                stream.write(
                    resp_samples)  # write response sample to output stream
                print("HI")
                stream.stop_playback()

            assistant.assist()
            beep = False
            query = "audio"
            mute = False
            #updt_time=time.time()
            print(time.time() - updt_time)
            dist = distance()
            #~ if dist<50:
            #~ print dist
            #~ updt_time=time.time()
            if time.time() - updt_time > 10:
                name2 = ""
                if onceface:
                    facerec_en = False
                    print("Thread Status", t3.is_alive())
                    if t3.is_alive():
                        # threading.Thread has no terminate(); wait briefly
                        # for the facerec thread to finish on its own.
                        t3.join(1)
                        print("t3 joined")
                    print(facerec_en)
                    onceface = False
                print("in loop")
                query = "audio"
                dist = distance()
                print(faceFound)
                while not faceFound:
                    time.sleep(0.1)
                    # print("FACE FALSE")
                #~ if dist>60:
                #~ #mute=False
                #~ updt_time=time.time()
                #~ print query
                #~ while dist>60:
                #~ dist=distance()
                #~ time.sleep(0.1)
                #~ print dist
                #~ query="Hi"
                #~ print query
                #~ assistant.assist()
                #~ print ("playback")
                #~ socket.emit('event-robot-message',"Hi! Do you want some help ?")
                print "Thread Status", t3.is_alive()
                t3 = Thread(target=facerec)
                t3.start()

                #~ query="Talk to Tej"
                #~ mute=True
                #~ assistant.assist()
                #time.sleep(3)
                stream.start_playback()
                #~ stream.stop_recording()
                stream.write(
                    resp_samples2)  # write response sample to output stream
                socket.emit(
                    'event-robot-message',
                    "Hi! My Name is Reebo. I\'ll be your personal assistant for today"
                )
                stream.stop_playback()
                query = "Say:@#$: "
                if len(name2) >= 1:
                    for i in range(0, len(name2)):
                        if name2[i] != "" and name2[i] != "Unknown":
                            query = query + " Hi " + str(name2[i]) + "!"

                query = query + "What can I do for you?"
                mute = False
                print(query)
                assistant.assist()
                #time.sleep(0.1)
                #~ stream.start_playback()
                #~ stream.stop_recording()
                #~ stream.write(resp_samples2) # write response sample to output stream
                #~ stream.stop_playback()
                #~ #query="Talk to Tej"
                #~ mute=True
                #~ assistant.assist()

                #~ updt_time= time.time()
                query = "audio"
                mute = False
Example #22
0
    def run(self):

        rospy.loginfo(self.logname + 'Initializing...')

        # Get parameters from launch file
        keyphrase_dir = rospy.get_param('key_phrase_dir', "")
        keyphrase_1 = rospy.get_param('key_phrase_1', "")
        keyphrase_1_path = keyphrase_dir + '/' + keyphrase_1
        keyphrase_2 = rospy.get_param('key_phrase_2', "")
        keyphrase_2_path = keyphrase_dir + '/' + keyphrase_2

        # Hotword Sensitivity:  larger value is more sensitive (good for quiet room)
        hotword_sensitivity = 0.50  # rospy.get_param('hotword_sensitivity', 0.80)  # 0.38?
        apply_frontend = rospy.get_param('apply_frontend',
                                         True)  # Frontend filtering

        proxyUrl = rospy.get_param('proxyUrl', "")  # default to no proxy

        rospy.loginfo(self.logname + "KEYPHRASE INFO: ")
        rospy.loginfo("  keyphrase_dir:       " + keyphrase_dir)
        rospy.loginfo("  keyphrase_1:         " + keyphrase_1)
        rospy.loginfo("  keyphrase_2:         " + keyphrase_2)
        rospy.loginfo("========================================")

        if (keyphrase_1 == "") or (keyphrase_2 == ""):
            rospy.logfatal("========================================")
            rospy.logfatal("MISSING KEYPHRASE! SHUTTING DOWN!")
            rospy.logfatal("========================================")
            return

        # Check for Internet connection (fail early instead of the first time we try to use a cloud service)
        if not self.internet_available():
            rospy.logfatal("========================================")
            rospy.logfatal("INTERNET NOT AVAILABLE, SHUTTING DOWN!")
            rospy.logfatal("========================================")
            return

        #=====================================================================================
        # Google Assistant Setup

        rospy.loginfo('Initializing Google Assistant...')
        # initialize parameters (these are usually passed on command line in sample)
        verbose = False
        credentials = '/home/system/.config/google-oauthlib-tool/credentials.json'

        device_config = '/home/system/.config/googlesamples-assistant/device_config.json'

        # Setup logging.
        #logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

        # Load OAuth 2.0 credentials.
        rospy.loginfo('Loading OAuth 2.0 credentials...')
        try:
            with open(credentials, 'r') as f:
                credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                http_request = google.auth.transport.requests.Request()
                credentials.refresh(http_request)
        except Exception as e:
            rospy.logfatal('Error loading credentials: %s', e)
            rospy.logfatal('Run google-oauthlib-tool to initialize '
                           'new OAuth 2.0 credentials.')
            sys.exit(-1)

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            credentials, http_request, ASSISTANT_API_ENDPOINT)
        rospy.loginfo('Connecting to %s', ASSISTANT_API_ENDPOINT)

        # get device info from the config file
        try:
            with open(device_config) as f:
                device = json.load(f)
                self.device_id = device['id']
                self.device_model_id = device['model_id']
                rospy.loginfo("Using device model %s and device id %s",
                              self.device_model_id, self.device_id)
        except Exception as e:
            rospy.logfatal('Device config not found: %s' % e)
            sys.exit(-1)

        #===================================================================
        # DEVICE HANDLERS.  See resources/actions .json to add more actions

        rospy.loginfo('Setting up Google Assistant device handlers...')
        self.device_handler = device_helpers.DeviceRequestHandler(
            self.device_id)

        @self.device_handler.command('com.shinselrobots.commands.move')
        def move(move_direction, amount):
            if amount == '$amount':
                amount = ''
            rospy.loginfo('******> Got Move Command [%s]  [%s] ',
                          move_direction, amount)
            move_speed = '0.5'
            move_command = '0.75'  # Normal Move (meters)
            if move_direction == 'BACKWARD':
                move_command = '-0.5'  # Normal Backward Move (meters)
                if amount == 'SMALL':
                    move_command = '-0.25'  # Small Move
                elif amount == 'LARGE':
                    move_command = '-0.75'  # Large Move
            else:
                move_command = '0.75'  # Normal Forward Move (meters)
                if amount == 'SMALL':
                    move_command = '0.25'  # Small Move
                elif amount == 'LARGE':
                    move_command = '1.0'  # Large Move

            self.handle_behavior_command('MOVE', move_command, move_speed,
                                         ('ok, moving ' + move_direction))

        @self.device_handler.command('com.shinselrobots.commands.turn')
        def turn(turn_direction, amount):
            if amount == '$amount':
                amount = ''
            rospy.loginfo('******> Got Turn Command [%s]  [%s] ',
                          turn_direction, amount)
            turn_speed = '0.5'
            turn_command = '45'  # Normal Turn
            if amount == 'SMALL':
                turn_command = '30'  # Small Turn
            elif amount == 'LARGE':
                turn_command = '90'  # Large Turn
            if turn_direction == 'RIGHT':
                turn_command = '-' + turn_command  # Negative Turn
            self.handle_behavior_command('TURN', turn_command, turn_speed,
                                         ('turning ' + turn_direction))

        @self.device_handler.command('com.shinselrobots.commands.spin_left')
        def spin_left(param1):
            turn_speed = '0.5'
            self.handle_behavior_command('TURN', '180', turn_speed,
                                         'spinning left')

        @self.device_handler.command('com.shinselrobots.commands.spin_right')
        def spin_right(param1):
            turn_speed = '0.5'
            self.handle_behavior_command('TURN', '-180', turn_speed,
                                         'spinning right')

        @self.device_handler.command('com.shinselrobots.commands.stop')
        def stop(param1):
            self.handle_behavior_command('STOP', '', '', 'stopping')

        @self.device_handler.command('com.shinselrobots.commands.sleep')
        def sleep(param1):
            self.handle_behavior_command('SLEEP', '', '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.wake')
        def wake(param1):
            self.handle_behavior_command('WAKEUP', '', '', 'ok, waking up')

        @self.device_handler.command('com.shinselrobots.commands.intro')
        def intro(param1):
            self.handle_behavior_command('INTRO', '', '', 'ok, sure')

        @self.device_handler.command('com.shinselrobots.commands.hands_up')
        def hands_up(param1):
            self.handle_behavior_command('HANDS_UP', '', '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.shake_hands')
        def shake_hands(param1):
            self.handle_behavior_command('SHAKE_HANDS', '', '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.arms_home')
        def arms_home(param1):
            self.handle_behavior_command('ARMS_HOME', '', '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.follow')
        def follow(param1):
            self.handle_behavior_command('FOLLOW_ME', '', '',
                                         'ok, I will follow you')

        @self.device_handler.command(
            'com.shinselrobots.commands.microphone_off')
        def microphone_off(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: MICROPHONE OFF')
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text("Ok, I will stop listening")
            self.mic_user_enable_pub.publish(False)

        @self.device_handler.command('com.shinselrobots.commands.microphone_on'
                                     )
        def microphone_on(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: MICROPHONE ON')
            rospy.loginfo('**********************************************')
            # no action needed, but send just in case...
            self.mic_user_enable_pub.publish(True)

        @self.device_handler.command('com.shinselrobots.commands.toggle_lights'
                                     )
        def toggle_lights(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: toggle lights')
            rospy.loginfo('**********************************************')

            # TODO Fix this temp Kludge, to use some global state (param server, or messaging?)
            text_to_say = ""
            light_mode = 0
            if self.arm_lights_on:
                rospy.loginfo('SPEECH:  TURN LIGHTS OFF (Toggle)')
                light_mode = 0
                text_to_say = "entering stealth mode"
                self.arm_lights_on = False
            else:
                rospy.loginfo('SPEECH:  TURN LIGHTS ON (Toggle)')
                light_mode = 1
                text_to_say = "Doesnt this look cool?"
                self.arm_lights_on = True

            rospy.loginfo('DEBUG2:   *********')
            self.pub_light_mode.publish(light_mode)

            rospy.loginfo('DEBUG3:   *********')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text(text_to_say)

            rospy.loginfo('DEBUG:  DONE WITH TOGGLE LIGHTS *********')

        @self.device_handler.command('com.shinselrobots.commands.sing_believer'
                                     )
        def sing_believer(param1):
            self.handle_behavior_command('RUN_SCRIPT', 'believer', '',
                                         'all right')

        @self.device_handler.command('com.shinselrobots.commands.bow')
        def bow(param1):
            self.handle_behavior_command('BOW', '', '', 'ok')

        @self.device_handler.command(
            'com.shinselrobots.commands.who_is_president')
        def who_is_president(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: Who is President')
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text(
                    "According to the internet, the president is Donald Trump. but, that might just be fake news"
                )

        @self.device_handler.command('com.shinselrobots.commands.wave')
        def wave(param1):
            self.handle_behavior_command('WAVE', '', '', '')

        @self.device_handler.command('com.shinselrobots.commands.head_center')
        def head_center(param1):
            self.handle_behavior_command('HEAD_CENTER', '', '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.tell_time')
        def tell_time(param1):
            self.handle_behavior_command('TELL_TIME', '', '', 'let me check')

        @self.device_handler.command('com.shinselrobots.commands.joke')
        def joke(param1):
            self.handle_behavior_command('TELL_JOKE', param1, '', 'ok')

        @self.device_handler.command('com.shinselrobots.commands.tell_age')
        def tell_age(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: TELL AGE')
            rospy.loginfo('    Param1 = %s', param1)
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text(
                    "I have been under construction for about a year.  My hardware is nearly complete, but my software is constantly evolving"
                )

        @self.device_handler.command('com.shinselrobots.commands.tell_function'
                                     )
        def tell_function(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: TELL FUNCTION')
            rospy.loginfo('    Param1 = %s', param1)
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text(
                    "i am currently focused on human interaction. but I hope to gain object manipulation capabilities soon.  because the ultimate goal of any robot is to fetch beer"
                )

        @self.device_handler.command('com.shinselrobots.commands.tell_size')
        def tell_size(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: TELL SIZE')
            rospy.loginfo('    Param1 = %s', param1)
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text(
                    "I am 4 foot 3 inches tall, and I weigh about 75 pounds")

        @self.device_handler.command('com.shinselrobots.commands.tell_sex')
        def tell_sex(param1):
            rospy.loginfo('**********************************************')
            rospy.loginfo('    Google Assistant Command: TELL SEX')
            rospy.loginfo('    Param1 = %s', param1)
            rospy.loginfo('**********************************************')
            if not use_google_assistant_voice:
                # assistant not acknowledging the command, so we do it
                self.local_voice_say_text("i am a boy robot")

        rospy.loginfo('Google Assistant *** Initialization complete ***')

        self.TTS_client = None
        if not use_google_assistant_voice:
            # check for service as late as possible to give service time to start up
            rospy.loginfo(self.logname +
                          "Initializing LOCAL Text to Speech...")
            client = actionlib.SimpleActionClient(
                "/speech_service", audio_and_speech_common.msg.speechAction)
            if not client.wait_for_server(rospy.Duration(10, 0)):
                rospy.logerr(
                    self.logname +
                    "WARNING!!! Text to Speech server is not available, skipping"
                )
            else:
                self.TTS_client = client

            rospy.loginfo(self.logname + "LOCAL Text to speech server ready")

        #=====================================================================================
        rospy.loginfo(self.logname + "Starting detector...")

        keyphrase_models = [keyphrase_1_path, keyphrase_2_path]
        detector = snowboydecoder.HotwordDetector(
            keyphrase_models,
            sensitivity=hotword_sensitivity,
            apply_frontend=apply_frontend)

        rospy.loginfo(self.logname + "Listening for keyphrase...")
        # Main loop - this function will block until ROS shutdown.

        keyword_detected_callbacks = [
            self.detectedCallback1, self.detectedCallback2
        ]
        detector.start(detected_callback=keyword_detected_callbacks,
                       audio_recorder_callback=self.audioRecorderCallback,
                       interrupt_check=interrupt_callback,
                       mic_pause=mic_pause_callback,
                       sleep_time=0.01,
                       silent_count_threshold=15,
                       recording_timeout=10)  # (blocks?  10 = about 4 seconds)
        # Tune recording_timeout for max expected command. Default of 100 is a LONG time!

        detector.terminate()
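
        # interrupt_callback and mic_pause_callback above are assumed to be
        # module-level helpers defined elsewhere in this node. A minimal
        # sketch of what they might look like (an assumption, not part of
        # the original):
        #
        #   def interrupt_callback():
        #       return rospy.is_shutdown()  # stop the detector on shutdown
        #
        #   def mic_pause_callback():
        #       return False  # never ask the detector to pause the mic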
Example #23
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """

    # GPIO setup
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(sBUTTON, GPIO.IN)
    GPIO.setup(gBUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(gLED, GPIO.OUT, initial=GPIO.HIGH)

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

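    # The device_config file written above mirrors the registration payload,
    # e.g. (illustrative values only):
    #   {"id": "4a1b2c3d-...", "model_id": "my-project-my-model",
    #    "client_type": "SDK_SERVICE"}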
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    image_dir = '/home/pi/robot/image/'

    def camera():
        now = datetime.now()
        dir_name = now.strftime('%Y%m%d')
        dir_path = image_dir + dir_name + '/'
        file_name = now.strftime('%H%M%S') + '.jpg'
        fname = dir_path + file_name
        try:
            os.mkdir(dir_path)
        except OSError:
            logging.info('Date dir already exists')
        os.system('raspistill -o ' + fname)
        return fname

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            # Klight added
            GPIO.output(gLED, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(gLED, GPIO.LOW)
            # os.system('python /home/pi/robot/blinkt_color.py')

        else:
            logging.info('Turning device off')
            GPIO.output(gLED, GPIO.LOW)

    # Kblink
    @device_handler.command('com.acme.commands.blink_light')
    def blinker(number, lightKey):
        logging.info('Blinking device %s times.', number)
        for i in range(int(number)):
            logging.info('Device is blinking %s/%s time.', i + 1, number)
            time.sleep(0.5)
            GPIO.output(gLED, GPIO.HIGH)
            time.sleep(0.5)
            GPIO.output(gLED, GPIO.LOW)

    # Kcamera
    @device_handler.command('com.acme.commands.pi_camera')
    def picamera(number, cameraKey):
        logging.info('Taking a %s %s times.', cameraKey, number)
        GPIO.output(gLED, GPIO.HIGH)
        if cameraKey:  # e.g. 'picture', 'camera' or 'photo'
            fname = camera()
            result = os.system(
                'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face '
                + fname)
            logging.info('Image: %s', fname)
            GPIO.output(gLED, GPIO.LOW)

    @device_handler.command('com.acme.commands.pi_jp')
    def pijp(number, cameraKey):
        logging.info(cameraKey)
        GPIO.output(gLED, GPIO.HIGH)
        if cameraKey:  # e.g. 'picture', 'camera' or 'photo'
            fname = camera()
            result = os.system(
                'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face '
                + fname)
            logging.info('Image: %s', fname)
            GPIO.output(gLED, GPIO.LOW)

    @device_handler.command('com.acme.commands.pi_motor')
    def pimotor(number, directionKey):
        logging.info(directionKey)
        GPIO.output(gLED, GPIO.HIGH)
        result = os.system('python3 /home/pi/robot/motor.py')
        GPIO.output(gLED, GPIO.LOW)
        """if color.get('name') == "blue": #shoot:
          logging.info('Camera shoot!')
          GPIO.output(gLED, GPIO.HIGH)
      else:
          logging.info('Something else happened.')
          GPIO.output(gLED, GPIO.LOW)"""

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                # GPIO button added
                state = GPIO.input(gBUTTON)
                logging.info("Push button to Google talk!")
                GPIO.output(gLED, GPIO.HIGH)
                time.sleep(0.2)
                GPIO.output(gLED, GPIO.LOW)
                if state:
                    pass      # button line reads HIGH: fall through and assist
                else:
                    continue  # button line reads LOW: keep waiting

            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #24
class Assistant():
    def __init__(self):
        self.api_endpoint = ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
        # Setup logging.
        logging.basicConfig(
        )  # filename='assistant.log', level=logging.DEBUG if self.verbose else logging.INFO)
        self.logger = logging.getLogger("assistant")
        self.logger.setLevel(logging.DEBUG)
        self.custom_command = False
        self.once = True
        # Load OAuth 2.0 credentials.
        try:
            with open(self.credentials, 'r') as f:
                self.credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                self.http_request = google.auth.transport.requests.Request()
                self.credentials.refresh(self.http_request)
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error('Run google-oauthlib-tool to initialize '
                          'new OAuth 2.0 credentials.')
            return

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)
        logging.info('Connecting to %s', self.api_endpoint)
        self.audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = DEFAULT_GRPC_DEADLINE
        self.device_id = "roghecv2assistant-roghecv2-59lv9s"
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)

        # Opaque blob provided in AssistResponse that,
        # when provided in a follow-up AssistRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Assist()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None
        # Force reset of first conversation.
        self.is_new_conversation = True

        self.device_handler = device_helpers.DeviceRequestHandler(
            "roghecv2assistant-roghecv2-59lv9s")

        # Stores the current volume percentage.
        # Note: no volume change is currently implemented in this sample.
        self.volume_percentage = 50
        self.display = True

    def assist(self, canvas):
        device_actions_futures = []

        # Configure audio source and sink.
        self.audio_device = None
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
        restart = False
        continue_dialog = True
        try:
            while continue_dialog:
                continue_dialog = False
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                def iter_log_assist_requests():
                    for c in self.gen_assist_requests():
                        assistant_helpers.log_assist_request_without_audio(c)
                        yield c
                    logging.debug('Reached end of AssistRequest iteration.')

                # This generator yields AssistResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Assist(iter_log_assist_requests(),
                                                  self.grpc_deadline):
                    assistant_helpers.log_assist_response_without_audio(resp)
                    if resp.event_type == END_OF_UTTERANCE:
                        logging.info('End of audio request detected.')
                        logging.info('Stopping recording.')
                        self.conversation_stream.stop_recording()
                    if resp.speech_results:
                        mess = ' '.join(r.transcript
                                        for r in resp.speech_results)
                        logging.info('Transcript of user request: "%s".', mess)
                        canvas[1]['text'] = mess
                        if self.once:
                            self.custom_command = google_control.custom_command_handler(
                                mess, canvas)
                    if resp.audio_out.audio_data and not self.custom_command:
                        if not self.conversation_stream.playing:
                            self.conversation_stream.stop_recording()
                            self.conversation_stream.start_playback()
                            logging.info('Playing assistant response.')
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.dialog_state_out.conversation_state:
                        conversation_state = resp.dialog_state_out.conversation_state
                        logging.debug('Updating conversation state.')
                        self.conversation_state = conversation_state
                    if resp.dialog_state_out.volume_percentage != 0:
                        volume_percentage = resp.dialog_state_out.volume_percentage
                        logging.info('Setting volume to %s%%',
                                     volume_percentage)
                        self.conversation_stream.volume_percentage = volume_percentage
                    if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                        continue_dialog = True
                        logging.info('Expecting follow-on query from user.')
                    elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                        continue_dialog = False
                    if resp.device_action.device_request_json:
                        device_request = json.loads(
                            resp.device_action.device_request_json)
                        fs = self.device_handler(device_request)
                        if fs:
                            device_actions_futures.extend(fs)
                    if self.display and resp.screen_out.data and not self.custom_command:
                        system_browser = browser_helpers.system_browser
                        system_browser.display(resp.screen_out.data)
                        self.scrapper(canvas)

                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception:
            self.logger.exception('Connection reset; recreating the Assistant stub.')
            self._create_assistant()
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                self.assist(canvas)
        except Exception:
            self.logger.error('Failed to close conversation_stream.')
        self.once = True

    device_handler = device_helpers.DeviceRequestHandler(
        "roghecv2assistant-roghecv2-59lv9s")

    def scrapper(self, canvas):
        tree = html.parse(
            "/home/ubberboy/Documents/RoghecV2/snowboy/google-assistant-sdk-screen-out.html"
        )
        result = tree.xpath(
            '/html/body/div/div[2]/div[3]/div[2]/div/div/text()')
        final_text = '\n'.join(result)
        print(final_text)
        canvas[3]["text"] = ''.join(c if c <= '\uffff' else ''.join(
            chr(x) for x in struct.unpack('>2H', c.encode('utf-16be')))
                                    for c in final_text)
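        # Worked example of the fold above (assumption: the canvas entries are
        # Tk widgets, and Tk can only display BMP code points <= U+FFFF):
        #   '\U0001d11e' (MUSICAL SYMBOL G CLEF) is re-encoded as its UTF-16
        #   surrogate pair, since struct.unpack('>2H',
        #   '\U0001d11e'.encode('utf-16be')) == (0xD834, 0xDD1E).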

    def _create_assistant(self):
        # Create gRPC channel
        grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)

        self.logger.info('Connecting to %s', self.api_endpoint)
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            grpc_channel)

    # Used only as a plain predicate by the @retry decorator below, so it
    # takes the exception as its sole argument (no self).
    def is_grpc_error_unavailable(e):
        is_grpc_error = isinstance(e, grpc.RpcError)
        if is_grpc_error and (e.code() == grpc.StatusCode.UNAVAILABLE):
            logging.error('grpc unavailable error: %s', e)
            return True
        return False

    @retry(reraise=True,
           stop=stop_after_attempt(3),
           retry=retry_if_exception(is_grpc_error_unavailable))
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # This generator yields ConverseRequest to send to the gRPC
    # Google Assistant API.
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                # Note: this sample reuses the device id string as the model id.
                device_model_id=self.device_id,
            ))
        if self.display:
            config.screen_out_config.screen_mode = PLAYING
        # Continue current conversation with later requests.
        self.is_new_conversation = False
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
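
For orientation, the request stream this generator yields has the following shape (a sketch of the protocol described in the comments above):

# AssistRequest(config=AssistConfig(...))  -- first message: config, no audio
# AssistRequest(audio_in=b'...')           -- subsequent messages: audio only
# ...until the conversation stream stops iterating (end of utterance).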
Example #25
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
        credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'),
        device_config=os.path.join(click.get_app_dir('googlesamples-assistant'),'device_config.json'),
        device_id=None,
        project_id=None,
        device_model_id=None,
        input_audio_file=None,
        output_audio_file=None,
        audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
        audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
        audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
        audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
        audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
        lang='en-US', display=False,
        verbose=False,
        once=False,
        grpc_deadline=DEFAULT_GRPC_DEADLINE, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    connectMQTT()

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.', number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Keep recording voice requests using the microphone
        # and playing back assistant responses using the speaker.
        # This loops as long as assist() returns True, meaning a
        # follow-on query from the user is expected. If the once
        # flag is set, only one request is performed regardless of
        # what assist() returns.
        while assistant.assist():
            if once:
                break
Example #26
def main(api_endpoint, credentials, device_model_id, device_id, lang, display,
         verbose, grpc_deadline, *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('com.example.commands.SearchCar')
    def search_cars(filter, param):
        print('SEARCH CARS\n' + 'FILTER: ' + filter + ', PARAM: ' + param)

        try:
            response = requests.get(
                'http://*****:*****@assistant> %s' % response_text)
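
The body of search_cars above is masked in the source (the service URL and the response handling were redacted). As a hedged sketch only, this is what such a handler commonly looks like, reusing the device_handler defined above and assuming a REST service reachable with the requests library; the URL, query parameters and response shape are illustrative, not recovered from the original:

import logging
import requests

@device_handler.command('com.example.commands.SearchCar')
def search_cars_sketch(filter, param):
    # Hypothetical endpoint; the real URL is redacted in the source snippet.
    url = 'http://localhost:8080/cars'
    try:
        response = requests.get(url, params={filter: param}, timeout=5)
        response.raise_for_status()
        for car in response.json():  # assumes the service returns a JSON list
            print('<@assistant> %s' % car)
    except requests.RequestException as e:
        logging.error('Car search failed: %s', e)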
Example #27
def main(project_id=None,
         device_model_id=None,
         device_id=None,
         api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir("google-oauthlib-tool"),
                                  "credentials.json"),
         device_config=os.path.join(
             click.get_app_dir("googlesamples-assistant"),
             "device_config.json"),
         lang="en-US",
         display=False,
         verbose=False,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False,
         view=None,
         *args,
         **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error(
            "Run google-oauthlib-tool to initialize new OAuth 2.0 credentials."
        )
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    audio_device = None
    # Configure audio source and sink.
    audio_source = audio_device = audio_device or audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size,
    )
    audio_sink = audio_device = audio_device or audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size,
    )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device["id"]
                device_model_id = device["model_id"]
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning("Device config not found: %s" % e)
            logging.info("Registering device")
            if not device_model_id:
                logging.error("Option --device-model-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            if not project_id:
                logging.error(
                    "Option --project-id required when registering a device instance."
                )
                sys.exit(-1)
            device_base_url = "https://%s/v1alpha2/projects/%s/devices" % (
                api_endpoint,
                project_id,
            )
            device_id = str(uuid.uuid1())
            payload = {
                "id": device_id,
                "model_id": device_model_id,
                "client_type": "SDK_SERVICE",
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error("Failed to register device: %s", r.text)
                sys.exit(-1)
            logging.info("Device registered: %s", device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, "w") as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command("com.example.commands.SwitchView")
    def switch(name):
        view.show_frame(name)

    with SampleAssistant(
            lang,
            device_model_id,
            device_id,
            conversation_stream,
            display,
            grpc_channel,
            grpc_deadline,
            device_handler,
            view,
    ) as assistant:
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info="Press Enter to send a new request...")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
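
The view object passed through to main and SampleAssistant is not shown in this snippet; the only method this example calls on it is show_frame(name). A minimal hypothetical stand-in for experimenting with the command handler:

# Hypothetical stand-in for the `view` dependency used above; the real
# sample presumably passes a Tk-style container of named frames.
class ConsoleView:
    def show_frame(self, name):
        print('Switching view to %s' % name)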
Example #28
    record()
    response = detect_intent_audio(project_id, session_id, input_audio_path,
                                   lang)
    handle_response(response)


if __name__ == "__main__":

    logging.basicConfig(level=logging.DEBUG)
    arduinoSerialData = serial.Serial(find_ports()[0], 9600)

    # Setup for the Google Assistant API.
    # Load the OAuth 2.0 credentials (the file must be in this directory).
    with open(credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    # Request a gRPC channel authorized with those credentials.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    audio = pyaudio.PyAudio()
    play_audio_file("startup.wav")
    signal.signal(signal.SIGINT, signal_handler)
    detector = snowboydecoder.HotwordDetector(model,
                                              sensitivity=mic_sensitivity)
    send('1.0.0')
    listen()
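
record, detect_intent_audio, handle_response, send and listen are defined elsewhere in this program. For orientation, a sketch of detect_intent_audio following the standard Dialogflow v2 Python sample (an assumption: this fragment appears to route audio through Dialogflow rather than the Assist API); it requires the google-cloud-dialogflow package and application credentials:

from google.cloud import dialogflow


def detect_intent_audio(project_id, session_id, audio_file_path, language_code):
    """Returns the DetectIntentResponse for a single recorded audio query."""
    session_client = dialogflow.SessionsClient()
    session = session_client.session_path(project_id, session_id)

    with open(audio_file_path, 'rb') as audio_file:
        input_audio = audio_file.read()

    audio_config = dialogflow.InputAudioConfig(
        audio_encoding=dialogflow.AudioEncoding.AUDIO_ENCODING_LINEAR_16,
        language_code=language_code,
        sample_rate_hertz=16000,  # must match how record() captures audio
    )
    query_input = dialogflow.QueryInput(audio_config=audio_config)

    request = dialogflow.DetectIntentRequest(
        session=session, query_input=query_input, input_audio=input_audio)
    return session_client.detect_intent(request=request)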
Example #29
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            print("[joe debug]")
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #30
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error("Run google-oauthlib-tool to initialize "
                      "new OAuth 2.0 credentials.")
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, "rb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_source = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, "wb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_sink = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device["id"]
                device_model_id = device["model_id"]
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning("Device config not found: %s" % e)
            logging.info("Registering device")
            if not device_model_id:
                logging.error("Option --device-model-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            if not project_id:
                logging.error("Option --project-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            device_base_url = "https://%s/v1alpha2/projects/%s/devices" % (
                api_endpoint,
                project_id,
            )
            device_id = str(uuid.uuid1())
            payload = {
                "id": device_id,
                "model_id": device_model_id,
                "client_type": "SDK_SERVICE",
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error("Failed to register device: %s", r.text)
                sys.exit(-1)
            logging.info("Device registered: %s", device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, "w") as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command("action.devices.commands.OnOff")
    def onoff(on):
        if on:
            logging.info("Turning device on")
        else:
            logging.info("Turning device off")

    @device_handler.command("com.example.commands.BlinkLight")
    def blink(speed, number):
        logging.info("Blinking device %s times." % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info("Device is blinking.")
            time.sleep(delay)

    with SampleAssistant(
            lang,
            device_model_id,
            device_id,
            conversation_stream,
            display,
            grpc_channel,
            grpc_deadline,
            device_handler,
    ) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                print("Press button to initiate a new request")
                dots.fill(0x00000F)  # dim blue LEDs: waiting for the button
                dots.show()
                while button.value:
                    time.sleep(0.1)
            # red LEDs
            dots.fill(0xFF0000)
            dots.show()
            continue_conversation = assistant.assist()
            # LEDs off
            dots.fill(0x000000)
            dots.show()

            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
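
The dots and button globals used above are set up elsewhere in this sample. A hedged sketch of typical definitions using Adafruit Blinka (board, digitalio) and the adafruit-circuitpython-dotstar driver; the LED count and pin choices are illustrative only:

# Hypothetical hardware setup for the `dots` and `button` globals above.
import board
import digitalio
import adafruit_dotstar

# Three DotStar LEDs on the hardware SPI pins (illustrative).
dots = adafruit_dotstar.DotStar(board.SCK, board.MOSI, 3, brightness=0.2)

# Momentary button to ground on GPIO17 (illustrative); with the internal
# pull-up enabled, button.value reads False while pressed.
button = digitalio.DigitalInOut(board.D17)
button.switch_to_input(pull=digitalio.Pull.UP)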