def _get_audio_source(self):
    """Returns the system audio source."""
    return audio_helpers.SoundDeviceStream(
        sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
        block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
        flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE)
def initialize(self):
    self.ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
    self.END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
    self.DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
    self.CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE

    api_endpoint = self.ASSISTANT_API_ENDPOINT
    credentials = os.path.join(
        click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
        common_settings.ASSISTANT_CREDENTIALS_FILENAME)
    verbose = False  # currently unused
    self.audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
    self.audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
    self.audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
    self.audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    self.audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    self.grpc_deadline = common_settings.DEFAULT_GRPC_DEADLINE

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        self.error('Error loading credentials: %s', e)
        self.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint, creds,
        ssl_credentials_file="",
        grpc_channel_options="")
    self.log('Connecting to Google')

    # Create Google Assistant API gRPC client.
    self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

    # Configure audio source and sink (both backed by the same sound device).
    self.audio_device = None
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size)

    self.conversation_state_bytes = None
    self.volume_percentage = 70

    self.listen_state(self.startGH, self.args["activation_boolean"], new="on")
    self.log("App started. Now listening to Home Assistant input")
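# initialize() above registers self.startGH as an AppDaemon state callback,
# but the method itself is not shown in this excerpt. A minimal sketch of what
# it might look like, assuming the standard AppDaemon listen_state callback
# signature (entity, attribute, old, new, kwargs); the body is a placeholder,
# not the original implementation:
def startGH(self, entity, attribute, old, new, kwargs):
    # Fires when the activation input_boolean flips to "on"; a real
    # implementation would kick off a conversation turn here.
    self.log("Activation boolean turned on; starting Assistant conversation")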
def main(device_id, verbose):
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    with open("credentials.json", "r") as f:
        credentials = google.oauth2.credentials.Credentials(**json.load(f))
    http_request = google.auth.transport.requests.Request()
    credentials.refresh(http_request)

    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT
    )
    logging.info("Connecting to %s", ASSISTANT_API_ENDPOINT)

    audio_sink = audio_source = audio_helpers.SoundDeviceStream(
        sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
        block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
        flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
    )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
    )

    trigger = Button(INPUT_PIN, pull_up=False)

    with Assistant(device_id, conversation_stream, grpc_channel) as assistant:
        print('Waiting for trigger...')
        trigger.wait_for_press()
        print('Triggered!')
        while trigger.is_pressed:
            print('Running assist')
            assistant.assist()
            print('Done with assist')
def configure_conversation_stream(input_audio_file, audio_sample_rate,
                                  audio_sample_width, audio_iter_size,
                                  audio_block_size, audio_flush_size):
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size)
    audio_sink = audio_helpers.WaveSink(
        open(LOCAL_AUDIO_FILE, 'wb'),
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width)
    return audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
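# A minimal usage sketch for configure_conversation_stream above (an
# assumption, not part of the original sample): passing None for
# input_audio_file selects the live microphone source, and LOCAL_AUDIO_FILE
# must already be defined at module level, since the function always writes
# its sink there.
stream = configure_conversation_stream(
    input_audio_file=None,
    audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
    audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
    audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
    audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
    audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE)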
def main(api_endpoint, credentials, project_id, device_model_id, device_config,
         lang, display, verbose, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    input_audio_file = False
    output_audio_file = False
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    try:
        with open(device_config) as f:
            device = json.load(f)
            device_id = device['id']
            device_model_id = device['model_id']
            logging.info("Using device model %s and device id %s",
                         device_model_id, device_id)
    except Exception as e:
        logging.warning('Device config not found: %s' % e)
        logging.info('Registering device')
        if not device_model_id:
            logging.error('Option --device-model-id required '
                          'when registering a device instance.')
            sys.exit(-1)
        if not project_id:
            logging.error('Option --project-id required '
                          'when registering a device instance.')
            sys.exit(-1)
        device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                           % (api_endpoint, project_id))
        device_id = str(uuid.uuid1())
        payload = {
            'id': device_id,
            'model_id': device_model_id,
            'client_type': 'SDK_SERVICE'
        }
        session = google.auth.transport.requests.AuthorizedSession(credentials)
        r = session.post(device_base_url, data=json.dumps(payload))
        if r.status_code != 200:
            logging.error('Failed to register device: %s', r.text)
            sys.exit(-1)
        logging.info('Device registered: %s', device_id)
        pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
        with open(device_config, 'w') as f:
            json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            print('Turning device on')
        else:
            print('Turning device off')

    @device_handler.command('Extend')
    def extend(number):
        global ser
        ser.write(b'f')
        print(" Extending ... ")

    @device_handler.command('Flex')
    def flex(number):  # renamed from a duplicate `extend` definition
        global ser
        ser.write(b'b')
        print(" Flexing ... ")

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        def signal_handler(signal, frame):
            detector.terminate()

        def interrupt_callback():
            global continue_conversation
            if continue_conversation:
                continue_conversation = assistant.assist()
                print('listening..')

        def det_call():
            print('yes master')
            global continue_conversation
            continue_conversation = assistant.assist()
            print('done================')
            return 0

        signal.signal(signal.SIGINT, signal_handler)
        detector = snowboydecoder.HotwordDetector('saaedy.pmdl',
                                                  sensitivity=0.6)
        detector.start(detected_callback=det_call,
                       interrupt_check=interrupt_callback,
                       sleep_time=0.1)
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None, device_model_id=None, device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples_assistant'),
             'device_config.json'),
         lang="en_GB", display=True, verbose=False,
         input_audio_file=None, output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE, once=False,
         *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    #|=============================================|
    #|                                             |
    #| Handle commands for Google Assistant Stuff  |
    #|                                             |
    #|=============================================|
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.SetVolume')
    def changeVolume(volumeLevel, isPercentage):
        if (isPercentage):
            os.system(
                'pactl set-sink-volume '
                '"alsa_output.usb-Generic_USB2.0_Device_20130100ph0-00.analog-stereo" '
                + str(volumeLevel) + '%')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                input("PRESS ENTER TO SPEAK")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('com.example.commands.SearchCar')
    def search_cars(filter, param):
        print('SEARCH CARS\n' + 'FILTER: ' + filter + ', PARAM: ' + param)
        try:
            response = requests.get(
                'http://localhost:5000/api/cars?{}={}'.format(filter, param))
            data = json.loads(response.text)
            cars = data['cars']
        except Exception:  # was a bare except
            print("Problem communicating with server")
            cars = []
        print('%-2s | %-10s | %-10s | %-8s | %s | %s | %s' %
              ("ID", "Make", "Body Type", "Colour", "No. Seats", "Cost/Hour",
               "Location"))
        print('---+------------+------------+----------+-----------'
              '+-----------+----------------------')
        for car in cars:
            print('%-2d | %-10s | %-10s | %-8s | %-9d | $%-8d | %s' %
                  (car['id'], car['make'], car['body_type'], car['colour'],
                   car['no_seats'], car['cost_per_hour'], car['location']))

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    # text to speech: convert text to speech and play it through the speaker
    def tts(text, lang='ko'):
        if lang == None:
            speech = gTTS(text=text)
        else:
            speech = gTTS(text=text, lang=lang)
        speech.save('tmp.mp3')
        os.system("omxplayer tmp.mp3")
        os.remove('tmp.mp3')

    # speech to text: convert speech to text and store it in the text variable
    def stt(commands=None, is_respon=False):
        # voice recognition / response
        continue_conversation, stt_tmp = assistant.assist(commands=commands,
                                                          is_respon=is_respon)
        wait_for_user_trigger = not continue_conversation
        # if once and (not continue_conversation):
        #     break
        text = stt_tmp
        return text

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        if input_audio_file or output_audio_file:
            assistant.assist()
            return
        wait_for_user_trigger = not once

        # Spoken commands to match against (Korean).
        select = ['컨트롤', '교육']  # 'control', 'education'
        control = ['초음파', '추적', '불빛', '명령', '꺼']  # 'ultrasonic', 'tracking', 'light', 'command', 'off'
        yn = ['네', '아니']  # 'yes', 'no'
        move = ['앞으로', '뒤로', '오른쪽', '왼쪽']  # 'forward', 'backward', 'right', 'left'
        first = True
        more = False
        # dc, servo, ultrasonic, led
        module = m.mode()

        while True:
            if first == True:
                # "Please choose between control mode and education mode"
                tts("컨트롤모드와 교육모드 중에 선택해주세요 ,.,.,.")
                first = False
            text = stt(select, is_respon=True)
            print("[INFO] answer : ", text)
            if select[0] in text:
                print('동작 모드 ')
                tts('동작모드 입니다.... ')  # "This is operation mode"
                while True:
                    if more == True:
                        tts("동작모드를 더 하실껀가요 ")  # "More operation mode?"
                        text = stt(is_respon=False)
                        if yn[0] in text:
                            more = False
                            tts('다시 시작할게요 ')
                        if yn[1] in text:
                            more = False
                            first = True
                            break
                    text = stt(is_respon=False)
                    print("[INFO] answer : ", text)
                    if control[0] in text:
                        print("초음파 모드")
                        tts('초음파 모드 입니다 ')  # ultrasonic mode
                        sel = random.randrange(2)
                        if sel == 0:
                            print('1')
                            module.avoid()
                        else:
                            print('2')
                            module.avoid2()
                        tts("초음파 모드가 끝났어요 ")
                        more = True
                    elif control[1] in text:
                        print('추적 모드')
                        tts('추적 모드 입니다 ')  # tracking mode
                        module.tracking()
                        tts("추적 모드가 끝났어요 ")
                        more = True
                    elif control[2] in text:
                        print('불빛 모드')
                        tts('불빛 모드 입니다 ')  # light mode (typo fixed)
                        module.servo_led()
                        tts("불빛 모드가 끝났어요 ")  # (typo fixed)
                        more = True
                    elif control[3] in text:
                        print('명령모드')
                        tts('명령 모드 입니다 ')  # command mode
                        try:
                            start = time.time()
                            while True:
                                if time.time() - start > 60:
                                    break
                                text = stt(commands=move, is_respon=False)
                                print(text)
                                if move[0] in text:
                                    # if module.distance() > 80:
                                    module.go(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[1] in text:
                                    module.back(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[2] in text:
                                    module.spin_right(100, 100)
                                    sleep(3)
                                    module.stop()
                                elif move[3] in text:
                                    module.spin_left(100, 100)
                                    sleep(3)
                                    module.stop()
                        except KeyboardInterrupt:
                            module.stop()
                        module.stop()
                        tts("명령모드가 끝났어요 ")
                        more = True
                    elif control[4] in text:
                        tts("끝낼게요 ")  # "Ending now"
                        first = True
                        break
            elif select[1] in text:
                # DB setting
                host = '192.168.0.8'
                user = '******'
                dbname = 'Education'
                password = '******'
                ser = 1
                name = 'LEE'
                e = Education(host, user, dbname, password, ser, name)
                conn, cur = e.connection()
                e.dbclean(conn, cur)
                tts("교육모드 입니다. ")  # "This is education mode"
                count = 0
                stage = 0
                while True:
                    distance = module.distance()
                    if distance < 50:
                        count += 1
                    if count > 100:
                        count = 0
                        tts("반가워요 제가 동화를 들려드릴게요 ")  # "I'll tell you a story"
                        sleep(1)
                        os.system(
                            "omxplayer ~/workspace/Raspi_google_robot/stt/ka_01.mp3")
                        tts("동화가 끝났어요 재미있으셨나요 ")  # "The story is over, was it fun?"
                        sleep(1)
                        while True:
                            text = stt(is_respon=False)
                            e.isfun(conn, cur, text)
                            if yn[0] in text:
                                tts("고마워요 다음에 또 들려줄게요 ")
                                break
                            elif yn[1] in text:
                                tts("나중에는 더 재미있는 이야기를 들려줄게요 ")
                                break
                        e.search_isfun(conn, cur)
                        tts("우리 같이 숫자 공부해요 ")  # "Let's study numbers together"
                        sleep(1)
                        tts("일 더하기 이는 무엇일까요 ")  # "What is one plus two?"
                        life = 5
                        math_pt = 100
                        while True:
                            text = stt(is_respon=False)
                            print(text)
                            if '3' in text:
                                tts("정답이에요 축하해요 ")  # correct answer
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요 ")  # wrong, try again
                                life -= 1
                                math_pt -= 5
                                if life == 0:
                                    tts("코인을 다썼어요 아쉽네요 다음 기회에 또 봐요 ")
                                    break
                        e.math(conn, cur, math_pt)
                        e.search_math(conn, cur)
                        time.sleep(1)
                        tts("우리 같이 발음을 맞춰봐요 ")  # "Let's practice pronunciation"
                        life = 5
                        english_pt = 100
                        quiz = 'orange'
                        tts(quiz, lang=None)
                        while True:
                            text = stt(is_respon=False)
                            print(text)
                            if '오렌지' in text:  # 'orange'
                                tts("정답 이에요")
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요 ")
                                life -= 1
                                english_pt -= 5
                                if life == 0:
                                    tts("아쉽네요 다음 기회에 또 봐요 ")
                                    break
                        e.english(conn, cur, english_pt)
                        e.search_english(conn, cur)
                        time.sleep(1)
                        tts("교육모드가 끝났습니다. ")  # "Education mode is over"
                        e.dbclose(conn, cur)
                        first = True
                        break
            else:
                first = True
        m.clean()
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    credentials, http_request = create_credentials_and_http_request(
        credentials)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        device_id, device_model_id = configure_device_id_and_model_id(
            device_config)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        if input_audio_file or output_audio_file:
            assistant.assist()
            return
        global SHOULD_ASSIST
        while True:
            signal.signal(signal.SIGUSR2, sigusr2_handler)
            if not SHOULD_ASSIST:
                time.sleep(0.5)
                continue  # keep looping
            # Temporarily disable the signal handler while assisting.
            signal.signal(signal.SIGUSR2, sigusr2_handler_disabled)
            continue_conversation = assistant.assist()
            if not continue_conversation:
                SHOULD_ASSIST = False
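# The loop above references sigusr2_handler, sigusr2_handler_disabled and a
# module-level SHOULD_ASSIST flag without defining them in this excerpt. A
# minimal sketch of what they might look like (an assumption, not the original
# implementation), where an external process, e.g. a hotword daemon, wakes the
# assistant by sending SIGUSR2 to this process:
import signal
import time

SHOULD_ASSIST = False

def sigusr2_handler(signum, frame):
    # Arm the assist loop; assist() runs on the next poll iteration.
    global SHOULD_ASSIST
    SHOULD_ASSIST = True

def sigusr2_handler_disabled(signum, frame):
    # Ignore triggers while a conversation is already in progress.
    pass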
def main(project_id=None, device_model_id=None, device_id=None,
         api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir("google-oauthlib-tool"),
                                  "credentials.json"),
         device_config=os.path.join(
             click.get_app_dir("googlesamples-assistant"),
             "device_config.json"),
         lang="en-US", display=False, verbose=False,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE, once=False, view=None,
         *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error(
            "Run google-oauthlib-tool to initialize new OAuth 2.0 credentials."
        )
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    audio_device = None
    # Configure audio source and sink.
    audio_source = audio_device = audio_device or audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size,
    )
    audio_sink = audio_device = audio_device or audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size,
    )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device["id"]
                device_model_id = device["model_id"]
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning("Device config not found: %s" % e)
            logging.info("Registering device")
            if not device_model_id:
                logging.error("Option --device-model-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            if not project_id:
                logging.error(
                    "Option --project-id required when registering a device instance."
                )
                sys.exit(-1)
            device_base_url = "https://%s/v1alpha2/projects/%s/devices" % (
                api_endpoint,
                project_id,
            )
            device_id = str(uuid.uuid1())
            payload = {
                "id": device_id,
                "model_id": device_model_id,
                "client_type": "SDK_SERVICE",
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error("Failed to register device: %s", r.text)
                sys.exit(-1)
            logging.info("Device registered: %s", device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, "w") as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command("com.example.commands.SwitchView")
    def switch(name):
        view.show_frame(name)

    with SampleAssistant(
            lang,
            device_model_id,
            device_id,
            conversation_stream,
            display,
            grpc_channel,
            grpc_deadline,
            device_handler,
            view,
    ) as assistant:
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info="Press Enter to send a new request...")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device = None
    # if input_audio_file:
    #     audio_source = audio_helpers.WaveSource(
    #         open(input_audio_file, 'rb'),
    #         sample_rate=audio_sample_rate,
    #         sample_width=audio_sample_width
    #     )
    # else:
    #     audio_source = audio_device = (
    #         audio_device or audio_helpers.SoundDeviceStream(
    #             sample_rate=audio_sample_rate,
    #             sample_width=audio_sample_width,
    #             block_size=audio_block_size,
    #             flush_size=audio_flush_size
    #         )
    #     )
    # if output_audio_file:
    #     audio_sink = audio_helpers.WaveSink(
    #         open(output_audio_file, 'wb'),
    #         sample_rate=audio_sample_rate,
    #         sample_width=audio_sample_width
    #     )
    # else:
    #     audio_sink = audio_device = (
    #         audio_device or audio_helpers.SoundDeviceStream(
    #             sample_rate=audio_sample_rate,
    #             sample_width=audio_sample_width,
    #             block_size=audio_block_size,
    #             flush_size=audio_flush_size
    #         )
    #     )

    # Our audio setup
    # audio_device, audio_dump = None
    # audio_source, audio_source_file = None
    # audio_sink, audio_sink_file = None
    audio_device = None
    if not input_audio_file:
        print("\nI am here to inform you that... \nYou done f****d up\n")
        exit()
    audio_source_file = audio_helpers.WaveSource(
        open(input_audio_file, 'rb'),
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width)
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink_file = audio_helpers.WaveSink(
            open(output_audio_file, "wb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))

    # Create conversation streams with the given audio sources and sink.
    conversation_stream_file = audio_helpers.ConversationStream(
        source=audio_source_file,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width)
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    global auditer, audsw, audsr, audbs, audfs, iaud
    auditer = audio_iter_size
    audsw = audio_sample_width
    audsr = audio_sample_rate
    audbs = audio_block_size
    audfs = audio_flush_size
    iaud = input_audio_file

    with SampleAssistant(conversation_stream, grpc_channel, grpc_deadline,
                         conversation_stream_file) as assistant:
        assistant.csf = conversation_stream_file
        continue_conversation = assistant.converse()
def main(api_endpoint, credentials, device_model_id, device_id, lang, display,
         verbose, grpc_deadline, audio_sample_rate, audio_block_size,
         audio_iter_size, audio_sample_width, audio_flush_size,
         audio_output_file, *args, **kwargs):
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size
        )
    )
    if audio_output_file:
        audio_sink = audio_helpers.WaveSink(
            open(audio_output_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width
    )

    with SampleTextAssistant(lang, device_model_id, device_id,
                             conversation_stream, display,
                             grpc_channel, grpc_deadline) as assistant:
        while True:
            query = click.prompt('')
            click.echo('<you> %s' % query)
            response_text, response_html = assistant.assist(text_query=query)
            if display and response_html:
                system_browser = browser_helpers.system_browser
                system_browser.display(response_html)
            if response_text:
                click.echo('<@assistant> %s' % response_text)
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         device_config=os.path.join(
             click.get_app_dir('googlesamples-assistant'),
             'device_config.json'),
         device_id=None, project_id=None, device_model_id=None,
         input_audio_file=None, output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         lang='en-US', display=False, verbose=False, once=False,
         grpc_deadline=DEFAULT_GRPC_DEADLINE, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    connectMQTT()

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices'
                % (api_endpoint, project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Keep recording voice requests using the microphone
        # and playing back assistant responses using the speaker.
        # This will loop as long as assist() returns true, meaning that a
        # follow-on query from the user is expected. If the once flag is set,
        # only one request is performed no matter what assist() returns;
        # assist() can be thought of as returning the continue_conversation
        # state.
        while assistant.assist():
            if once:
                break
def converse(self):
    """Send a voice request to the Assistant and playback the response.

    Returns: True if conversation should continue.
    """
    print("\nTOP OF CONVERSE FUNCTION\n")
    continue_conversation = None
    while True:
        if not continue_conversation:
            afile = audio_helpers.WaveSource(
                open(iaud, 'rb'),
                sample_rate=audsr,  # was audsw, a sample-rate/width mix-up
                sample_width=audsw)
            asink = audio_helpers.SoundDeviceStream(
                sample_rate=audsr,
                sample_width=audsw,
                block_size=audbs,
                flush_size=audfs)
            convstream = audio_helpers.ConversationStream(
                source=afile,
                sink=asink,
                iter_size=auditer,
                sample_width=audsw)
            self.conversation_stream = self.csf
            fdetect()

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_converse_requests():
            for c in self.gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Converse(iter_converse_requests(),
                                            self.deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
                self.conversation_stream = self.conversation_stream_mic
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                self.conversation_state = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                self.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    ###########################################################################
    global updt_time, query, resp_text, mute, startmouth, TezHead, beep, \
        faceFound, name2, onceface, facerec_en, keyboard_on

    Kpx = 1
    Kpy = 1
    Ksp = 40

    ## Head X and Y angle limits
    time.sleep(5)
    Xmax = 725
    Xmin = 290
    Ymax = 550
    Ymin = 420
    keyboard_on = False

    ## Initial head position
    Xcoor = 511
    Ycoor = 450
    Facedet = 0

    ## Time the head waits while turned after a touch
    touch_wait = 2

    no_face_tm = time.time()
    face_det_tm = time.time()
    last_face_det_tm = time.time()
    touch_tm = 0
    touch_samp = time.time()
    qbo_touch = 0
    touch_det = False
    face_not_found_idx = 0
    mutex_wait_touch = False
    faceFound = False
    onceface = False
    dist = 100

    audio_response1 = '/home/pi/Reebo_Python/up.wav'
    wavep = wave.open(audio_response1, 'rb')
    audio_response2 = '/home/pi/Reebo_Python/HiTej.wav'
    wavep2 = wave.open(audio_response2, 'rb')
    facerec_en = False
    ###########################################################################

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            # URL repaired: the original had stray text pasted into
            # "v1alpha2".
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    #~ def findquery():

    ############################ FACEREC THREAD ##############################
    def facerec():
        global name2
        f = open("/home/pi/Reebo_Python/face_features.pkl", 'rb')
        details = pickle.load(f)
        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        name2 = []
        unknown_picture = fr.load_image_file("/home/pi/Reebo_Python/test.jpg")
        # Grab a single frame of video
        # frame = unknown_picture
        # Resize frame of video to 1/4 size for faster face recognition
        # processing
        # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        # Convert the image from BGR color (which OpenCV uses) to RGB color
        # (which face_recognition uses)
        # rgb_small_frame = small_frame[:, :, ::-1]
        # Find all the faces and face encodings in the current frame of video
        face_locations = fr.face_locations(unknown_picture)
        face_encodings = fr.face_encodings(unknown_picture, face_locations)
        print("{0} persons identified".format(len(face_locations)))
        face_names = []
        for face_encoding in face_encodings:
            matches = fr.compare_faces(details['encodings'], face_encoding,
                                       0.45)
            name = "Unknown"
            # If a match was found in known_face_encodings, just use the
            # first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = details["name"][first_match_index]
            face_names.append(name)
            print(face_names)

        for i in range(0, len(face_names)):
            name_temp = str(face_names[i]).replace('photos/', "")
            name_temp = str(name_temp).replace(']\'', "")
            name2.append(str(name_temp))
        print(name2)

        n = open("/home/pi/Reebo_Python/names.txt", 'w')
        for i in face_names:
            n.write(i + "\n")
        n.close()

        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            if not name:
                continue
            if name == "warner":
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (255, 0, 0), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (255, 0, 0), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
            else:
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (0, 0, 255), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (0, 0, 255), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
        cv2.imwrite("/home/pi/Reebo_Python/result.png", unknown_picture)

    def findFace():
        global name2, faceFound, onceface, facerec_en, updt_time
        found_tm = time.time()
        onceface = False
        touch_samp = time.time()
        Xmax = 725
        Xmin = 290
        Ymax = 550
        Ymin = 420
        qbo_touch = 0
        while True:
            # print("find face " + str(time.time()))
            try:
                faceFound = False
                # while not faceFound:
                # This variable is set to true if, on THIS loop, a face has
                # already been found. We search for a face three different
                # ways, and if we have found one already there is no reason
                # to keep looking.
                # thread.start_new_thread(WaitForSpeech, ())
                # WaitForSpeech()
                # ServoHome()
                Cface = [0, 0]
                t_ini = time.time()
                while time.time() - t_ini < 0.01:  # wait for present frame
                    t_ini = time.time()
                    aframe = webcam.read()[1]
                    # print "t: " + str(time.time() - t_ini)
                fface = frontalface.detectMultiScale(
                    aframe, 1.3, 4,
                    (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),
                    (60, 60))
                pfacer = profileface.detectMultiScale(
                    aframe, 1.3, 4,
                    (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH),
                    (80, 80))
                if fface != ():  # if we found a frontal face...
                    # f in fface is an array with a rectangle representing
                    # a face
                    for f in fface:
                        faceFound = True
                        face = f
                elif pfacer != ():  # if we found a profile face...
                    for f in pfacer:
                        faceFound = True
                        face = f
                if faceFound:
                    updt_time = time.time()
                    # facerec()
                    if onceface == False:
                        cv2.imwrite("/home/pi/Reebo_Python/test.jpg", aframe)
                        onceface = True
                    found_tm = time.time()
                    x, y, w, h = face
                    # We are given an x,y corner point and a width and
                    # height; we need the center.
                    Cface = [(w / 2 + x), (h / 2 + y)]
                    TezHead.SetNoseColor(4)
                    # print "face coord: " + str(Cface[0]) + "," + str(Cface[1])
                    faceOffset_X = 160 - Cface[0]
                    if (faceOffset_X > 20) | (faceOffset_X < -20):
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(1, faceOffset_X >> 1)
                        # release mutex
                        # wait for move
                        time.sleep(0.05)
                        # print "MOVE REL X: " + str(faceOffset_X >> 1)
                    faceOffset_Y = Cface[1] - 120
                    if (faceOffset_Y > 20) | (faceOffset_Y < -20):
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(2, faceOffset_Y >> 1)
                        # release mutex
                        # wait for move
                        time.sleep(0.05)
                if time.time() - found_tm > 0.5:
                    TezHead.SetNoseColor(0)
            except Exception as e:
                print(e)
                pass
            try:
                current_touched = cap.touched()
                # last_touched = cap.touched()
                cap.set_thresholds(10, 6)
                # Check each pin's last and current state to see if it was
                # pressed or released.
                i = 0
                for i in [1, 11]:
                    # Each pin is represented by a bit in the touched value.
                    pin_bit = 1 << i
                    # First check if transitioned from not touched to touched.
                    if current_touched & pin_bit:  # and not last_touched & pin_bit:
                        print('{0} touched!'.format(i))
                        qbo_touch = int(i)
                ## # Next check if transitioned from touched to not touched.
                ## if not current_touched & pin_bit and last_touched & pin_bit:
                ##     print('{0} released!'.format(i))
                ## # Update last state and wait a short period before repeating.
                ## last_touched = current_touched
                # time.sleep(0.1)
            except:
                # print sys.exc_info()
                # print "error"
                pass
            if (time.time() - touch_samp > 0.5):
                # & (time.time() - last_face_det_tm > 3):
                touch_samp = time.time()
                #~ time.sleep(0.002)
                if qbo_touch in [1, 11]:
                    if qbo_touch == 1:
                        print("right")
                        TezHead.SetServo(1, Xmax - 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        # thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0
                    elif qbo_touch == 2:  # was `== [2]`, an int/list mix-up
                        #~ time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0
                    elif qbo_touch == 11:
                        print("left")
                        TezHead.SetServo(1, Xmin + 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        # thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0

    def distance():
        # set Trigger to HIGH
        GPIO.output(GPIO_TRIGGER, True)
        # set Trigger after 0.01 ms to LOW
        time.sleep(0.00001)
        GPIO.output(GPIO_TRIGGER, False)
        StartTime = time.time()
        StopTime = time.time()
        # save StartTime
        while GPIO.input(GPIO_ECHO) == 0:
            StartTime = time.time()
        # save time of arrival
        while GPIO.input(GPIO_ECHO) == 1:
            StopTime = time.time()
        # time difference between start and arrival
        TimeElapsed = StopTime - StartTime
        # multiply by the speed of sound (34300 cm/s)
        # and divide by 2, because there and back
        distance = (TimeElapsed * 34300) / 2
        return distance

    ############################ SOCKET THREAD ###############################
    def socket_thread(conn):
        print('Socket.IO Thread Started.')
    def empid_received():
        socket.emit('event-ask-cardno')
        print "ASK CARD NO"

    def cardno_received():
        print "Card No received"
        conn.send(False)

    socket.on('event-empid-received', empid_received)
    socket.on('event-cardno-received', cardno_received)
    socket.wait()


def findquery(parent_conn):
    global resp_text, mute, query, beep
    keyboard_on = False
    if resp_text == "Sorry, I can't help.":
        query = "Talk to Reebo"
        mute = True
    elif resp_text == "Alright! Say Cheese!":
        print "camera"
        aframe = webcam.read()[1]
        cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
        socket.emit('event-take-selfie')
        #mute=False
    elif resp_text.startswith("Can you please smile for the camera?"):
        mute = False
        beep = False
        print "BEEP"
        time.sleep(5)
        aframe = webcam.read()[1]
        cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
        socket.emit('event-take-selfie')
        query = "Say@#$: Thank you. Please enter your employee ID and card number"
        assistant.assist()
        socket.emit('event-ask-empid')
        keyboard_on = True
        print "KEYBOARD in findquery: ", keyboard_on
        keyboard_on = parent_conn.recv()
        query = "Say@#$: Thank You. You will be granted access shortly"
        mute = False
        beep = False


if len(sys.argv) > 1:
    port = sys.argv[1]
else:
    port = '/dev/serial0'

try:
    # Open serial port
    ser = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS,
                        stopbits=serial.STOPBITS_ONE,
                        parity=serial.PARITY_NONE,
                        rtscts=False, dsrdtr=False, timeout=0)
    print "Opened serial port successfully."
    print(ser.name)
except Exception as e:
    print e
    print "Error opening serial port."
    sys.exit()

try:
    cap = MPR121.MPR121()
    time.sleep(3)
    if not cap.begin():
        print('Error initializing MPR121. Check your wiring!')
except Exception as e:
    print(e)
    pass

TezHead = TezCmd.Controller(ser)
TezHead.SetMouth(0x110E00)
time.sleep(1)
#TezHead.SetPid(1, 26, 12, 16)
TezHead.SetPid(1, 26, 2, 16)
#TezHead.SetPid(2, 26, 12, 16)
TezHead.SetPid(2, 26, 2, 16)
time.sleep(1)
TezHead.SetServo(1, Xcoor, 100)
TezHead.SetServo(2, Ycoor, 100)
time.sleep(1)
TezHead.SetNoseColor(0)

# Get ready to start getting images from the webcam
webcam = cv2.VideoCapture(-1)
webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)   # about the highest resolution
webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)  # you'll want to attempt on the Pi
#webcam.set(cv2.CV_CAP_PROP_BUFFERSIZE, 2)  # frame buffer storage
if not webcam:
    print "Error opening WebCAM"
    sys.exit(1)
#open = False

frontalface = cv2.CascadeClassifier(
    "/home/pi/Documents/Python projects/haarcascade_frontalface_alt2.xml")  # frontal face pattern detection
profileface = cv2.CascadeClassifier(
    "/home/pi/Documents/Python projects/haarcascade_profileface.xml")  # side face pattern detection

#parent_conn, child_conn = Pipe()
t1 = Thread(target=findFace)
t1.start()
t3 = Thread(target=facerec)
parent_conn, child_conn = Pipe()
socket_thd = Thread(target=socket_thread, args=(child_conn, ))
socket_thd.start()

with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                     display, grpc_channel, grpc_deadline,
                     device_handler) as assistant:
    # If file arguments are supplied:
    # exit after the first turn of the conversation.
    if input_audio_file or output_audio_file:
        assistant.assist()
        return

    # If no file arguments supplied:
    # keep recording voice requests using the microphone
    # and playing back assistant response using the speaker.
    # When the once flag is set, don't wait for a trigger. Otherwise, wait.
    wait_for_user_trigger = not once
    button_once = False
    #playsound('/home/pi/env/HiTej.wav')
    print "playsound"
    mute = True
    query = "Talk to Reebo"
    print query
    ###################################################################################
    #~ query,mute=findquery()
    ############################### FIND QUERY AND MUTE ###############################
    assistant.assist()
    mute = False
    query = "audio"
    time.sleep(1)
    updt_time = time.time()
    stream = conversation_stream
    num_frames = wavep.getnframes()      # number of frames in audio response file
    resp_samples = wavep.readframes(num_frames)      # get frames from wav file
    num_frames2 = wavep2.getnframes()    # number of frames in audio response file
    resp_samples2 = wavep2.readframes(num_frames2)   # get frames from wav file
    name = ""
    while True:
        #if wait_for_user_trigger:
        #logging.info('Press key')
        #x=raw_input()
        #~ stream.start_recording()
        # inelegant method to access private methods..
        findquery(parent_conn)
        if mute == False or beep == True:
            print "beep"
            stream.start_playback()
            #~ stream.stop_recording()
            stream.write(resp_samples)  # write response sample to output stream
            print "HI"
            stream.stop_playback()
        assistant.assist()
        beep = False
        query = "audio"
        mute = False
        #updt_time=time.time()
        print time.time() - updt_time
        dist = distance()
        #~ if dist<50:
        #~     print dist
        #~     updt_time=time.time()
        if time.time() - updt_time > 10:
            name2 = ""
            if onceface == True:
                facerec_en = False
                print "Thread Status", t3.is_alive()
                if t3.is_alive():
                    # threading.Thread has no terminate(); the original called
                    # t3.terminate(), which raises AttributeError. Rely on the
                    # facerec_en flag set above to let the thread exit.
                    t3.join(1)
                    print "t3 terminated"
                print facerec_en
                onceface = False
            print("in loop")
            query = "audio"
            dist = distance()
            print faceFound
            while faceFound == False:
                time.sleep(0.1)
                #print "FACE FALSE"
            #~ if dist>60:
            #~     #mute=False
            #~     updt_time=time.time()
            #~     print query
            #~     while dist>60:
            #~         dist=distance()
            #~         time.sleep(0.1)
            #~         print dist
            #~     query="Hi"
            #~     print query
            #~     assistant.assist()
            #~     print ("playback")
            #~     socket.emit('event-robot-message',"Hi! Do you want some help ?")
            print "Thread Status", t3.is_alive()
            t3 = Thread(target=facerec)
            t3.start()
            #~ query="Talk to Tej"
            #~ mute=True
            #~ assistant.assist()
            #time.sleep(3)
            stream.start_playback()
            #~ stream.stop_recording()
            stream.write(resp_samples2)  # write response sample to output stream
            socket.emit('event-robot-message',
                        "Hi! My Name is Reebo. I'll be your personal assistant for today")
            stream.stop_playback()
            query = "Say:@#$: "
            if len(name2) >= 1:
                for i in range(0, len(name2)):
                    if name2[i] != "" and name2[i] != "Unknown":
                        query = query + " Hi " + str(name2[i]) + "!"
            query = query + "What can I do for you?"
            mute = False
            print query
            assistant.assist()
            #time.sleep(0.1)
            #~ stream.start_playback()
            #~ stream.stop_recording()
            #~ stream.write(resp_samples2)  # write response sample to output stream
            #~ stream.stop_playback()
            #~ #query="Talk to Tej"
            #~ mute=True
            #~ assistant.assist()
            #~ updt_time= time.time()
            query = "audio"
            mute = False
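# A minimal sketch of a safer HC-SR04 read, assuming the same GPIO_TRIGGER /
# GPIO_ECHO wiring as distance() above. The original busy-waits on the echo
# pin with no timeout, so a missed echo hangs the loop forever; this variant
# (a hypothetical helper, not part of the original script) gives up after
# `timeout` seconds and returns None instead.
import time
import RPi.GPIO as GPIO

def distance_with_timeout(trigger_pin, echo_pin, timeout=0.04):
    GPIO.output(trigger_pin, True)
    time.sleep(0.00001)               # 10 us trigger pulse
    GPIO.output(trigger_pin, False)
    deadline = time.time() + timeout
    start = stop = time.time()
    while GPIO.input(echo_pin) == 0:  # wait for echo to go high
        start = time.time()
        if start > deadline:
            return None
    while GPIO.input(echo_pin) == 1:  # wait for echo to go low
        stop = time.time()
        if stop > deadline:
            return None
    # sound travels at ~34300 cm/s; halve for the round trip
    return (stop - start) * 34300 / 2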
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, display, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, hotword_model, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width ) else: audio_source = audio_device = ( audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size ) ) if output_audio_file: audio_sink = audio_helpers.WaveSink( open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width ) else: audio_sink = audio_device = ( audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size ) ) # Create conversation stream with the given audio source and sink. 
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # pin13 is assumed to be a PWM-capable pin configured elsewhere
        if on:
            pin13.write(1)
            logging.info('Turning device on')
        else:
            pin13.write(0)
            logging.info('Turning device off')

    @device_handler.command('action.devices.commands.BrightnessAbsolute')
    def brightnessCheck(brightness):
        pin13.write(brightness / 100)
        # logging takes %-style placeholders; the original
        # `logging.info('ok , brightness is ' , brightness)` passed the value
        # without a %s and was reported as a formatting error.
        logging.info('ok, brightness is %s', brightness)

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        import snowboydecoder

        def listening():
            detector = snowboydecoder.HotwordDetector(hotword_model,
                                                      sensitivity=0.397,
                                                      audio_gain=1)
            print("Say dalilaa ..... or, Press Ctrl+C to exit")
            detector.start(detected_callback=detectedCallback,
                           sleep_time=0.01)
            detector.terminate()

        def detectedCallback():
            continue_conversation = assistant.assist()
            print("Say dalilaa ...... or Press Ctrl+C to exit")
            # listening()

        listening()
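# The BrightnessAbsolute handler above maps a 0-100 percentage straight onto
# the pin's 0.0-1.0 duty cycle. A minimal sketch of that mapping with
# clamping, assuming a pyfirmata-style PWM pin like the pin13 used above (the
# helper name is hypothetical). Note that under Python 2, brightness/100 is
# integer division and writes 0 for every value below 100, so the explicit
# float conversion matters.
def write_brightness(pwm_pin, brightness_percent):
    duty = max(0.0, min(1.0, float(brightness_percent) / 100.0))
    pwm_pin.write(duty)  # pyfirmata PWM pins accept a float in [0.0, 1.0]
    return duty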
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, display, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        Now = datetime.datetime.now()
        DateTimeStamp = "{:%d/%m/%Y}".format(Now)
        print(DateTimeStamp)
        cached_list = cached.find_one({'ref': DateTimeStamp})['cached']
        print(cached_list)
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        #wait_for_user_trigger = not once
        while True:
            # `is` compares object identity; the original `assistant.MODE is 0`
            # and `name is not ''` only worked by CPython interning accident,
            # so use == / != instead.
            if assistant.MODE == 0 or assistant.MODE == 1:
                query = ''
                while True:
                    name = current_detected.get('Name')
                    if name != '':
                        query = name
                        assistant.switch_mode(1)
                        if name not in cached_list:
                            cached_list.append(name)
                            cached.update({'ref': DateTimeStamp},
                                          {'$push': {'cached': name}})
                            query = name + "first"
                            assistant.switch_mode(0)
                        break
                if assistant.MODE == 0:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                    #print(text)
                elif assistant.MODE == 1:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                    #print(text)
                    assistant.switch_mode(2)
            elif assistant.MODE == 2:
                text, continue_conversation = assistant.assist(text_query=None)
                print(text)
                if text == 'Please report activity again.':
                    print('recording again')
                    assistant.switch_mode(2)
                    #text, continue_conversation = assistant.assist(text_query=None)
                else:
                    print('why??')
                    assistant.switch_mode(1)
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            #wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if not continue_conversation:
                assistant.assist(text_query='Talk to my test app')
                time.sleep(1)
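# A minimal sketch of the first-visit check driving the loop above: a name
# read from memcache is greeted with a "<name> first" query only the first
# time it appears on a given date, and the per-date list in MongoDB is kept in
# sync with the same update call used above. Assumes the same `cached`
# collection; the function name is hypothetical.
def build_query(name, cached_list, cached, date_stamp):
    if name in cached_list:
        return name              # returning visitor: plain name query
    cached_list.append(name)     # remember locally and persist for this date
    cached.update({'ref': date_stamp}, {'$push': {'cached': name}})
    return name + "first"        # first sighting today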
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) context = zmq.Context.instance() socket = context.socket(zmq.REP) socket.bind('tcp://127.0.0.1:5555') while True: print('receiving socket message...') msg = socket.recv_string() if msg == 'stop': socket.send_string('stopping') context.destroy() break if msg != 'start': socket.send_string('invalid message') continue print('received start message') # Configure audio source and sink. audio_device = None audio_source = audio_helpers.WaveSource( open('in.wav', 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) if output_audio_file: audio_sink = audio_helpers.WaveSink( open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) device_handler = device_helpers.DeviceRequestHandler(device_id) @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') else: logging.info('Turning device off') if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ('https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id)) device_id = str(uuid.uuid1()) payload = {'id': device_id, 'model_id': device_model_id} session = google.auth.transport.requests.AuthorizedSession( credentials) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) os.makedirs(os.path.dirname(device_config), exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) with SampleAssistant(lang, device_model_id, device_id, conversation_stream, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. #if input_audio_file or output_audio_file: assistant.assist() print("accepting another request...") socket.send_string('done')
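# The service above binds a zmq REP socket and expects 'start' / 'stop'
# strings, replying 'done', 'stopping', or 'invalid message'. A minimal
# client-side sketch of that protocol using the matching REQ socket type;
# the address mirrors the bind address above, the function name is
# hypothetical.
import zmq

def send_command(command, addr='tcp://127.0.0.1:5555'):
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(addr)
    socket.send_string(command)   # e.g. 'start' to run one assist turn
    reply = socket.recv_string()  # blocks until the REP side answers
    socket.close()
    return reply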
def main(api_endpoint=ASSISTANT_API_ENDPOINT, credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'), device_config=os.path.join( click.get_app_dir('googlesamples-assistant'), 'device_config.json'), device_id=None, project_id=None, device_model_id=None, input_audio_file=None, output_audio_file=None, audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE, audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH, audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE, audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE, audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE, lang='ko-KR', verbose=False, once=False, grpc_deadline=DEFAULT_GRPC_DEADLINE): # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] logging.info("Using device model %s and device id %s", device_model_id, device_id) except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ('https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id)) device_id = str(uuid.uuid1()) payload = { 'id': device_id, 'model_id': device_model_id, 'client_type': 'SDK_SERVICE' } session = google.auth.transport.requests.AuthorizedSession( credentials) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) device_handler = device_helpers.DeviceRequestHandler(device_id) @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') print('JCH MP3 Play') #launch subprocess #python cannot play mp3 #subprocess.call(['lxterminal', '-e', 'python runMP3.py']) subprocess.call(['lxterminal', '-e', './runMP3.sh']) print('JCH fork process is run. parent process is still running') else: logging.info('Turning device off') killMP3Pid() print('JCH turn off') @device_handler.command('com.example.commands.BlinkLight') def blink(speed, number): logging.info('Blinking device %s times.' % number) delay = 1 if speed == "SLOWLY": delay = 2 elif speed == "QUICKLY": delay = 0.5 for i in range(int(number)): logging.info('Device is blinking.') time.sleep(delay) #JCH : get MP3 player pid def killMP3Pid(): count = 1 pid = -1 cmd = ['ps', '-ef'] fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout for line in fd_popen: if line.find('runMP3') != -1: list = line.split() pid = list[1] print('bash pid:' + str(pid)) os.kill(int(pid), signal.SIGTERM) #or signal.SIGKILL break fd_popen.close() fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout for line in fd_popen: if line.find('omxplayer') != -1: print('find') list = line.split() pid = list[1] print('pid:' + str(pid)) os.kill(int(pid), signal.SIGTERM) if count == 2: break else: count = count + 1 fd_popen.close() # with SampleAssistant(lang, device_model_id, device_id, conversation_stream, grpc_channel, grpc_deadline, device_handler) as assistant: while assistant.assist(): if once: break
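# killMP3Pid() above finds the player by scraping `ps -ef`, which is fragile
# (and under Python 3 the pipe yields bytes, so line.find('runMP3') would need
# a bytes pattern or a decode). A minimal alternative sketch: keep the Popen
# handle from launch and terminate it directly. The module-level `mp3_proc`
# name and both helpers are hypothetical.
import subprocess

mp3_proc = None

def start_mp3():
    global mp3_proc
    mp3_proc = subprocess.Popen(['./runMP3.sh'])

def stop_mp3():
    global mp3_proc
    if mp3_proc is not None and mp3_proc.poll() is None:  # still running?
        mp3_proc.terminate()  # SIGTERM, matching the os.kill above
        mp3_proc.wait()
    mp3_proc = None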
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, display, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Porcupine setup library_path = "lib/linux/x86_64/libpv_porcupine.so" # Path to Porcupine's C library available under lib/${SYSTEM}/${MACHINE}/ model_file_path = "lib/common/porcupine_params.pv" # It is available at lib/common/porcupine_params.pv keyword_file_paths = ['picovoice_linux.ppn', 'ok_google_linux_2020-04-28_v1.7.0.ppn', 'hey_google_linux_2020-04-28_v1.7.0.ppn'] sensitivities = [0.8, 0.9, 0.9] porcupine = None pa = None audio_stream = None # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width ) else: audio_source = audio_device = ( audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size ) ) if output_audio_file: audio_sink = audio_helpers.WaveSink( open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width ) else: audio_sink = audio_device = ( audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size ) ) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] logging.info("Using device model %s and device id %s", device_model_id, device_id) except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ( 'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id) ) device_id = str(uuid.uuid1()) payload = { 'id': device_id, 'model_id': device_model_id, 'client_type': 'SDK_SERVICE' } session = google.auth.transport.requests.AuthorizedSession( credentials ) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) device_handler = device_helpers.DeviceRequestHandler(device_id) @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') else: logging.info('Turning device off') @device_handler.command('com.example.commands.BlinkLight') def blink(speed, number): logging.info('Blinking device %s times.' % number) delay = 1 if speed == "SLOWLY": delay = 2 elif speed == "QUICKLY": delay = 0.5 for i in range(int(number)): logging.info('Device is blinking.') time.sleep(delay) with SampleAssistant(lang, device_model_id, device_id, conversation_stream, display, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. if input_audio_file or output_audio_file: assistant.assist() return wait_for_user_trigger = not once try: porcupine = Porcupine( library_path, model_file_path, keyword_file_paths=keyword_file_paths, sensitivities=sensitivities) pa = pyaudio.PyAudio() audio_stream = pa.open( rate=porcupine.sample_rate, channels=1, format=pyaudio.paInt16, input=True, frames_per_buffer=porcupine.frame_length, input_device_index=2) while True: #print('listening') pcm = audio_stream.read(porcupine.frame_length) pcm = struct.unpack_from("h" * porcupine.frame_length, pcm) #print('test') result = porcupine.process(pcm) if result >= 0: print('detected keyword') continue_conversation = assistant.assist() wait_for_user_trigger = not continue_conversation except KeyboardInterrupt: print('stopping ...') finally: if porcupine is not None: porcupine.delete() if audio_stream is not None: audio_stream.close() if pa is not None: pa.terminate()
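# A minimal sketch of the wake-word handoff pattern used above: read one
# Porcupine-sized frame from PyAudio, unpack it to 16-bit samples, and hand
# off to the Assistant when any keyword index fires. Assumes `porcupine`,
# `audio_stream`, and `assistant` are set up exactly as in the try block
# above; the function name is hypothetical.
import struct

def poll_wake_word(porcupine, audio_stream, assistant):
    pcm = audio_stream.read(porcupine.frame_length)
    samples = struct.unpack_from("h" * porcupine.frame_length, pcm)
    if porcupine.process(samples) >= 0:  # index of the detected keyword
        return assistant.assist()        # returns continue_conversation
    return False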
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # GPIO setup GPIO.setmode(GPIO.BCM) GPIO.setwarnings(False) GPIO.setup(sBUTTON, GPIO.IN) GPIO.setup(gBUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP) GPIO.setup(gLED, GPIO.OUT, initial=GPIO.HIGH) # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] logging.info("Using device model %s and device id %s", device_model_id, device_id) except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ('https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id)) device_id = str(uuid.uuid1()) payload = { 'id': device_id, 'model_id': device_model_id, 'client_type': 'SDK_SERVICE' } session = google.auth.transport.requests.AuthorizedSession( credentials) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) device_handler = device_helpers.DeviceRequestHandler(device_id) image_dir = '/home/pi/robot/image/' def camera(): now = datetime.now() dir_name = now.strftime('%Y%m%d') dir_path = image_dir + dir_name + '/' file_name = now.strftime('%H%M%S') + '.jpg' fname = dir_path + file_name try: os.mkdir(dir_path) except OSError: logging.info('Date dir already exists') os.system('raspistill -o ' + fname) return fname @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') #Klight added GPIO.output(gLED, GPIO.HIGH) time.sleep(1) GPIO.output(gLED, GPIO.LOW) #os.system('python /home/pi/robot/blinkt_color.py') else: logging.info('Turning device off') GPIO.output(gLED, GPIO.LOW) # Kblink @device_handler.command('com.acme.commands.blink_light') def blinker(number, lightKey): logging.info('Blinking device %s times.' % number) for i in range(int(number)): logging.info('Device is blinking %s/%s time.' % (i, number)) time.sleep(0.5) GPIO.output(gLED, GPIO.HIGH) time.sleep(0.5) GPIO.output(gLED, GPIO.LOW) # Kcamera @device_handler.command('com.acme.commands.pi_camera') def picamera(number, cameraKey): logging.info('Taking a %s %s times.' 
% (cameraKey, number)) GPIO.output(gLED, GPIO.HIGH) if cameraKey: # in ('picture', 'camera', 'photo'): fname = camera() result = os.system( 'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face ' + fname) #robot/vision.py "" '+fname) logging.info('Image:' + fname) GPIO.output(gLED, GPIO.LOW) @device_handler.command('com.acme.commands.pi_jp') def pijp(number, cameraKey): logging.info(cameraKey) GPIO.output(gLED, GPIO.HIGH) if cameraKey: # in ('picture', 'camera', 'photo'): fname = camera() result = os.system( 'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face ' + fname) #robot/vision.py "" '+fname) logging.info('Image:' + fname) GPIO.output(gLED, GPIO.LOW) @device_handler.command('com.acme.commands.pi_motor') def pimotor(number, directionKey): logging.info(directionKey) GPIO.output(gLED, GPIO.HIGH) result = os.system('python3 /home/pi/robot/motor.py') GPIO.output(gLED, GPIO.LOW) """if color.get('name') == "blue": #shoot: logging.info('Camera shoot!') GPIO.output(gLED, GPIO.HIGH) else: logging.info('Something else happened.') GPIO.output(gLED, GPIO.LOW)""" with SampleAssistant(lang, device_model_id, device_id, conversation_stream, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. if input_audio_file or output_audio_file: assistant.assist() return # If no file arguments supplied: # keep recording voice requests using the microphone # and playing back assistant response using the speaker. # When the once flag is set, don't wait for a trigger. Otherwise, wait. wait_for_user_trigger = not once while True: if wait_for_user_trigger: # GPIO button added state = GPIO.input(gBUTTON) logging.info("Push button to Google talk!") GPIO.output(gLED, GPIO.HIGH) time.sleep(0.2) GPIO.output(gLED, GPIO.LOW) if state: pass #continue else: continue #pass #click.pause(info='Press Enter to send a new request...') continue_conversation = assistant.assist() # wait for user trigger if there is no follow-up turn in # the conversation. wait_for_user_trigger = not continue_conversation # If we only want one conversation, break. if once and (not continue_conversation): break
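# The blink_light handler above toggles gLED inline. A minimal reusable sketch
# of the same on/off cadence, assuming the BCM pin is already set up with
# GPIO.setup(pin, GPIO.OUT) as in main(); the helper name is hypothetical.
import time
import RPi.GPIO as GPIO

def blink_led(pin, times, period=0.5):
    for i in range(int(times)):
        GPIO.output(pin, GPIO.HIGH)
        time.sleep(period)
        GPIO.output(pin, GPIO.LOW)
        time.sleep(period)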
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.acme.commands.play_kkbox')
    def play_music(songName):
        # You must match the parameters from the Action Package.
        logging.info('play %s ' % songName)
        # url = 'https://widget.kkbox.com/v1/?id=4kxvr3wPWkaL9_y3o_&type=song&terr=TW&lang=TC&autoplay=true&loop=true'
        # result = subprocess.Popen(['firefox', url], stdout=subprocess.PIPE)
        # print(result.stdout)
        from kkbox_partner_sdk.auth_flow import KKBOXOAuth
        CLIENT_ID = 'cea7cb81a731b46caeb9b8c0e25abd22'
        CLIENT_SECRET = '6317f7914dcc9e1fb50d01f744b3f1fb'
        auth = KKBOXOAuth(CLIENT_ID, CLIENT_SECRET)
        token = auth.fetch_access_token_by_client_credentials()
        print(token)
        from kkbox_partner_sdk.api import KKBOXAPI
        kkboxapi = KKBOXAPI(token)
        # Search for the requested song. The original hardcoded the keyword
        # '女武神' ("Valkyrie") and ignored songName entirely.
        keyword = songName
        types = ['track']
        result = kkboxapi.search_fetcher.search(keyword, types)
        tracks = result['tracks']['data']
        # print('Search results: {}'.format(tracks))
        track_id = result['tracks']['data'][0]['id']
        track_info = kkboxapi.track_fetcher.fetch_track(track_id)
        url = track_info['url']
        print('Track info URL: {}'.format(url))
        send(url)
        tickets = kkboxapi.ticket_fetcher.fetch_media_provision(track_id)
        url = tickets['url']
        print('Media provision URL: {}'.format(url))
        print('Playback output follows')
        import subprocess
        subprocess.run(['ffplay', '-nodisp', '-autoexit', url])

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        once = True  # hardcoded override: always exit after a single conversation
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
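# A condensed sketch of the KKBOX flow in play_music above, using only the
# kkbox_partner_sdk calls that appear there: client-credentials token, keyword
# search, then a media-provision ticket whose URL is playable with ffplay.
# Credentials are placeholders and the function name is hypothetical.
import subprocess
from kkbox_partner_sdk.auth_flow import KKBOXOAuth
from kkbox_partner_sdk.api import KKBOXAPI

def play_first_match(keyword, client_id, client_secret):
    token = KKBOXOAuth(client_id,
                       client_secret).fetch_access_token_by_client_credentials()
    kkboxapi = KKBOXAPI(token)
    result = kkboxapi.search_fetcher.search(keyword, ['track'])
    track_id = result['tracks']['data'][0]['id']  # take the first hit only
    ticket = kkboxapi.ticket_fetcher.fetch_media_provision(track_id)
    subprocess.run(['ffplay', '-nodisp', '-autoexit', ticket['url']])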
def main(): # Configuration api_endpoint = ASSISTANT_API_ENDPOINT credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json') project_id = os.getenv("PROJECT_ID") device_model_id = os.getenv("DEVICE_MODEL_ID") device_id = os.getenv("DEVICE_ID") device_config = os.path.join(click.get_app_dir('googlesamples-assistant'), 'device_config.json') lang = "en-US" display = False verbose = False audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE grpc_deadline = DEFAULT_GRPC_DEADLINE # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] logging.info("Using device model %s and device id %s", device_model_id, device_id) except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ('https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id)) device_id = str(uuid.uuid1()) payload = { 'id': device_id, 'model_id': device_model_id, 'client_type': 'SDK_SERVICE' } session = google.auth.transport.requests.AuthorizedSession( credentials) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) device_handler = device_helpers.DeviceRequestHandler(device_id) with SampleAssistant(lang, device_model_id, device_id, conversation_stream, display, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. # If no file arguments supplied: # keep recording voice requests using the microphone # and playing back assistant response using the speaker. # When the once flag is set, don't wait for a trigger. Otherwise, wait. # Initialise car share app. assistant.assist(text_query="talk to car share") while True: continue_conversation = assistant.assist()
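# main() above pulls PROJECT_ID, DEVICE_MODEL_ID, and DEVICE_ID from the
# environment and falls back to registering a device when they are missing.
# A minimal sketch of failing fast instead, for deployments where silent
# registration at startup is unwanted; the helper name is hypothetical.
import os
import sys

def require_env(*names):
    values = {name: os.getenv(name) for name in names}
    missing = [name for name, value in values.items() if not value]
    if missing:
        sys.exit('Missing environment variables: %s' % ', '.join(missing))
    return values

# e.g. cfg = require_env('PROJECT_ID', 'DEVICE_MODEL_ID', 'DEVICE_ID')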
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, display, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. 
conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) if not device_id or not device_model_id: try: with open(device_config) as f: device = json.load(f) device_id = device['id'] device_model_id = device['model_id'] logging.info("Using device model %s and device id %s", device_model_id, device_id) except Exception as e: logging.warning('Device config not found: %s' % e) logging.info('Registering device') if not device_model_id: logging.error('Option --device-model-id required ' 'when registering a device instance.') sys.exit(-1) if not project_id: logging.error('Option --project-id required ' 'when registering a device instance.') sys.exit(-1) device_base_url = ('https://%s/v1alpha2/projects/%s/devices' % (api_endpoint, project_id)) device_id = str(uuid.uuid1()) payload = { 'id': device_id, 'model_id': device_model_id, 'client_type': 'SDK_SERVICE' } session = google.auth.transport.requests.AuthorizedSession( credentials) r = session.post(device_base_url, data=json.dumps(payload)) if r.status_code != 200: logging.error('Failed to register device: %s', r.text) sys.exit(-1) logging.info('Device registered: %s', device_id) pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True) with open(device_config, 'w') as f: json.dump(payload, f) device_handler = device_helpers.DeviceRequestHandler(device_id) @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') else: logging.info('Turning device off') @device_handler.command('com.example.commands.BlinkLight') def blink(speed, number): logging.info('Blinking device %s times.' % number) delay = 1 if speed == "SLOWLY": delay = 2 elif speed == "QUICKLY": delay = 0.5 for i in range(int(number)): logging.info('Device is blinking.') time.sleep(delay) with SampleAssistant(lang, device_model_id, device_id, conversation_stream, display, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. if input_audio_file or output_audio_file: assistant.assist() return # ウェイクワードでの起動 porcupine = None pa = None audio_stream = None try: # ハンドル作成 porcupine = pvporcupine.create(keywords=['jarvis', 'snowboy']) # pyaudioでの録音 pa = pyaudio.PyAudio() audio_stream = pa.open(rate=porcupine.sample_rate, channels=1, format=pyaudio.paInt16, input=True, frames_per_buffer=porcupine.frame_length) # 待ちループ def get_next_audio_frame(): pcm = audio_stream.read(porcupine.frame_length, exception_on_overflow=False) pcm = struct.unpack_from("h" * porcupine.frame_length, pcm) return pcm while True: keyword_index = porcupine.process(get_next_audio_frame()) if keyword_index >= 0: # detection event logic/callback continue_conversation = assistant.assist() while audio_stream.get_read_available() > 0: get_next_audio_frame() finally: if porcupine is not None: porcupine.delete() if audio_stream is not None: audio_stream.close() if pa is not None: pa.terminate() # If no file arguments supplied: # keep recording voice requests using the microphone # and playing back assistant response using the speaker. # When the once flag is set, don't wait for a trigger. Otherwise, wait. wait_for_user_trigger = not once while True: if wait_for_user_trigger: click.pause(info='Press Enter to send a new request...') continue_conversation = assistant.assist() # wait for user trigger if there is no follow-up turn in # the conversation. 
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
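# The try/finally above tears down Porcupine, PyAudio, and the stream by hand.
# A minimal sketch of the same lifecycle as a context manager, reusing the
# pvporcupine.create and pyaudio calls already shown above; the function name
# is hypothetical.
import contextlib
import pyaudio
import pvporcupine

@contextlib.contextmanager
def wake_word_stream(keywords):
    porcupine = pvporcupine.create(keywords=keywords)
    pa = pyaudio.PyAudio()
    stream = pa.open(rate=porcupine.sample_rate, channels=1,
                     format=pyaudio.paInt16, input=True,
                     frames_per_buffer=porcupine.frame_length)
    try:
        yield porcupine, stream
    finally:
        stream.close()
        pa.terminate()
        porcupine.delete()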
def main(api_endpoint, credentials, project_id, device_model_id, device_id, device_config, lang, verbose, input_audio_file, output_audio_file, audio_sample_rate, audio_sample_width, audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline, once, *args, **kwargs): """Samples for the Google Assistant API. Examples: Run the sample with microphone input and speaker output: $ python -m googlesamples.assistant Run the sample with file input and speaker output: $ python -m googlesamples.assistant -i <input file> Run the sample with file input and output: $ python -m googlesamples.assistant -i <input file> -o <output file> """ # Setup logging. logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) # Load OAuth 2.0 credentials. try: with open(credentials, 'r') as f: credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f)) http_request = google.auth.transport.requests.Request() credentials.refresh(http_request) except Exception as e: logging.error('Error loading credentials: %s', e) logging.error('Run google-oauthlib-tool to initialize ' 'new OAuth 2.0 credentials.') sys.exit(-1) # Create an authorized gRPC channel. grpc_channel = google.auth.transport.grpc.secure_authorized_channel( credentials, http_request, api_endpoint) logging.info('Connecting to %s', api_endpoint) # Configure audio source and sink. audio_device = None if input_audio_file: audio_source = audio_helpers.WaveSource( open(input_audio_file, 'rb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_source = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) if output_audio_file: audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'), sample_rate=audio_sample_rate, sample_width=audio_sample_width) else: audio_sink = audio_device = (audio_device or audio_helpers.SoundDeviceStream( sample_rate=audio_sample_rate, sample_width=audio_sample_width, block_size=audio_block_size, flush_size=audio_flush_size)) # Create conversation stream with the given audio source and sink. conversation_stream = audio_helpers.ConversationStream( source=audio_source, sink=audio_sink, iter_size=audio_iter_size, sample_width=audio_sample_width, ) device_handler = device_helpers.DeviceRequestHandler(device_id) @device_handler.command('action.devices.commands.OnOff') def onoff(on): if on: logging.info('Turning device on') else: logging.info('Turning device off') if not device_id or not device_model_id: logging.error( 'No device_id or no device_model_id found. Please check config.py') sys.exit(0) with SampleAssistant(lang, device_model_id, device_id, conversation_stream, grpc_channel, grpc_deadline, device_handler) as assistant: # If file arguments are supplied: # exit after the first turn of the conversation. if input_audio_file or output_audio_file: assistant.assist() return # If no file arguments supplied: # keep recording voice requests using the microphone # and playing back assistant response using the speaker. # When the once flag is set, don't wait for a trigger. Otherwise, wait. wait_for_user_trigger = not once while True: if wait_for_user_trigger: click.pause(info='Press Enter to send a new request...') continue_conversation = assistant.assist() # wait for user trigger if there is no follow-up turn in # the conversation. wait_for_user_trigger = not continue_conversation # If we only want one conversation, break. 
            if once and (not continue_conversation):
                break
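# A minimal sketch (an assumption, not the sample's full CLI) of the click
# wiring that binds command-line flags to the main() above; the real sample
# declares one @click.option per parameter. The placeholder device ids are
# hypothetical, and the audio defaults mirror the constants already used here.
import click
import audio_helpers  # bundled with the SDK samples

@click.command()
@click.option('--api-endpoint', default='embeddedassistant.googleapis.com',
              show_default=True, help='Google Assistant API endpoint.')
@click.option('--credentials', default='credentials.json', show_default=True,
              help='Path to OAuth 2.0 credentials JSON file.')
@click.option('--verbose', '-v', is_flag=True, help='Enable DEBUG logging.')
@click.option('--once', is_flag=True, help='Exit after a single conversation.')
def cli(api_endpoint, credentials, verbose, once):
    main(api_endpoint=api_endpoint, credentials=credentials,
         project_id=None,
         device_model_id='my-model-id',  # hypothetical placeholder
         device_id='my-device-id',       # hypothetical placeholder
         device_config='device_config.json', lang='en-US', verbose=verbose,
         input_audio_file=None, output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=60 * 3 + 5,  # the samples' default deadline
         once=once)

if __name__ == '__main__':
    cli()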
def audioRecorderCallback(self, snowboy_audio_file):
    # Got keyword from Snowboy, now handle the audio from the file Snowboy recorded
    rospy.loginfo(
        "SPEECH: Snowboy got keyword, Handling Audio that was recorded...")
    suggested_response = ""
    service_response = 0
    partial_result = False  # just always send final text
    phrase_heard_uppercase = ""

    # Handle case where mic disabled by system (talking, or moving servos)
    if not self.mic_system_enabled:
        rospy.loginfo("SPEECH: MIC disabled by SYSTEM. Ignoring input")
        return

    # Handle case where mic disabled by user (including case where turning mic back on)
    if not self.mic_user_enabled:  # mic disabled by User (don't listen)
        if self.mic_user_enable_pending:
            # User said to turn mic back on!
            rospy.loginfo("SPEECH: MIC now enabled by USER.")
            self.mic_user_enable_pending = False  # reset flag
            self.mic_user_enabled = True  # enable mic now (ignoring whatever was in the buffer)
            self.local_voice_say_text("Ok, I am listening")
        else:
            rospy.loginfo("SPEECH: MIC disabled by USER. Ignoring input")
            return

    # =====================================================================================
    # Normal operation - first handle the audio from the file Snowboy recorded
    rospy.loginfo(self.logname + "handling audio from Snowboy...")
    audio_device = None
    read_from_file = True  # first read is file from Snowboy
    display_assistant_responses = False  # Display HTML!
    grpc_deadline = DEFAULT_GRPC_DEADLINE

    rospy.loginfo('initializing SampleAssistant...')
    with SampleAssistant('en-US', self.device_model_id, self.device_id,
                         display_assistant_responses, self.grpc_channel,
                         grpc_deadline, self.device_handler) as assistant:
        # If user asked an open-ended question, handle the follow-up question
        # without waiting for the robot name!
        continue_conversation = True  # go through loop at least once
        while continue_conversation:
            if read_from_file:
                audio_source = audio_helpers.WaveSource(
                    open(snowboy_audio_file, 'rb'),
                    sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
                    sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH)
                # After handling Snowboy's initial buffer, everything else is mic input
                read_from_file = False
            else:
                audio_source = audio_device = (
                    audio_device or audio_helpers.SoundDeviceStream(
                        sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
                        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
                        block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
                        flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))

            rospy.loginfo('Setting up Output device (speaker or file)...')
            send_response_to_file = False  # DAVES: change this to hide Google's spoken response!
            if send_response_to_file:
                # Note: output_audio_file must be defined elsewhere before
                # enabling this branch; the original referenced undefined names.
                audio_sink = audio_helpers.WaveSink(
                    open(output_audio_file, 'wb'),
                    sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
                    sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH)
            else:
                # Note: unlike the source branch, the sink stream is recreated
                # on every turn rather than reusing audio_device.
                audio_sink = audio_device = (
                    audio_helpers.SoundDeviceStream(
                        sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
                        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
                        block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
                        flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))

            # Create conversation stream with the given audio source and sink.
            rospy.loginfo('Creating Conversation Stream...')
            conversation_stream = audio_helpers.ConversationStream(
                source=audio_source,
                sink=audio_sink,
                iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
                sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
            )
            # pass in the current stream (file or mic input)
            assistant.set_conversation_stream(conversation_stream)

            rospy.loginfo('Calling Assist...')
            assistant_response = None
            assistant_response_ascii = None
            continue_conversation, assistant_response = assistant.assist()
            rospy.loginfo('Done with conversation / response.')

            if assistant_response:
                try:
                    # Handle Unicode
                    assistant_response_ascii = assistant_response.encode(
                        'ascii', errors='ignore')
                    rospy.loginfo('FINAL ASSISTANT RESPONSE TEXT: [%s]',
                                  assistant_response_ascii)
                    # use_google_assistant_voice is a module-level config flag
                    if not use_google_assistant_voice:
                        self.local_voice_say_text(assistant_response_ascii)
                except Exception as e:
                    rospy.logwarn(
                        'Bad FINAL ASCII response from Assistant: %s', e)
        # END OF BLOCK FROM GOOGLE_CLOUD

    # restore eye color to normal (eye_color_default is a module-level constant)
    self.pub_eye_color.publish(eye_color_default)
    os.remove(snowboy_audio_file)
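# A minimal sketch (not from the original node) of a Python 3-safe variant of
# the ASCII handling above: str.encode('ascii', errors='ignore') returns bytes
# on Python 3, which would log as b'...'; decoding back yields plain text.
def to_ascii_text(text):
    """Drop any non-ASCII characters and return a plain str."""
    return text.encode('ascii', errors='ignore').decode('ascii')

# Example usage: to_ascii_text(u'caf\u00e9 ok') returns 'caf ok'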
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # GPIO 12 is configured here, though the stock handlers below only log.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        porcupine = PorcupineDemo(
            library_path=pvporcupine.LIBRARY_PATH,
            model_path=pvporcupine.MODEL_PATH,
            keyword_paths=[pvporcupine.KEYWORD_PATHS[x]
                           for x in ['terminator']],
            sensitivities=[0.95],
            output_path=None,
            input_device_index=None)
        porcupine_thread = threading.Thread(target=porcupine.run)
        porcupine_thread.start()
        bill = Billy()
        while True:
            if wait_for_user_trigger:
                # Poll the wake-word flag set by the Porcupine thread.
                # PorcupineDemo is assumed to clear `detected` after each
                # detection; otherwise this loop re-triggers immediately.
                while not porcupine.detected:
                    time.sleep(0.1)
                # click.pause(info='Press Enter to send a new request...')
            bill.eye_on()  # original used assistant.bill; the local Billy instance is intended
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
            bill.eye_off()
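# PorcupineDemo is this project's own wrapper; below is a minimal sketch
# (an assumption, not the project's code) of the contract the loop above
# relies on: run() listens on a worker thread and latches `detected` when the
# keyword is heard. It assumes the pvporcupine 1.x API and pyaudio for capture.
import struct
import threading
import pvporcupine
import pyaudio

class WakeWordListener:
    def __init__(self, keyword_paths, sensitivities):
        self.detected = False
        self._porcupine = pvporcupine.create(
            keyword_paths=keyword_paths, sensitivities=sensitivities)

    def run(self):
        pa = pyaudio.PyAudio()
        stream = pa.open(rate=self._porcupine.sample_rate, channels=1,
                         format=pyaudio.paInt16, input=True,
                         frames_per_buffer=self._porcupine.frame_length)
        while True:
            # Read one frame of 16-bit PCM and unpack it into ints.
            pcm = struct.unpack_from(
                'h' * self._porcupine.frame_length,
                stream.read(self._porcupine.frame_length))
            # process() returns the index of the detected keyword, or -1.
            if self._porcupine.process(pcm) >= 0:
                self.detected = True  # consumer is expected to reset this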
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        query = ''
        cached = []
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # Keep handling detections published to memcache and sending them to
        # the assistant as text queries.
        while True:
            # Wait until the detection pipeline publishes a non-empty name.
            while True:
                name = current_detected.get('Name')
                if name:  # original used `name is not ''`, which is also true for None
                    query = name
                    break
                time.sleep(0.1)  # added: avoid a tight busy-wait on memcache
            click.echo('<you> %s' % query)
            # always set MODE = TRUE for text input for detection input
            # if not assistant.MODE:
            #     assistant.switch_mode()
            # first request made to Dialogflow to notify that the user was detected
            continue_conversation = assistant.assist(text_query=query)
            # decision fork: first detection means check-in,
            # subsequent detections mean action tracking
            if query not in cached:
                cached.append(query)
                continue_conversation = assistant.assist(text_query='start')
            else:
                assistant.switch_mode()
                continue_conversation = assistant.assist(text_query=None)
                assistant.switch_mode()
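# A minimal sketch (an assumption, not part of the original project) of the
# producer side of the memcache handshake above: a detector process publishes
# the detected name under the 'Name' key and clears it after a short delay so
# the polling loop does not keep re-triggering on a stale value.
import time
import memcache

def publish_detection(name, host='127.0.0.1:11211', linger_s=5.0):
    client = memcache.Client([host], debug=0)
    client.set('Name', name)   # the consumer loop polls this key
    time.sleep(linger_s)       # give the consumer time to read it
    client.set('Name', '')     # clear so the same detection is not replayed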
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
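# A sketch (an assumption, not this variant's code) of extending the OnOff
# trait handler above to drive a real GPIO pin with RPi.GPIO instead of only
# logging; pin 12 mirrors the setup used in the other variants in this file,
# and the device id is a hypothetical placeholder.
import logging
import RPi.GPIO as GPIO
import device_helpers  # bundled with the SDK samples

GPIO.setmode(GPIO.BCM)
GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)

device_handler = device_helpers.DeviceRequestHandler('my-device-id')

@device_handler.command('action.devices.commands.OnOff')
def onoff(on):
    # Drive the pin so the trait has a visible effect.
    GPIO.output(12, GPIO.HIGH if on else GPIO.LOW)
    logging.info('Turning device %s', 'on' if on else 'off')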
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error("Run google-oauthlib-tool to initialize "
                      "new OAuth 2.0 credentials.")
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, "rb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_source = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, "wb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_sink = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device["id"]
                device_model_id = device["model_id"]
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning("Device config not found: %s" % e)
            logging.info("Registering device")
            if not device_model_id:
                logging.error("Option --device-model-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            if not project_id:
                logging.error("Option --project-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            device_base_url = "https://%s/v1alpha2/projects/%s/devices" % (
                api_endpoint,
                project_id,
            )
            device_id = str(uuid.uuid1())
            payload = {
                "id": device_id,
                "model_id": device_model_id,
                "client_type": "SDK_SERVICE",
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error("Failed to register device: %s", r.text)
                sys.exit(-1)
            logging.info("Device registered: %s", device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, "w") as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command("action.devices.commands.OnOff")
    def onoff(on):
        if on:
            logging.info("Turning device on")
        else:
            logging.info("Turning device off")

    @device_handler.command("com.example.commands.BlinkLight")
    def blink(speed, number):
        logging.info("Blinking device %s times." % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info("Device is blinking.")
            time.sleep(delay)

    with SampleAssistant(
        lang,
        device_model_id,
        device_id,
        conversation_stream,
        display,
        grpc_channel,
        grpc_deadline,
        device_handler,
    ) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        # `dots` (LED strip) and `button` are module-level hardware objects;
        # see the initialization sketch after this function.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                print("Press button to initiate a new request")
                dots.fill(0x00000F)  # light blue LEDs while waiting
                dots.show()
                while button.value:
                    time.sleep(0.1)
            # red LEDs while the request is being handled
            dots.fill(0xFF0000)
            dots.show()
            continue_conversation = assistant.assist()
            # LEDs off
            dots.fill(0x000000)
            dots.show()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
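# A minimal sketch (an assumption, not from the original sample) of how the
# module-level `dots` and `button` objects used above might be created with
# Adafruit's CircuitPython libraries; the strip length and button pin are
# illustrative placeholders only.
import board
import digitalio
import adafruit_dotstar

NUM_PIXELS = 3  # hypothetical strip length

# DotStar strip on the hardware SPI pins.
dots = adafruit_dotstar.DotStar(board.SCK, board.MOSI, NUM_PIXELS,
                                brightness=0.2)

# Active-low push button: with the internal pull-up enabled, `button.value`
# reads True while unpressed, which matches the `while button.value:` wait.
button = digitalio.DigitalInOut(board.D17)  # hypothetical button pin
button.switch_to_input(pull=digitalio.Pull.UP)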