def test_noop_execution(self):
    """A no-op device request produces zero futures and never calls the handler."""
    subject = device_helpers.DeviceRequestHandler('some-device')
    subject.command('SOME_COMMAND')(self.handler)
    request = build_noop_device_request('some-device')
    futures = subject(request)
    self.assertEqual(0, len(futures))
    self.assertFalse(self.handler_called)
def test_unknown_command(self):
    """An unrecognized command still yields one future, but the registered
    handler for a different command is never invoked."""
    subject = device_helpers.DeviceRequestHandler('some-device')
    subject.command('SOME_COMMAND')(self.handler)
    request = build_device_request('some-device', 'OTHER_COMMAND', 'some-arg')
    futures = subject(request)
    self.assertEqual(1, len(futures))
    self.assertFalse(self.handler_called)
def test_different_device(self):
    """A request addressed to another device is ignored entirely:
    no futures, no handler call."""
    subject = device_helpers.DeviceRequestHandler('some-device')
    subject.command('SOME_COMMAND')(self.handler)
    request = build_device_request('other-device', 'SOME_COMMAND', 'some-arg')
    futures = subject(request)
    self.assertEqual(0, len(futures))
    self.assertFalse(self.handler_called)
def test_success(self):
    """A matching device and command dispatches the handler asynchronously
    with the request argument."""
    subject = device_helpers.DeviceRequestHandler('some-device')
    subject.command('SOME_COMMAND')(self.handler)
    request = build_device_request('some-device', 'SOME_COMMAND', 'some-arg')
    futures = subject(request)
    self.assertEqual(1, len(futures))
    # Handlers run on an executor; wait for completion before asserting.
    concurrent.futures.wait(futures)
    self.assertEqual('some-arg', self.handler_called)
def _install_device_handlers(self):
    """Register device-action handlers for this assistant instance.

    Installs an OnOff command handler that republishes the assistant's
    command as a GoogleDeviceOnOffEvent on the shared event bus, so
    other components can react to it.
    """
    self.device_handler = device_helpers.DeviceRequestHandler(
        self.device_id)

    @self.device_handler.command('action.devices.commands.OnOff')
    def handler(on):
        # Forward the OnOff command to local subscribers via the bus;
        # no direct device actuation happens here.
        get_bus().post(
            GoogleDeviceOnOffEvent(device_id=self.device_id,
                                   device_model_id=self.device_model_id,
                                   on=on))
def __deviceHandlerSetup(self):
    """Build and return a DeviceRequestHandler for this device with a
    single OnOff command that prints the resulting state."""
    print("In device handler setup")
    handler = device_helpers.DeviceRequestHandler(self.deviceId)

    @handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Same two messages as before, folded into one conditional call.
        print("Turned on" if on else "Turned off")

    return handler
def test_exception(self):
    """An exception raised by a command handler is captured on the
    returned future rather than propagated."""
    err = Exception('some error')

    def failing_command(arg):
        raise err

    subject = device_helpers.DeviceRequestHandler('some-device')
    subject.command('FAILING_COMMAND')(failing_command)
    request = build_device_request('some-device', 'FAILING_COMMAND', 'some-arg')
    futures = subject(request)
    self.assertEqual(1, len(futures))
    concurrent.futures.wait(futures)
    self.assertEqual(err, futures[0].exception())
def return_ga():
    """Build and return a fully configured SampleAssistant.

    Uses hard-coded project/device settings plus OAuth credentials and a
    device config stored under the user's app-config directories. Exits
    the process (sys.exit(-1)) if credentials cannot be loaded or device
    registration fails.
    """
    # Hard-coded configuration — this variant takes no CLI arguments.
    api_endpoint = 'embeddedassistant.googleapis.com'
    project_id = "assistantdevice-1f1e8"
    device_model_id = "assistantdevice-1f1e8-ga1-v3slut"
    device_id = None
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    device_config = os.path.join(click.get_app_dir('googlesamples-assistant'),
                                 'device_config.json')
    lang = "en-US"
    display = False
    verbose = False
    input_audio_file = None
    output_audio_file = None
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    grpc_deadline = 60 * 3 + 5
    once = False

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logger.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device is shared between source and sink so that, when both are
    # the sound device, only one SoundDeviceStream is created.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer the stored device config; if that is
    # missing, register a new device instance with the Assistant API and
    # persist the resulting id/model to device_config.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logger.info("Using device model %s and device id %s",
                            device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logger.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logger.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parent
            # directories — assumes the config parent already exists.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logger.info('Turning device on')
        else:
            logger.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: "blinks" by logging once per blink with a delay
        # chosen from the requested speed.
        logger.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logger.info('Device is blinking.')
            time.sleep(delay)

    logger.info("Start Google Assistant")
    assistant = SampleAssistant(lang, device_model_id, device_id,
                                conversation_stream, display,
                                grpc_channel, grpc_deadline,
                                device_handler)
    return assistant
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang,
         verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>

    This variant integrates with respeakerd over D-Bus: a 'trigger'
    signal from the hotword engine wakes the assistant thread, and
    assistant state changes are published back as D-Bus signals.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # D-Bus preparation.
    # cv_trigger: the assistant thread waits on this condition until the
    # hotword 'trigger' signal arrives on D-Bus.
    cv_trigger = Condition(Lock())
    # eventq: assistant state events consumed by event_process_thread and
    # re-published as D-Bus signals.
    eventq = Queue()
    dbus_obj = DBusSignals()
    loop = GLib.MainLoop()
    system_bus = SystemBus()

    def dbus_handler(sender, object, iface, signal, params):
        # Wake the assistant thread when the hotword engine fires.
        # logging.info(sender)
        # logging.info(object)
        # logging.info(signal)
        # logging.info(params)
        logging.debug('Received D-Bus signal: {}'.format(signal))
        if signal == 'trigger':
            cv_trigger.acquire()
            cv_trigger.notify()
            cv_trigger.release()

    pub = system_bus.publish("io.respeaker.respeakerd", dbus_obj)
    sub = system_bus.subscribe(iface='respeakerd.signal',
                               signal_fired=dbus_handler)

    def exit_dbus():
        # Tear down both sides of the D-Bus connection.
        sub.unsubscribe()
        pub.unpublish()

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device is shared so that microphone and speaker reuse one
    # SoundDeviceStream when no file arguments are given.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity from the stored config, or register a new
    # device instance; every error exit first unsubscribes from D-Bus.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                exit_dbus()
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                exit_dbus()
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                exit_dbus()
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parents.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # If file arguments are supplied:
    # exit after the first turn of the conversation.
    if input_audio_file or output_audio_file:
        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream,
                             grpc_channel, grpc_deadline,
                             device_handler, eventq) as assistant:
            dbus_obj.ready()
            assistant.assist()
            exit_dbus()
            return

    # else it's a long term run
    def assistant_thread(conversation_stream_):
        # Long-running loop: wait for the D-Bus hotword trigger, run one
        # conversation turn, then wait again unless a follow-up turn is
        # expected.
        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream_,
                             grpc_channel, grpc_deadline,
                             device_handler, eventq) as assistant:
            dbus_obj.ready()
            # If no file arguments supplied:
            # keep recording voice requests using the microphone
            # and playing back assistant response using the speaker.
            # When the once flag is set, don't wait for a trigger.
            # Otherwise, wait.
            wait_for_user_trigger = not once
            # capture device should always be opened when not playing
            conversation_stream_.start_recording()
            while True:
                if wait_for_user_trigger:
                    logging.info("speak hotword to wake up")
                    cv_trigger.acquire()
                    cv_trigger.wait()
                    cv_trigger.release()
                    logging.info("wake up!")
                continue_conversation = False
                try:
                    continue_conversation = assistant.assist()
                except PortAudioError as e:
                    # Audio glitches are non-fatal; log and fall through
                    # to idle so the loop can recover on the next trigger.
                    logging.warn('PortAudio Error: {}'.format(str(e)))
                eventq.put('on_idle')
                # wait for user trigger if there is no follow-up turn in
                # the conversation.
                wait_for_user_trigger = not continue_conversation
                # If we only want one conversation, break.
                if once and (not continue_conversation):
                    break
        exit_dbus()
        logging.debug('Exit from the assistant thread...')
        loop.quit()

    def event_process_thread():
        # Drain assistant state events and forward each one as the
        # corresponding D-Bus signal.
        while True:
            event = eventq.get()
            if event == 'on_listen':
                dbus_obj.on_listen()
            elif event == 'on_think':
                dbus_obj.on_think()
            elif event == 'on_speak':
                dbus_obj.on_speak()
            elif event == 'on_idle':
                dbus_obj.on_idle()
            time.sleep(0.5)

    def on_exit(sig):
        # Signal handler for INT/TERM/HUP: clean up D-Bus and stop the
        # GLib main loop.
        exit_dbus()
        loop.quit()
        logging.info("Quit...")

    setup_signals(
        signals=[sys_signal.SIGINT, sys_signal.SIGTERM, sys_signal.SIGHUP],
        handler=on_exit)

    # make conversation_stream writable inside thread
    thrd1 = Thread(target=assistant_thread, args=(conversation_stream, ))
    thrd2 = Thread(target=event_process_thread)
    thrd1.daemon = True
    thrd2.daemon = True
    thrd1.start()
    thrd2.start()

    logging.info("Glib mainloop start running...")
    loop.run()
    dbus_obj.on_idle()
def _install_device_handlers(self):
    """Register device-action handlers; the OnOff command is only logged."""
    self.device_handler = device_helpers.DeviceRequestHandler(self.device_id)

    @self.device_handler.command('action.devices.commands.OnOff')
    def handler(on):
        # No device actuation here — just record that the command arrived.
        self.logger.info('Received OnOff command. on={}'.format(on))
def main():
    """Run the assistant with settings loaded from an external args module.

    Configuration is read from INFO_FILE via imp.load_source, with
    defaults filled in for credentials and device_config paths. Waits for
    a GPIO button press (pin 22) or a keyboard trigger between
    conversation turns.
    """
    args = imp.load_source('args', INFO_FILE)
    if not hasattr(args, 'credentials'):
        args.credentials = os.path.join(os.path.expanduser('~/.config'),
                                        'google-oauthlib-tool',
                                        'credentials.json')
    if not hasattr(args, 'device_config'):
        args.device_config = os.path.join(os.path.expanduser('~/.config'),
                                          'googlesamples-assistant',
                                          'device_config.json')
    verbose = False
    credentials = args.credentials
    project_id = args.project_id
    device_config = args.device_config
    device_id = ''
    device_model_id = args.device_model_id
    api_endpoint = 'embeddedassistant.googleapis.com'
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    grpc_deadline = 60 * 3 + 5
    lang = 'en-US'
    once = False

    # Audible startup cue.
    play_audio_file(resources['startup'])

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # This variant always uses the sound device for both directions,
    # sharing one SoundDeviceStream.
    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # NOTE(review): the handler is created here with the initial (empty)
    # device_id, before the config-load/registration below may replace
    # device_id — confirm the handler's device id matches what the
    # assistant ends up using.
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # Resolve device identity from the stored config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parents.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger.
        # Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                if GPIO != None:
                    button_state = GPIO.input(22)
                else:
                    # use keyboard as a trigger
                    button_state = get_key_stroke()
                if button_state == False:
                    # No trigger yet — keep polling.
                    continue
                else:
                    # button_state=False
                    pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None,
         device_model_id=None,
         device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples-assistant'),
             'device_config.json'),
         lang='en-US',
         display=False,
         verbose=False,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False, *args, **kwargs):
    """Run the assistant gated by the Porcupine 'bumblebee' hotword.

    Sets up credentials, audio, and device registration as in the
    standard sample, then listens continuously on a PyAudio stream and
    starts one assistant turn each time Porcupine detects the keyword.
    """
    # Setup logging.
    #logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    logging.basicConfig(level=logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device is shared so both directions reuse one
    # SoundDeviceStream when no file arguments are given.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity from the stored config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parents.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: "blinks" by logging once per blink with a delay
        # chosen from the requested speed.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    # Porcupine hotword engine setup (Windows paths are hard-coded).
    audio_stream = None
    handle = None
    pa = None
    library_path = '../../lib/windows/amd64/libpv_porcupine.dll'
    model_file_path = '../../lib/common/porcupine_params.pv'
    keyword_file_paths = ['bumblebee_windows.ppn']
    num_keywords = len(keyword_file_paths)
    sensitivities = [0.7]  # 0.2
    handle = porcupine.Porcupine(library_path, model_file_path,
                                 keyword_file_paths=keyword_file_paths,
                                 sensitivities=sensitivities)
    pa = pyaudio.PyAudio()
    audio_stream = pa.open(rate=handle.sample_rate,
                           channels=1,
                           format=pyaudio.paInt16,
                           input=True,
                           frames_per_buffer=handle.frame_length)
    conversation_stream.volume_percentage = 100
    print('Listening for keyword bumblebee...')
    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        while True:
            # Feed one frame of raw PCM to Porcupine; a truthy result
            # means the keyword was detected.
            pcm = audio_stream.read(handle.frame_length)
            pcm = struct.unpack_from("h" * handle.frame_length, pcm)
            result = handle.process(pcm)
            if num_keywords > 0 and result:
                play_audio_file('ding.wav')
                print('[%s] detected keyword!!!!' % str(datetime.now()))
                assistant.assist()
                play_audio_file('dong.wav')
                print('Listening for keyword bumblebee...')
    # NOTE(review): the loop above never breaks, so this cleanup is
    # unreachable except via an exception — confirm intended behavior.
    print('stopping ...')
    if handle is not None:
        handle.delete()
    if audio_stream is not None:
        audio_stream.close()
    if pa is not None:
        pa.terminate()
def init_assistant(
        verbose: bool = False,
        language_code: str = 'en-US',
        credentials: str = os.path.join(os.path.expanduser('~/.config'),
                                        'google-oauthlib-tool',
                                        'credentials.json'),
        device_id: str = None,
        device_model_id: str = None,
        device_config: str = os.path.join(os.path.expanduser('~/.config'),
                                          'googlesamples-assistant',
                                          'device_config.json'),
        project_id: str = None,
        display: bool = False,
):
    """Create and return a SampleAssistant wired to the default sound device.

    Loads OAuth credentials, opens an authorized gRPC channel, resolves or
    registers the device identity, and installs demo OnOff/BlinkLight
    command handlers. Exits the process (sys.exit(-1)) on credential or
    registration failure.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    logging.info('Connecting to %s', ASSISTANT_API_ENDPOINT)

    # Configure audio source and sink.
    # One SoundDeviceStream serves as both microphone source and speaker
    # sink.
    audio_source = audio_device = audio_sink = (
        audio_helpers.SoundDeviceStream(
            sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
            sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
            block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
            flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
    )

    # Resolve device identity from the stored config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (ASSISTANT_API_ENDPOINT, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parents.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: "blinks" by logging once per blink with a delay
        # chosen from the requested speed.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    return SampleAssistant(language_code, device_model_id, device_id,
                           conversation_stream, display,
                           grpc_channel, DEFAULT_GRPC_DEADLINE,
                           device_handler)
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, display,
         verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>

    This variant plays a gender-specific startup sound and can wake on
    either a Snowboy custom hotword or a GPIO push button.
    """
    # Play the startup sound matching the configured voice gender.
    # NOTE(review): `gender`, `ROOT_PATH`, and the trigger globals used
    # below are defined elsewhere in the file.
    if gender=='Male':
        subprocess.Popen(["aplay", "{}/sample-audio-files/Startup-Male.wav".format(ROOT_PATH)],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    else:
        subprocess.Popen(["aplay", "{}/sample-audio-files/Startup-Female.wav".format(ROOT_PATH)],
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device is shared so both directions reuse one
    # SoundDeviceStream when no file arguments are given.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity from the stored config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # NOTE(review): mkdir(exist_ok=True) does not create parents.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: "blinks" by logging once per blink with a delay
        # chosen from the requested speed (lowercase speeds here, unlike
        # other variants in this file).
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        def detected():
            # Snowboy hotword callback: run one assistant turn, plus a
            # follow-up turn if the assistant expects one.
            continue_conversation=assistant.assist()
            if continue_conversation:
                print('Continuing conversation')
                assistant.assist()

        signal.signal(signal.SIGINT, signal_handler)
        sensitivity = [0.5]*len(models)
        callbacks = [detected]*len(models)
        detector = snowboydecoder.HotwordDetector(models,
                                                  sensitivity=sensitivity)

        def start_detector():
            # Blocks until interrupt_callback returns True, invoking
            # `detected` on each hotword hit.
            detector.start(detected_callback=callbacks,
                           interrupt_check=interrupt_callback,
                           sleep_time=0.03)

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger.
        # Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                if custom_wakeword:
                    start_detector()
                else:
                    # GPIO button is active-low here: True means not
                    # pressed, so keep polling.
                    button_state=GPIO.input(pushbuttontrigger)
                    if button_state==True:
                        continue
                    else:
                        pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
        detector.terminate()
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Italian-voice ("ambrogio") variant of the Google Assistant sample.

    Registers the device if needed, wires device-action handlers that queue
    `Fulfillment` objects, and runs a snowboy hotword loop; fulfillments are
    spoken back through an echo trick ("pappagallo <text>") after each
    conversation turn.

    Examples:
      Run the sample with microphone input and speaker output:
        $ python -m googlesamples.assistant
      Run the sample with file input and speaker output:
        $ python -m googlesamples.assistant -i <input file>
      Run the sample with file input and output:
        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    # FIX: was `logging.setLevel(lf.DEBUG if verbose else lf.INFO)` — the
    # logging *module* has no setLevel() and `lf` is undefined (NameError).
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # No cached config: register a new device instance.
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    # Configure audio source and sink (single full-duplex sound device).
    audio_device = audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size)

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_device,
        sink=audio_device,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
        volume=VOLUME)

    # Cloud TTS used for speaking fulfillments back (Italian voice).
    tts_client = texttospeech.TextToSpeechClient()
    tts_voice = texttospeech.types.VoiceSelectionParams(
        language_code='it-IT',
        ssml_gender=texttospeech.enums.SsmlVoiceGender.MALE,
        name="it-it-Wavenet-D")
    tts_config = texttospeech.types.AudioConfig(
        audio_encoding=texttospeech.enums.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        pitch=-2.50,
        speaking_rate=1.0,
        effects_profile_id=['small-bluetooth-speaker-class-device'])

    device_handler = device_helpers.DeviceRequestHandler(device_id)
    fulfillments = []
    # TODO find a better way, is a bit too tangled
    echo_effect = EchoEffect()

    def speak(text, publishing=None):
        # NOTE(review): the conditional is inside the lambda body, so
        # `publisher` is always a callable; it publishes only if `publishing`.
        publisher = lambda: publish(publishing['resource'],
                                    publishing['payload']) if publishing else None
        use_echo(text, publisher)

    def use_tts(text, publisher=lambda: True):
        # Normalize text quirks before synthesis (Italian-specific tweaks).
        text = text.replace("\\n", ". ")
        text = text.replace("\\t", ". ")
        text = text.replace(" h ", " ore ")
        text = text.replace("1 ore ", "1 ora ")
        text = text.replace(" per da ", " per ")
        order = texttospeech.types.SynthesisInput(text=text)
        logging.info('saying: {}'.format(order))
        response = tts_client.synthesize_speech(order, tts_voice, tts_config)
        conversation_stream.start_playback()
        publisher()
        conversation_stream.write(response.audio_content)
        conversation_stream.stop_playback()

    def use_echo(text, publisher):
        # Stash the publisher; the ECHO device action fires it when the
        # assistant repeats the phrase ("pappagallo" = parrot).
        echo_effect.action = publisher
        assistant.assist("pappagallo {}".format(text))

    @device_handler.command('ambrogio.TEST')
    def order(number):
        ful = Fulfillment('ordine: {} eseguito'.format(number))
        fulfillments.append(ful)

    def weather(place_name, place, date_name, date):
        # Defaults: Haarlem / today when the assistant passed no slot values.
        place_name = nvl(place_name, 'Haarlem')
        def_coordinates = {'latitude': 52.3873878, 'longitude': 4.6462194}
        place = nvl(place, def_coordinates, lambda x: x['coordinates'])
        date_name = nvl(date_name, 'oggi')
        date = nvl(date, None, lambda x: dt.date(**x))
        logging.info("date: {}".format(date))
        params = dict()
        params.update(place)
        params.update({'day': date})
        wreq = dark.WeatherRequest(**params)
        wres = dark.call_api(wreq)
        # Italian preposition: "a" -> "ad" before a vowel.
        prop = 'a'
        if place_name[0].lower() in VOWELS:
            prop = 'ad'
        to_speak = ('{} {} {} è {} con una temperatura percepita di {} gradi'
                    .format(date_name, prop, place_name, wres.summaryHuman,
                            round(wres.tempFelt)))
        to_publish = {
            'min': round(wres.tempLowFelt),
            'max': round(wres.tempHighFelt),
            'temp': round(wres.tempFelt),
            'icon': wres.summary
        }
        publishing = {'resource': 'weather', 'payload': to_publish}
        ful = Fulfillment(to_speak, publishing)
        fulfillments.append(ful)

    @device_handler.command('ambrogio.WEATHER')
    def weather_action(place_name, place, date_name, date):
        weather(place_name, place, date_name, date)

    @device_handler.command('ambrogio.ECHO')
    def echo_action(txt):
        logging.info("echoing: {}".format(txt))
        if echo_effect.action:
            echo_effect.action()
            echo_effect.action = None

    @device_handler.command('ambrogio.GREET')
    def morning(nope):
        # Time-of-day greeting, then queue weather for Haarlem and Amsterdam.
        greet = 'salve'
        base = dt.datetime.combine(dt.date.today(), MIDNIGHT)
        now = dt.datetime.now()
        if now < base + MORNING:
            greet = 'buongiorno'
        elif now < base + AFTERNOON:
            greet = 'buon pomeriggio'
        else:
            greet = 'buona sera'
        ful = Fulfillment(greet)
        fulfillments.append(ful)
        weather(None, None, None, None)
        weather('amsterdam',
                {'coordinates': {'latitude': 52.3667, 'longitude': 4.8945}},
                None, None)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         None, grpc_channel, grpc_deadline, device_handler,
                         speak) as assistant:

        def do_assist():
            logging.info("ambrogio engaging")
            ding(conversation_stream)
            continue_conversation = assistant.assist()
            while continue_conversation:
                continue_conversation = assistant.assist()
            # Speak every fulfillment queued by device actions this turn.
            while len(fulfillments) > 0:
                ful = fulfillments.pop(0)
                speak(ful.text, ful.publishing)
            logging.info("ambrogio out")
            ding(conversation_stream)

        logging.info("initializing snowboydetector")
        ding(conversation_stream)
        detector = snowboydecoder.HotwordDetector("resources/ambrogio.pmdl",
                                                  sensitivity=0.40,
                                                  audio_gain=0.50)
        logging.info("starting snowboydetector")
        detector.start(do_assist)
        logging.info("terminating snowboydetector")
        detector.terminate()
        print("terminated snowboy")
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once,
         *args, **kwargs):
    """Samples for the Google Assistant API (GPIO push-button trigger).

    Examples:
      Run the sample with microphone input and speaker output:
        $ python -m googlesamples.assistant
      Run the sample with file input and speaker output:
        $ python -m googlesamples.assistant -i <input file>
      Run the sample with file input and output:
        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve (or register) the device instance BEFORE creating the
    # device-request handler.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            os.makedirs(os.path.dirname(device_config), exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    # FIX: the handler was previously created before the registration block
    # above, binding it to a possibly-None device_id when the id came from
    # device_config — device commands were then silently ignored.
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                # Poll the push button on GPIO pin 22; loop until pressed.
                button_state = GPIO.input(22)
                if button_state == True:
                    continue
                else:
                    pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # A single SoundDeviceStream is shared as both source and sink unless a
    # WAV file overrides one side.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device or
                                       audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device or
                                     audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: load a cached config, or register a new
    # device instance with the Assistant device registry.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Cache registration so later runs skip this step.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    # Device action handlers: log-only placeholders for the standard traits.
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            logging.warning('U CAN DO SOMETHING IN THERE FOR ON!!!')
        else:
            logging.info('Turning device off')
            logging.warning('U CAN DO SOMETHING IN THERE FOR OFF!!!')

    @device_handler.command('action.devices.commands.BrightnessAbsolute')
    def brightnessCheck(brightness):
        if brightness > 50:
            logging.info('brightness > 50')
        else:
            logging.info('brightness <= 50')

    @device_handler.command('action.devices.commands.ColorAbsolute')
    def color(color):
        if color.get('name') == "blue":
            logging.info('color is blue')
        else:
            logging.info('color is not blue')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(language, is_debug, is_answer, device_id=None, device_model_id=None):
    """Build and return a configured ``VoiceAssistant``.

    Loads OAuth credentials and the cached device config, opens an
    authorized gRPC channel, sets up the full-duplex audio stream, and
    constructs the assistant. Exits the process on credential or
    device-config failure.
    """
    ####################################################################################################
    #
    # Initial definitions
    #
    ####################################################################################################
    grpc_deadline = 60 * 3 + 5
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    device_config = os.path.join(click.get_app_dir('googlesamples-assistant'),
                                 'device_config.json')
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE

    ####################################################################################################
    #
    # Usage notes
    #
    ####################################################################################################
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:
        $ python -m googlesamples.assistant
      Run the sample with file input and speaker output:
        $ python -m googlesamples.assistant -i <input file>
      Run the sample with file input and output:
        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """

    ####################################################################################################
    #
    # Load OAuth 2.0 CREDENTIALS.
    # (authentication)
    #
    ####################################################################################################
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        print('Error loading CREDENTIALS: %s' % e)
        print('Run google-oauthlib-tool to initialize ',
              'new OAuth 2.0 CREDENTIALS.')
        sys.exit(-1)

    ####################################################################################################
    #
    # Create an authorized gRPC channel.
    # (gRPC connection)
    #
    ####################################################################################################
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    # print("connection -> %s" % ASSISTANT_API_ENDPOINT)

    ####################################################################################################
    #
    # Configure audio source and sink.
    # (audio stream setup)
    #
    ####################################################################################################
    audio_device = None
    audio_sink = audio_source = (audio_device or
                                 audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))

    ####################################################################################################
    #
    # Create conversation stream with the given audio source and sink.
    #
    ####################################################################################################
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if device_id is None or device_model_id is None:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                # FIX: print() was called with logging-style lazy arguments
                # ("fmt", a, b), which printed the raw format string and a
                # tuple; interpolate explicitly instead.
                print("Using device model %s and device id %s"
                      % (device_model_id, device_id))
        except Exception as e:
            print(e)
            sys.exit(1)

    ####################################################################################################
    #
    # Callback setup
    #
    ####################################################################################################
    device_handler = device_helpers.DeviceRequestHandler(device_id)
    print(device_id, device_model_id)

    ####################################################################################################
    #
    # Start the assistant
    #
    ####################################################################################################
    '''
    text=None
    if text is not None:
        return TextAssistant(language, device_model_id, device_id,
                             IS_DISPLAY, grpc_channel, grpc_deadline)
    '''
    return VoiceAssistant(language, device_model_id, device_id,
                          conversation_stream, IS_DISPLAY,
                          grpc_channel, grpc_deadline,
                          device_handler, is_debug, is_answer)
def main(keyword_file_paths, library_path, model_file_path, asensitivities,
         input_audio_device_index, output_path, show_audio_devices_info,
         api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Porcupine hotword variant of the Google Assistant sample.

    Loads credentials, configures audio and device registration, wires the
    demo device handlers, then hands everything to `PorcupineDemo` which
    triggers the assistant on the configured keyword(s).
    """
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            # print(http_request)
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device or
                                       audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device or
                                     audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: cached config, or register a new instance.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    if show_audio_devices_info:
        PorcupineDemo.show_audio_devices_info()
    else:
        if not keyword_file_paths:
            raise ValueError('keyword file paths are missing')
        keyword_file_paths = [x.strip() for x in keyword_file_paths.split(',')]
        # A single float applies to every keyword; otherwise parse a
        # comma-separated per-keyword list.
        if isinstance(asensitivities, float):
            sensitivities = [asensitivities] * len(keyword_file_paths)
        else:
            sensitivities = [float(x) for x in asensitivities.split(',')]
        PorcupineDemo(lang, device_model_id, device_id, conversation_stream,
                      display, grpc_channel, grpc_deadline, device_handler,
                      library_path,
                      model_file_path,
                      keyword_file_paths,
                      sensitivities,
                      input_device_index=input_audio_device_index,
                      output_path=output_path).run()
        # FIX: was the Python 2 statement `print input_audio_device_index`,
        # a SyntaxError under the Python 3 this file otherwise targets.
        print(input_audio_device_index)
def __init__(self,
             credentials_file=os.path.join(os.path.expanduser('~'), '.config',
                                           'google-oauthlib-tool',
                                           'credentials.json'),
             device_config=os.path.join(os.path.expanduser('~'), '.config',
                                        'googlesamples-assistant',
                                        'device_config.json'),
             lang='en-US',
             conversation_start_fifo=os.path.join(os.path.sep, 'tmp',
                                                  'pushtotalk.fifo'),
             *args, **kwargs):
    """
    Params:
        credentials_file -- Path to the Google OAuth credentials file
            (default: ~/.config/google-oauthlib-tool/credentials.json)
        device_config -- Path to device_config.json. Register your device
            and create a project, then run the pushtotalk.py script from
            googlesamples to create your device_config.json
        lang -- Assistant language (default: en-US)
    """
    super().__init__(*args, **kwargs)

    self.lang = lang
    self.credentials_file = credentials_file
    self.device_config = device_config
    self.conversation_start_fifo = conversation_start_fifo

    # Create the trigger FIFO; it may survive a previous run.
    try:
        os.mkfifo(self.conversation_start_fifo)
    except FileExistsError:
        pass

    with open(self.device_config) as f:
        device = json.load(f)
        self.device_id = device['id']
        self.device_model_id = device['model_id']

    # Load OAuth 2.0 credentials.
    try:
        with open(self.credentials_file, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    # FIX: was a bare `except:` whose body read `e` — `e` was never bound,
    # so any failure here raised NameError instead of logging; a bare
    # except also swallows KeyboardInterrupt/SystemExit.
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        raise

    # Create an authorized gRPC channel.
    self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, self.api_endpoint)
    logging.info('Connecting to %s', self.api_endpoint)

    # Configure audio source and sink (shared full-duplex sound device).
    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width,
    )

    self.device_handler = device_helpers.DeviceRequestHandler(
        self.device_id)