Example #1
def try_to_get_credentials(client_secrets):
    """Try to get credentials, or print an error and quit on failure."""

    if os.path.exists(ASSISTANT_CREDENTIALS):
        return auth_helpers.load_credentials(ASSISTANT_CREDENTIALS,
                                             scopes=[ASSISTANT_OAUTH_SCOPE])

    if not os.path.exists(VR_CACHE_DIR):
        os.mkdir(VR_CACHE_DIR)

    if not os.path.exists(client_secrets) and os.path.exists(
            OLD_CLIENT_SECRETS):
        client_secrets = OLD_CLIENT_SECRETS

    if not os.path.exists(client_secrets):
        print('You need client secrets to use the Assistant API.')
        print('Follow these instructions:')
        print(
            '    https://developers.google.com/api-client-library/python/auth/installed-app'
            '#creatingcred')
        print('and put the file at', client_secrets)
        sys.exit(1)

    if not os.getenv('DISPLAY') and not sys.stdout.isatty():
        print("""
To use the Assistant API, manually start the application from the dev terminal.
See the "Turn on the Assistant API" section of the Voice Recognizer
User's Guide for more info.""")
        sys.exit(1)

    credentials = auth_helpers.credentials_flow_interactive(
        client_secrets, scopes=[ASSISTANT_OAUTH_SCOPE])
    auth_helpers.save_credentials(ASSISTANT_CREDENTIALS, credentials)
    logging.info('OAuth credentials initialized: %s', ASSISTANT_CREDENTIALS)
    return credentials
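
Example #1 treats credential setup as a self-contained bootstrap. A minimal usage sketch follows, assuming the module-level constants the function references: the scope shown is the published Assistant SDK prototype scope, while the directory and file paths are placeholders for illustration only.

import os
import sys
import logging

ASSISTANT_OAUTH_SCOPE = 'https://www.googleapis.com/auth/assistant-sdk-prototype'
VR_CACHE_DIR = os.path.expanduser('~/.cache/voice-recognizer')  # assumed location
ASSISTANT_CREDENTIALS = os.path.join(
    VR_CACHE_DIR, 'assistant_credentials.json')                 # assumed filename
OLD_CLIENT_SECRETS = os.path.expanduser('~/client_secrets.json')  # assumed filename

# Returns cached credentials if present; otherwise runs the interactive
# OAuth flow (or exits with instructions if no client secrets are found).
credentials = try_to_get_credentials(
    os.path.join(VR_CACHE_DIR, 'client_secrets.json'))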
Example #2
def setup_assistant():
	""" This sets up the OAuth credentials for the Google Assistant. """
	
	ue.log("Initializing Google Assistant.")
	# Initialize credentials
	credentials = os.path.join(sys.path[0],
								common_settings.ASSISTANT_CREDENTIALS_FILENAME)

	# Load credentials.
	try:
		global creds
		creds = auth_helpers.load_credentials(
			credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE, common_settings.PUBSUB_OAUTH_SCOPE]
		)
	except Exception:
		# Maybe we didn't load the credentials yet?
		# This could happen on first run
		client_secret = os.path.join(sys.path[0], 'client_secrets.json')
		creds = auth_helpers.credentials_flow_interactive(client_secret, common_settings.ASSISTANT_OAUTH_SCOPE)
		auth_helpers.save_credentials(credentials, creds)
		try:
			creds = auth_helpers.load_credentials(
				credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE]
			)
		except Exception as e:
			ue.log_error('Error loading credentials: ' + str(e))
			ue.log_error('Run auth_helpers to initialize new OAuth2 credentials.')
			# Return invalid status code
			return -1
			
	# Define endpoint
	# This might be where you can inject custom API.AI behaviors.
	api_endpoint = ASSISTANT_API_ENDPOINT

	# Create an authorized gRPC channel.
	grpc_channel = auth_helpers.create_grpc_channel(
		api_endpoint, creds
	)
	ue.log('Connecting to '+ str(api_endpoint))
	
	global assistant
	assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)
	
	global msg_queue
	msg_queue = []
	
	return 0 # Initialized Google Assistant successfully
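
Because setup_assistant() signals failure with a -1 status code rather than raising, callers should check the return value before touching the assistant and msg_queue globals it initializes. A minimal usage sketch:

# Usage sketch: a non-zero return means initialization failed.
if setup_assistant() != 0:
    ue.log_error('Google Assistant setup failed; not starting conversation.')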
Example #3
    def init_grpc(self):
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error(
                'Run auth_helpers to initialize new OAuth2 credentials.')
            # Without valid credentials, 'creds' is unbound below, so bail out.
            return

        # Create gRPC channel
        grpc_channel = auth_helpers.create_grpc_channel(
            api_endpoint,
            creds,
            ssl_credentials_file=None,
            grpc_channel_options=None)
        logging.info('Connecting to %s', api_endpoint)
        # Create Google Assistant API gRPC client.
        self._assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            grpc_channel)
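
Note that init_grpc() reads credentials and api_endpoint as free names, so they must resolve in the enclosing scope. A minimal sketch of the context this method appears to assume; the values mirror Examples #4 and #6 and are illustrative, not taken from this snippet's source:

import os
import click

# common_settings is the samples' shared settings module (imported elsewhere).
api_endpoint = 'embeddedassistant.googleapis.com'
credentials = os.path.join(
    click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
    common_settings.ASSISTANT_CREDENTIALS_FILENAME)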
Example #4
    def __init__(self):
        self.api_endpoint = Assistant.ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
            common_settings.ASSISTANT_CREDENTIALS_FILENAME)
        self.verbose = False
        self.audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = common_settings.DEFAULT_GRPC_DEADLINE

        # Setup logging.
        logging.basicConfig()
        # Alternative: logging.basicConfig(filename='assistant.log',
        #     level=logging.DEBUG if self.verbose else logging.INFO)

        self.logger = logging.getLogger("assistant")
        self.logger.setLevel(logging.DEBUG)
        self.creds = auth_helpers.load_credentials(
            self.credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])

        # Create gRPC channel
        grpc_channel = auth_helpers.create_grpc_channel(
            self.api_endpoint, self.creds)
        self.logger.info('Connecting to %s', self.api_endpoint)
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            grpc_channel)

        # Stores an opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state_bytes = None

        # Stores the current volume percentage.
        # Note: No volume change is currently implemented in this sample.
        self.volume_percentage = 50
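
The conversation_state_bytes initialized above is an opaque blob that must be echoed back on the next turn. Example #8 below shows the full request/response loop; the state round-trip itself reduces to a sketch like this, reusing the fields set in this __init__:

# Sketch of the state round-trip (see Example #8 for the full loop).
converse_state = None
if self.conversation_state_bytes:
    # Echo the opaque blob back so the Assistant resumes the dialog.
    converse_state = embedded_assistant_pb2.ConverseState(
        conversation_state=self.conversation_state_bytes)
config = embedded_assistant_pb2.ConverseConfig(
    audio_in_config=embedded_assistant_pb2.AudioInConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(self.audio_sample_rate)),
    audio_out_config=embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(self.audio_sample_rate),
        volume_percentage=self.volume_percentage),
    converse_state=converse_state)
# After each response: if resp.result.conversation_state is non-empty,
# store it in self.conversation_state_bytes for the next request.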
Example #5
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        mic = Microphone()
        continue_conversation = False
        while True:
            if continue_conversation or mic.wakeup('respeaker'):
                continue_conversation = assistant.converse()
Example #6
def main(*args, **kwargs):

    # init ROS
    rospy.init_node('assistant_robin_server')

    # get parameters
    api_endpoint = rospy.get_param(
        '~api_endpoint', 'embeddedassistant.googleapis.com'
    )  # Address of Google Assistant API service.
    credentials = rospy.get_param(
        '~credentials',
        os.path.join(click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
                     common_settings.ASSISTANT_CREDENTIALS_FILENAME)
    )  # Path to read OAuth2 credentials.
    verbose = rospy.get_param('~verbose', False)  # Verbose logging.
    audio_sample_rate = rospy.get_param(
        '~audio_sample_rate', common_settings.DEFAULT_AUDIO_SAMPLE_RATE
    )  # Audio sample rate in hertz.
    audio_sample_width = rospy.get_param(
        '~audio_sample_width', common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
    )  # Audio sample width in bytes.
    audio_iter_size = rospy.get_param(
        '~audio_iter_size', common_settings.DEFAULT_AUDIO_ITER_SIZE
    )  # Size of each read during audio stream iteration in bytes.
    audio_block_size = rospy.get_param(
        '~audio_block_size', common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    )  # Block size in bytes for each audio device read and write operation.
    audio_flush_size = rospy.get_param(
        '~audio_flush_size', common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    )  # Size of silence data in bytes written during flush operation.
    grpc_deadline = rospy.get_param(
        '~grpc_deadline',
        common_settings.DEFAULT_GRPC_DEADLINE)  # gRPC deadline in seconds.

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        rospy.logerr('Error loading credentials: %s', e)
        rospy.logerr('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    rospy.logdebug('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))
    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        global ass
        ass = assistant
        s = rospy.Service('activate', Activate, handle_activate)
        rospy.loginfo("Assistant ready.")

        rospy.spin()
Example #7
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.converse()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Example #8
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)
    # Create Google Assistant API gRPC client.
    assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
    )

    # Interactive by default.
    wait_for_user_trigger = True
    # If file arguments are supplied, don't wait for user trigger.
    if input_audio_file or output_audio_file:
        wait_for_user_trigger = False

    # Stores an opaque blob provided in ConverseResponse that,
    # when provided in a follow-up ConverseRequest,
    # gives the Assistant a context marker within the current state
    # of the multi-Converse()-RPC "conversation".
    # This value, along with MicrophoneMode, supports a more natural
    # "conversation" with the Assistant.
    conversation_state_bytes = None

    # Stores the current volume percentage.
    # Note: No volume change is currently implemented in this sample.
    volume_percentage = 50

    while True:

        conversation_stream.start_recording()
        logging.info('Recording audio request.')

        # This generator yields ConverseRequest to send to the gRPC
        # Google Assistant API.
        def gen_converse_requests():
            converse_state = None
            if conversation_state_bytes:
                logging.debug('Sending converse_state: %s',
                              conversation_state_bytes)
                converse_state = embedded_assistant_pb2.ConverseState(
                    conversation_state=conversation_state_bytes)
            config = embedded_assistant_pb2.ConverseConfig(
                audio_in_config=embedded_assistant_pb2.AudioInConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                ),
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                    volume_percentage=volume_percentage,
                ),
                converse_state=converse_state)
            # The first ConverseRequest must contain the ConverseConfig
            # and no audio data.
            yield embedded_assistant_pb2.ConverseRequest(config=config)
            for data in conversation_stream:
                # Subsequent requests need audio data, but not config.
                yield embedded_assistant_pb2.ConverseRequest(audio_in=data)

        def iter_converse_requests():
            for c in gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in assistant.Converse(iter_converse_requests(),
                                       grpc_deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                conversation_state_bytes = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                volume_percentage = resp.result.volume_percentage
                logging.info('Volume should be set to %s%%', volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                wait_for_user_trigger = False
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                wait_for_user_trigger = True
        logging.info('Finished playing assistant response.')
        conversation_stream.stop_playback()
        # If file arguments are supplied, end the conversation.
        if input_audio_file or output_audio_file:
            break
        if wait_for_user_trigger:
            break
    conversation_stream.close()
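
The loop above relies on three names that are not defined in the snippet: END_OF_UTTERANCE, DIALOG_FOLLOW_ON, and CLOSE_MICROPHONE. In the googlesamples code these snippets derive from, they are aliases for proto enum values; a sketch of the expected bindings:

# Enum aliases assumed by the response-handling loop above.
END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE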
Example #9
File: __main__.py  Project: SebGeek/Kenobi
def main(api_endpoint, credentials, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE]
        )
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint, creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option')
    )
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream,
                         grpc_channel, grpc_deadline) as assistant:
        global new_request
        
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        while True:
            print('Press blue button to send a new request...')
            new_request = False
            # Blink the LED until the button handler flips new_request.
            while not new_request:
                GPIO.output(GPIO_LED, False)
                time.sleep(0.5)
                if new_request:
                    break
                GPIO.output(GPIO_LED, True)
                time.sleep(0.5)

            GPIO.output(GPIO_LED, True)
            _ = assistant.converse()
Example #10

    def begin_play(self):
        """Samples for the Google Assistant API.

        Examples:
          Run the sample with microphone input and speaker output:

            $ python -m googlesamples.assistant

          Run the sample with file input and speaker output:

            $ python -m googlesamples.assistant -i <input file>

          Run the sample with file input and output:

            $ python -m googlesamples.assistant -i <input file> -o <output file>
        """
        ue.log('Initializing Google Samples API.')
        # Setup logging.
        logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception as e:
            #logging.error('Error loading credentials: %s', e)
            #logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
            ue.log_error('Error loading credentials: ' + str(e))
            ue.log_error(
                'Run auth_helpers to initialize new OAuth2 credentials.')
            return

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(
            api_endpoint,
            creds,
            ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
            grpc_channel_options=kwargs.get('grpc_channel_option'))
        ue.log('Connecting to ' + str(api_endpoint))

        # Configure audio source and sink.
        audio_device = None
        if input_audio_file:
            audio_source = audio_helpers.WaveSource(
                open(input_audio_file, 'rb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_source = audio_device = (audio_device
                                           or audio_helpers.SoundDeviceStream(
                                               sample_rate=audio_sample_rate,
                                               sample_width=audio_sample_width,
                                               block_size=audio_block_size,
                                               flush_size=audio_flush_size))
        if output_audio_file:
            audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_sink = audio_device = (audio_device
                                         or audio_helpers.SoundDeviceStream(
                                             sample_rate=audio_sample_rate,
                                             sample_width=audio_sample_width,
                                             block_size=audio_block_size,
                                             flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )
Example #11
    def begin_play(self):
        """Samples for the Google Assistant API.

        Examples:
          Run the sample with microphone input and speaker output:

            $ python -m googlesamples.assistant

          Run the sample with file input and speaker output:

            $ python -m googlesamples.assistant -i <input file>

          Run the sample with file input and output:

            $ python -m googlesamples.assistant -i <input file> -o <output file>
        """
        ue.log('Initializing Google Samples API.')

        # Initialize defaults
        credentials = os.path.join(
            sys.path[0], common_settings.ASSISTANT_CREDENTIALS_FILENAME)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception:
            # Maybe we didn't load the credentials yet?
            # This could happen on first run
            creds = auth_helpers.credentials_flow_interactive(
                credentials, common_settings.ASSISTANT_OAUTH_SCOPE)
            auth_helpers.save_credentials(credentials, creds)
            try:
                creds = auth_helpers.load_credentials(
                    credentials,
                    scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
            except Exception as e:
                ue.log_error('Error loading credentials: ' + str(e))
                ue.log_error(
                    'Run auth_helpers to initialize new OAuth2 credentials.')
                return

        ue.log('Begin play done!')

        # Define endpoint
        # This might be where you can inject custom API.AI behaviors.
        api_endpoint = ASSISTANT_API_ENDPOINT

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(api_endpoint, creds)
        ue.log('Connecting to ' + str(api_endpoint))

        # Set up audio parameters
        audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
        audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
        audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
        audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE

        # Configure audio source and sink.
        audio_device = None
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))

        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )

        ue.log('Audio device: ' + str(audio_device))

        self.assistant = SampleAssistant(conversation_stream, grpc_channel,
                                         common_settings.DEFAULT_GRPC_DEADLINE)
        self.assistant.converse()