Example #1
def setup_assistant():

    # Load credentials.
    try:
        credentials = os.path.join(
            click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
            common_settings.ASSISTANT_CREDENTIALS_FILENAME)
        global creds
        creds = auth_helpers.load_credentials(
            credentials,
            scopes=[
                common_settings.ASSISTANT_OAUTH_SCOPE,
                common_settings.PUBSUB_OAUTH_SCOPE
            ])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return -1

    # Create gRPC channel
    grpc_channel = auth_helpers.create_grpc_channel(ASSISTANT_API_ENDPOINT,
                                                    creds)
    logging.info('Connecting to %s', ASSISTANT_API_ENDPOINT)

    # Create Google Assistant API gRPC client.
    global assistant
    assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)
    return 0
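A minimal usage sketch for setup_assistant() above. The imports, ASSISTANT_API_ENDPOINT, and the creds/assistant globals are assumptions reconstructed from the other examples on this page, not part of the original snippet:

# Sketch only: the module-level context setup_assistant() above assumes.
import logging
import os

import click

from googlesamples.assistant import auth_helpers, common_settings  # assumed path
from googlesamples.assistant import embedded_assistant_pb2  # assumed path

ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
creds = None       # filled in by setup_assistant()
assistant = None   # filled in by setup_assistant()

if setup_assistant() == 0:
    logging.info('Assistant client ready.')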
Example #2
    def __init__(self, conversation_stream, channel, deadline_sec,
                 conversation_stream_wav, nsrunning_nao, nsrunning_laptop,
                 nsrunning_laptop_nao, nsdevice_name, nsconversation_contd,
                 nsbehavior_proxy):
        print("enetered into contructor of assistant")
        self.running_nao = nsrunning_nao
        self.running_laptop = nsrunning_laptop
        self.running_laptop_nao = nsrunning_laptop_nao
        self.device_name = nsdevice_name
        self.conversation_contd = nsconversation_contd

        self.behavior_proxy = nsbehavior_proxy

        self.conversation_stream = None
        #self.conversation_stream = conversation_stream
        self.conversation_stream_sd = conversation_stream
        self.conversation_stream_wav = conversation_stream_wav
        self.conversation_state = None

        # Create Google Assistant API gRPC client.
        # Running on the laptop, or on the laptop connected to the Nao,
        # uses embedded_assistant_pb2_grpc.EmbeddedAssistantStub;
        # running on the Nao itself uses embedded_assistant_pb2.EmbeddedAssistantStub.
        if (self.running_laptop == self.device_name
                or self.running_laptop_nao == self.device_name):
            self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
                channel)
        else:
            self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
                channel)
            #self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)

        self.deadline = deadline_sec
        print("end of the assisant")
Example #3
    def _create_assistant(self):
        # Create gRPC channel
        grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)

        self.logger.info('Connecting to %s', self.api_endpoint)
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)
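A hedged sketch of how the credentials and http_request that _create_assistant() relies on are typically prepared. The helper name and the credentials path are assumptions; the loading logic mirrors Example #5 below:

# Hypothetical helper; mirrors the credential loading shown in Example #5.
import json

import google.auth.transport.requests
import google.oauth2.credentials

def load_assistant_credentials(path='credentials.json'):  # path is an assumption
    with open(path, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(
            token=None, **json.load(f))
    http_request = google.auth.transport.requests.Request()
    credentials.refresh(http_request)
    return credentials, http_request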
Example #4
    def _create_assistant(self):
        # Create gRPC channel
        grpc_channel = auth_helpers.create_grpc_channel(
            self.api_endpoint, self.creds)
        self.logger.info('Connecting to %s', self.api_endpoint)
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            grpc_channel)
Example #5
    def __init__(self):
        self.api_endpoint = ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
        # Setup logging.
        # Optionally: filename='assistant.log',
        # level=logging.DEBUG if self.verbose else logging.INFO.
        logging.basicConfig()
        self.logger = logging.getLogger("assistant")
        self.logger.setLevel(logging.DEBUG)

        # Load OAuth 2.0 credentials.
        try:
            with open(self.credentials, 'r') as f:
                self.credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                self.http_request = google.auth.transport.requests.Request()
                self.credentials.refresh(self.http_request)
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error('Run google-oauthlib-tool to initialize '
                          'new OAuth 2.0 credentials.')
            return

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)
        logging.info('Connecting to %s', self.api_endpoint)

        self.audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = DEFAULT_GRPC_DEADLINE

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            self.grpc_channel)

        # Stores an opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state_bytes = None

        # Stores the current volume percentage.
        # Note: No volume change is currently implemented in this sample.
        self.volume_percentage = 80
Example #6
    def __init__(self, conversation_stream, channel, deadline_sec):
        self.conversation_stream = conversation_stream

        # Opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec
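A hedged instantiation sketch for the constructor above. SampleAssistant is a stand-in name for the enclosing class; the channel and stream are built as in Examples #3 and #10, and the credentials, http_request, audio source/sink, and 185-second deadline are all assumptions:

# Sketch only; SampleAssistant stands in for the enclosing class.
channel = google.auth.transport.grpc.secure_authorized_channel(
    credentials, http_request, 'embeddedassistant.googleapis.com')
conversation_stream = audio_helpers.ConversationStream(
    source=audio_source, sink=audio_sink, iter_size=audio_iter_size)
assistant = SampleAssistant(conversation_stream=conversation_stream,
                            channel=channel,
                            deadline_sec=185)  # assumed gRPC deadline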
Example #7
    def init_grpc(self):
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error(
                'Run auth_helpers to initialize new OAuth2 credentials.')
            return  # creds would be undefined below if loading failed.

        # Create gRPC channel
        grpc_channel = auth_helpers.create_grpc_channel(
            api_endpoint,
            creds,
            ssl_credentials_file=None,
            grpc_channel_options=None)
        logging.info('Connecting to %s', api_endpoint)
        # Create Google Assistant API gRPC client.
        self._assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            grpc_channel)
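init_grpc() references credentials and api_endpoint without defining them; a hedged guess at the module-level names it expects, following the pattern of Example #1:

# Assumed module-level names for init_grpc() above (not from the source).
api_endpoint = 'embeddedassistant.googleapis.com'
credentials = os.path.join(
    click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
    common_settings.ASSISTANT_CREDENTIALS_FILENAME)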
Example #8
  def initialize(self):
    self.ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
    self.END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
    self.DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
    self.CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE
    api_endpoint = self.ASSISTANT_API_ENDPOINT
    credentials = os.path.join(
        click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
        common_settings.ASSISTANT_CREDENTIALS_FILENAME)
    verbose = False
    self.audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
    self.audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
    self.audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
    self.audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    self.audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    self.grpc_deadline = common_settings.DEFAULT_GRPC_DEADLINE

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        self.error('Error loading credentials: %s', e)
        self.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint, creds, ssl_credentials_file="", grpc_channel_options="")
    self.log('Connecting to google')
    # Create Google Assistant API gRPC client.
    self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

    # Configure audio source and sink.
    self.audio_device = None
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source, sink=self.audio_sink,
        iter_size=self.audio_iter_size)
    self.conversation_state_bytes = None
    self.volume_percentage = 70

    self.listen_state(self.startGH, self.args["activation_boolean"], new="on")
    self.log("App started. Now listening for Home Assistant input")
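The listen_state, self.args, and self.log calls suggest this initialize() belongs to an AppDaemon app for Home Assistant. A minimal sketch of the assumed wrapper class; GoogleAssistantApp and the startGH body are hypothetical:

# Sketch of the assumed AppDaemon app skeleton (names are hypothetical).
import appdaemon.plugins.hass.hassapi as hass

class GoogleAssistantApp(hass.Hass):

    def initialize(self):
        ...  # body as in the example above

    def startGH(self, entity, attribute, old, new, kwargs):
        # Called when the activation boolean flips to "on".
        self.log("Activation received")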
Example #9
    def __init__(self):
        self.api_endpoint = Assistant.ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
            common_settings.ASSISTANT_CREDENTIALS_FILENAME)
        self.verbose = False
        self.audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = common_settings.DEFAULT_GRPC_DEADLINE

        # Setup logging.
        # Optionally: filename='assistant.log',
        # level=logging.DEBUG if self.verbose else logging.INFO.
        logging.basicConfig()

        self.logger = logging.getLogger("assistant")
        self.logger.setLevel(logging.DEBUG)
        self.creds = auth_helpers.load_credentials(
            self.credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])

        # Create gRPC channel
        grpc_channel = auth_helpers.create_grpc_channel(
            self.api_endpoint, self.creds)
        self.logger.info('Connecting to %s', self.api_endpoint)
        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2.EmbeddedAssistantStub(
            grpc_channel)

        # Stores an opaque blob provided in ConverseResponse that,
        # when provided in a follow-up ConverseRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Converse()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state_bytes = None

        # Stores the current volume percentage.
        # Note: No volume change is currently implemented in this sample.
        self.volume_percentage = 50
Example #10
def main(api_endpoint, credentials, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE]
        )
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint, creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option')
    )
    logging.info('Connecting to %s', api_endpoint)
    # Create Google Assistant API gRPC client.
    assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
    )

    # Interactive by default.
    wait_for_user_trigger = True
    # If file arguments are supplied, don't wait for user trigger.
    if input_audio_file or output_audio_file:
        wait_for_user_trigger = False

    # Stores an opaque blob provided in ConverseResponse that,
    # when provided in a follow-up ConverseRequest,
    # gives the Assistant a context marker within the current state
    # of the multi-Converse()-RPC "conversation".
    # This value, along with MicrophoneMode, supports a more natural
    # "conversation" with the Assistant.
    conversation_state_bytes = None

    # Stores the current volume percentage.
    # Note: No volume change is currently implemented in this sample.
    volume_percentage = 50

    while True:
        if wait_for_user_trigger:
            click.pause(info='Press Enter to send a new request...')

        conversation_stream.start_recording()
        logging.info('Recording audio request.')

        # This generator yields ConverseRequest to send to the gRPC
        # Google Assistant API.
        def gen_converse_requests():
            converse_state = None
            if conversation_state_bytes:
                logging.debug('Sending converse_state: %s',
                              conversation_state_bytes)
                converse_state = embedded_assistant_pb2.ConverseState(
                    conversation_state=conversation_state_bytes,
                )
            config = embedded_assistant_pb2.ConverseConfig(
                audio_in_config=embedded_assistant_pb2.AudioInConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                ),
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                    volume_percentage=volume_percentage,
                ),
                converse_state=converse_state
            )
            # The first ConverseRequest must contain the ConverseConfig
            # and no audio data.
            yield embedded_assistant_pb2.ConverseRequest(config=config)
            for data in conversation_stream:
                # Subsequent requests need audio data, but not config.
                yield embedded_assistant_pb2.ConverseRequest(audio_in=data)

        def iter_converse_requests():
            for c in gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in assistant.Converse(iter_converse_requests(),
                                       grpc_deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                conversation_state_bytes = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                volume_percentage = resp.result.volume_percentage
                logging.info('Volume should be set to %s%%', volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                wait_for_user_trigger = False
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                wait_for_user_trigger = True
        logging.info('Finished playing assistant response.')
        conversation_stream.stop_playback()
        # If file arguments are supplied, end the conversation.
        if input_audio_file or output_audio_file:
            break

    conversation_stream.close()
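main() uses END_OF_UTTERANCE, DIALOG_FOLLOW_ON, and CLOSE_MICROPHONE without defining them; judging by Example #8, they are module-level aliases of the proto enum values:

# Assumed module-level constants for main() above (mirrors Example #8).
END_OF_UTTERANCE = embedded_assistant_pb2.ConverseResponse.END_OF_UTTERANCE
DIALOG_FOLLOW_ON = embedded_assistant_pb2.ConverseResult.DIALOG_FOLLOW_ON
CLOSE_MICROPHONE = embedded_assistant_pb2.ConverseResult.CLOSE_MICROPHONE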
Example #11
    def _make_service(self, channel):
        return embedded_assistant_pb2.EmbeddedAssistantStub(channel)
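A hedged usage sketch: _make_service() expects an already-authorized gRPC channel, for example one built as in Example #3. The credentials and http_request are prepared as in Example #5:

# Sketch only; client is an instance of the enclosing class.
channel = google.auth.transport.grpc.secure_authorized_channel(
    credentials, http_request, 'embeddedassistant.googleapis.com')
service = client._make_service(channel)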