Exemple #1
0
    def __init__(self, language_code, device_model_id, device_id,
                 conversation_stream, display,
                 channel, deadline_sec, device_handler):
        """Store device/session parameters and build the Assistant stub."""
        # Basic device/session identity.
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.conversation_stream = conversation_stream
        self.display = display

        # When GPIO control is available, watch the stop button in the
        # background so a hardware press can interrupt the assistant.
        if GPIOcontrol:
            self.t3 = Thread(target=self.stopbutton)
            self.t3.start()

        # Opaque server-issued blob from AssistResponse; echoing it on the
        # next AssistRequest lets the Assistant keep context across the
        # multi-Assist()-RPC "conversation".  Together with MicrophoneMode
        # it supports a natural multi-turn dialog.
        self.conversation_state = None
        # The very first exchange always starts a fresh conversation.
        self.is_new_conversation = True

        # gRPC stub for the Google Assistant API over the given channel.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec

        self.device_handler = device_handler
Exemple #2
0
    def assist(self, text_query):
        """Send *text_query* to the Google Assistant and drain the response.

        A fresh authorized gRPC channel is opened on every call and the
        resulting stub is stored on ``self.assistant``.  The streamed
        responses are consumed purely for their side effects; no payload
        is returned.

        :param text_query: the text to send to the Assistant.
        """
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, ASSISTANT_API_ENDPOINT)
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)

        def iter_assist_requests():
            # Single text-only request; audio output config is required by
            # the API even though the volume is muted (0%).
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code='en-US',
                    conversation_state=None,
                    is_new_conversation=True,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id='5a1b2c3d4',
                    device_model_id='assistant',
                ),
                text_query=text_query)
            yield embedded_assistant_pb2.AssistRequest(config=config)

        # FIX: the original built a throwaway list via a comprehension just
        # to drive the streaming RPC; iterate it plainly instead.
        for _ in self.assistant.Assist(iter_assist_requests(), GRPC_DEADLINE):
            pass
Exemple #3
0
 def __init__(self,
              language_code,
              device_model_id,
              device_id,
              cred_json: Path,
              display=True,
              deadline_sec=DEFAULT_GRPC_DEADLINE):
     """Build an Assistant client from an OAuth2 credentials JSON file."""
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     # Opaque dialog-state blob echoed between Assist() RPCs.
     self.conversation_state = None
     # The first exchange always starts a brand-new conversation.
     self.is_new_conversation = True
     self.display = display

     # Load the stored OAuth 2.0 credentials and refresh the access token.
     with open(cred_json, 'r') as _file:
         credentials = google.oauth2.credentials.Credentials(
             token=None, **json.load(_file))
         http_request = google.auth.transport.requests.Request()
         credentials.refresh(http_request)

     # Authorized gRPC channel -> EmbeddedAssistant stub.
     grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
         credentials, http_request, ASSISTANT_API_ENDPOINT)
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
         grpc_channel)
     self.deadline = deadline_sec
Exemple #4
0
    def __init__(self, language_code, device_model_id, device_id,
                 conversation_stream, display, channel, deadline_sec,
                 device_handler):
        """Wire up device identity, audio stream and the Assistant stub."""
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.conversation_stream = conversation_stream
        self.display = display

        # Cache the protobuf enum values used while driving a conversation.
        self.END_OF_UTTERANCE = embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE
        self.DIALOG_FOLLOW_ON = embedded_assistant_pb2.DialogStateOut.DIALOG_FOLLOW_ON
        self.CLOSE_MICROPHONE = embedded_assistant_pb2.DialogStateOut.CLOSE_MICROPHONE
        self.PLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING

        # Opaque server-issued blob echoed back on follow-up requests so
        # the Assistant can keep context across Assist() RPCs; together
        # with MicrophoneMode it supports a natural multi-turn dialog.
        self.conversation_state = None
        # The first exchange always resets to a new conversation.
        self.is_new_conversation = True

        # Assistant API gRPC stub bound to the provided channel.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec

        self.device_handler = device_handler
Exemple #5
0
    def __init__(self, language_code, device_model_id, device_id,
                 conversation_stream, display, channel, deadline_sec,
                 device_handler):
        """Store configuration, recognition flags and the Assistant stub."""
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.conversation_stream = conversation_stream
        self.display = display

        # Opaque blob from AssistResponse, echoed on the follow-up request
        # so the Assistant keeps context across Assist() RPCs.
        self.conversation_state = None
        # NOTE: unlike sibling variants, this one does NOT force a new
        # conversation on the first exchange.
        self.is_new_conversation = False
        # Keyword/emotion flags — updated elsewhere during recognition.
        self.said_keyword = False
        self.said_emotion = ''

        # Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec

        self.device_handler = device_handler
Exemple #6
0
 def __init__(self, language_code, device_model_id, device_id, channel):
     """Minimal client: device identity plus an Assistant stub on *channel*."""
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     # No conversation context yet; filled in from AssistResponse blobs.
     self.conversation_state = None
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
Exemple #7
0
    def __init__(self, language_code='en-US', volume_percentage=100):
        """Resolve credentials/device ids and open the Assistant channel.

        :param language_code: BCP-47 language code for the dialog.
        :param volume_percentage: initial output volume (mutable state).
        :raises RuntimeError: when the stored credentials cannot be refreshed.
        """
        self._volume_percentage = volume_percentage  # Mutable state.
        self._conversation_state = None              # Mutable state.
        self._language_code = language_code

        ##
        credentials = auth_helpers.get_assistant_credentials()
        device_model_id, device_id = device_helpers.get_ids_for_service(credentials)

        logger.info('device_model_id: %s', device_model_id)
        logger.info('device_id: %s', device_id)

        http_request = google.auth.transport.requests.Request()
        try:
            credentials.refresh(http_request)
        except Exception as e:
            # FIX: the original passed ('...%s', e) as two exception args,
            # so the message was never interpolated.  Format it explicitly
            # and chain the original cause for the full traceback.
            raise RuntimeError('Error loading credentials: %s' % e) from e

        api_endpoint = ASSISTANT_API_ENDPOINT
        grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            credentials, http_request, api_endpoint)
        logger.info('Connecting to %s', api_endpoint)
        ##

        self._assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)
        self._device_config = embedded_assistant_pb2.DeviceConfig(
            device_model_id=device_model_id,
            device_id=device_id)
    def __init__(self,
                 secret,
                 language_code="it-IT",
                 device_model_id="cult-robot-telegram",
                 device_id="cult-robot-telegram-app",
                 deadline_sec=185):
        """Best-effort setup from an OAuth *secret* mapping.

        On any failure ``self.ready`` is set to False and the error is
        printed instead of propagating to the caller.
        """
        try:
            # Build OAuth2 credentials directly from the secret mapping.
            credentials = google.oauth2.credentials.Credentials(
                token=None,
                token_uri=secret.get('token_uri'),
                client_id=secret.get('client_id'),
                client_secret=secret.get('client_secret'),
                refresh_token=secret.get('refresh_token'))
            http_request = google.auth.transport.requests.Request()
            # Authorized channel to the Assistant endpoint.
            grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
                credentials, http_request, 'embeddedassistant.googleapis.com')
            self.device_model_id = device_model_id
            self.device_id = device_id
            self.conversation_state = None
            self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
                grpc_channel)
            self.deadline = deadline_sec
            self.language_code = language_code
            self.ready = True
        except Exception as e:
            # Deliberately best-effort: mark the client unusable, report why.
            self.ready = False
            print(e)
Exemple #9
0
    def __init__(self,
                 language_code,
                 device_model_id,
                 device_id,
                 conversation_stream,
                 channel,
                 deadline_sec,
                 device_handler,
                 on_conversation_start=None,
                 on_conversation_end=None,
                 on_speech_recognized=None):
        """Assistant client with optional conversation lifecycle callbacks."""
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.conversation_stream = conversation_stream

        # Optional hooks fired around a conversation / on speech recognition.
        self.on_conversation_start = on_conversation_start
        self.on_conversation_end = on_conversation_end
        self.on_speech_recognized = on_speech_recognized

        # Opaque server-issued context blob from AssistResponse; echoed on
        # follow-up requests so the Assistant keeps state across the
        # multi-Assist()-RPC "conversation".
        self.conversation_state = None

        # gRPC stub for the Google Assistant API.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
        self.deadline = deadline_sec

        self.device_handler = device_handler
Exemple #10
0
    def __init__(self, config):
        """
        Google Assistant integration.

        Loads configuration and OAuth credentials, then opens an
        authorized gRPC channel to the Google Assistant API.
        """
        self.language = "en-US"
        self.deviceID = ""
        self.modelID = ""
        self.loadConfig(config)

        # FIX: use a context manager so the credentials file is closed
        # even when json.load/Credentials raises (the original open/close
        # pair leaked the handle on error).
        with open(CREDENTIALS_PATH, "r") as f:
            self.credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))

        # FIX: create the request object before the try block so the
        # channel setup below cannot hit a NameError when Request()
        # itself would have failed inside the try.
        req = google.auth.transport.requests.Request()
        try:
            self.credentials.refresh(req)
        except Exception:
            # FIX: narrowed from a bare ``except:`` that also swallowed
            # KeyboardInterrupt/SystemExit.  Still best-effort: log and
            # continue with unrefreshed credentials, as before.
            self.logger.error(
                "Google Cloud API credentials validation failed.")

        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, req, 'embeddedassistant.googleapis.com')

        # Opaque dialog-context blob echoed between Assist() RPCs.
        self.conversation_state = None
        self.gassistInstance = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)
Exemple #11
0
    def __init__(
        self,
        device_id,
        conversation_stream,
        channel,
    ):
        """Client preconfigured with module-level language/model constants."""
        self.language_code = LANGUAGE_CODE
        self.device_model_id = DEVICE_MODEL_ID
        self.display = False
        self.device_id = device_id
        self.conversation_stream = conversation_stream

        # Opaque context blob from AssistResponse; echoing it on follow-up
        # requests keeps state across the multi-Assist()-RPC conversation.
        self.conversation_state = None
        # The first exchange always starts a fresh conversation.
        self.is_new_conversation = True

        # Google Assistant API gRPC client with the module default deadline.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
        self.deadline = DEFAULT_GRPC_DEADLINE
Exemple #12
0
    def __init__(self, language_code, device_model_id, device_id, channel,
                 deadline_sec):
        """Audio-playing Assistant client with its own worker-thread state."""
        self.language_code = language_code
        self.device_model_id = device_model_id
        self.device_id = device_id
        self.sample_rate = 16000
        # FIX: the original assigned volume_percentage twice (here and
        # again after the queue setup with the same value); once suffices.
        self.volume_percentage = 100

        # Opaque context blob echoed on follow-up AssistRequests so the
        # Assistant keeps state across the multi-RPC conversation.
        self.conversation_state = None
        # Force reset of first conversation.
        self.is_new_conversation = True

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            channel)
        self.deadline = deadline_sec

        # Playback / capture bookkeeping.
        self.player = Player()
        self.playing = False

        self.listening = False
        self.audio_queue = queue.Queue()

        # Worker-thread coordination.
        self.done = False
        self.listening_event = threading.Event()
        self.thread = None
 def __create_assistant(self):
     """(Re)create the authorized channel and the Assistant API stub."""
     self.channel = google.auth.transport.grpc.secure_authorized_channel(
         self.credential, self.http_request,
         GoogleAssistant.ASSISTANT_API_ENDPOINT)
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
         self.channel)
     # Reset dialog state: a freshly created stub always begins a
     # brand-new conversation with the Google Assistant.
     self.conversationStateBytes = None
     self.isNewConversation = True
Exemple #14
0
def gassist(text_query, lang_code='en-US'):
    """Send a one-shot text query to the Google Assistant.

    :param text_query: the text to ask the Assistant.
    :param lang_code: BCP-47 language code for the dialog.
    :returns: the Assistant's supplemental display text, or None.
    """
    logging.info(text_query)
    # Load OAuth 2.0 credentials; abort the process when they are missing
    # or cannot be refreshed.
    try:
        with open(Config.CREDENTIALS, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            session = requests.Session()
            http_request = google.auth.transport.requests.Request(session)
            credentials.refresh(http_request)
    except Exception:
        # FIX: the bound ``as e`` was unused; the traceback is already
        # captured via exc_info.
        logging.error('Error loading credentials', exc_info=True)
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    # Create an assistant.
    assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)

    def assist(text_query):
        def iter_assist_requests():
            # Text-only request; the audio output config is required by
            # the API but effectively muted (volume 0).
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=lang_code,
                    conversation_state=None,
                    is_new_conversation=True,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=Config.DEVICE_ID,
                    device_model_id=Config.DEVICE_MODEL_ID,
                ),
                text_query=text_query,
            )
            yield embedded_assistant_pb2.AssistRequest(config=config)

        text_response = None
        html_response = None
        for resp in assistant.Assist(iter_assist_requests(),
                                     DEFAULT_GRPC_DEADLINE):
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response

    # FIX: the html part of the pair was never used; make that explicit.
    text, _ = assist(text_query)
    logging.info(text)
    # Release network resources before returning.
    grpc_channel.close()
    session.close()
    return text
Exemple #15
0
 def __init__(self, language_code, device_model_id, device_id, display, channel, deadline_sec):
     """Store identity/dialog defaults and bind the Assistant stub."""
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     # Fresh client: no server context yet; first turn opens a new dialog.
     self.conversation_state = None
     self.is_new_conversation = True
     self.display = display
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
     self.deadline = deadline_sec
Exemple #16
0
    def _create_assistant(self):
        """Open the authorized gRPC channel and build the Assistant stub."""
        # FIX: the channel was created as an unused local variable while
        # the stub was built from ``self.grpc_channel`` (not set in this
        # method) — store the freshly created channel on the instance and
        # build the stub from it.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)
        self.logging.info('Connecting to %s', self.api_endpoint)

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)
Exemple #17
0
    def connect(self):
        """Authorize against the Assistant API and build the stub."""
        creds, reqs = getcredentials(self.cfg.credentials_file)
        # Authorized gRPC channel -> EmbeddedAssistant stub.
        channel = google.auth.transport.grpc.secure_authorized_channel(
            creds, reqs, ASSISTANT_API_ENDPOINT)
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)

        logging.info(f"Connecting to {ASSISTANT_API_ENDPOINT}")
Exemple #18
0
        def get_assistant():
            """Refresh the credentials and return a fresh Assistant stub."""
            # Create an authorized gRPC channel from refreshed credentials.
            http_request = google.auth.transport.requests.Request()
            self.credentials.refresh(http_request)
            authorized_channel = google.auth.transport.grpc.secure_authorized_channel(
                self.credentials, http_request, ASSISTANT_API_ENDPOINT)

            _LOGGER.debug('Connecting to %s', ASSISTANT_API_ENDPOINT)
            return embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
                authorized_channel)
 def __init__(self, channel):
     """Hard-wired unity-chatbot device client over *channel*."""
     self.language_code = 'en-US'
     self.device_model_id = 'unity-chatbot-24f7a'
     self.device_id = 'unity-chatbot-24f7a-unity-chatbot-c3ln9f'
     # No server dialog context yet; the first turn starts a new one.
     self.conversation_state = None
     self.is_new_conversation = True
     self.display = False
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
     # gRPC deadline: 185 seconds (3 min + 5 s grace).
     self.deadline = 60 * 3 + 5
Exemple #20
0
 def __init__(
         self, language_code, device_model_id, device_id, conversation_stream, channel, deadline_sec, device_handler
 ):
     """Store device/session parameters and build the Assistant stub."""
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     self.conversation_stream = conversation_stream
     # Dialog context blob; None until the server hands one back.
     self.conversation_state = None
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
     self.deadline = deadline_sec
     self.device_handler = device_handler
 def __init__(self, language_code, device_model_id, device_id, channel,
              deadline_sec, audio_priority, volume):
     """Client variant carrying audio-priority and volume settings."""
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     # Fresh dialog: no context blob yet; force a new conversation.
     self.conversation_state = None
     self.is_new_conversation = True
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
     self.deadline = deadline_sec
     # Playback configuration.
     self.audio_priority = audio_priority
     self.volume = volume
Exemple #22
0
 def __init__(self, language_code, device_model_id, device_id,
              display, channel, deadline_sec):
     # NOTE(review): the device_model_id / device_id parameters are
     # immediately overwritten with hard-coded values below, so the
     # caller's arguments are silently ignored.  Also the model-id
     # literal ends with a trailing space ('...6wh4mk ') — confirm
     # whether that is intentional before "fixing" it.
     device_model_id = 'gestures-ba928-desktoppc-6wh4mk '
     device_id = 'gestures-ba928'
     self.language_code = language_code
     self.device_model_id = device_model_id
     self.device_id = device_id
     # Opaque dialog-context blob echoed between Assist() RPCs.
     self.conversation_state = None
     # Force reset of first conversation.
     self.is_new_conversation = True
     self.display = display
     # Google Assistant API gRPC client bound to the provided channel.
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
         channel
     )
     self.deadline = deadline_sec
Exemple #23
0
    def __init__(self, lab, language_code='en-US', deadline_sec=grpc_deadline):
        """Assistant client driving the 'fermi' labwork.

        :param lab: the labwork definition this assistant runs.
        :param language_code: BCP-47 language code for the dialog.
        :param deadline_sec: gRPC deadline for Assist() calls.
        """
        self.deadline = deadline_sec
        self.logger = logging.getLogger("fermi")
        self.quiet = True
        self.language_code = language_code

        # Define the labwork
        self.lab = lab

        # Load OAuth 2.0 credentials.
        credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                   'credentials.json')
        try:
            with open(credentials, 'r') as f:
                credentials = google.oauth2.credentials.Credentials(token=None,
                                                                    **json.load(f))
                http_request = google.auth.transport.requests.Request()
                credentials.refresh(http_request)
        except Exception as e:
            logging.error('Error loading credentials: %s', e)
            logging.error('Run google-oauthlib-tool to initialize '
                          'new OAuth 2.0 credentials.')
            sys.exit(-1)

        # Opaque blob provided in AssistResponse that, when echoed in a
        # follow-up AssistRequest, keeps context across Assist() RPCs.
        # FIX: the original assigned this twice (once at the top of
        # __init__ and again here); a single assignment suffices.
        self.conversation_state = None
        # Force reset of first conversation.
        self.is_new_conversation = True

        # Create an authorized gRPC channel.
        grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            credentials, http_request, api_endpoint)
        self.logger.info('Connecting to %s', api_endpoint)

        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)
Exemple #24
0
    def __init__(self, credentials, language_code, commands, device_model_id,
                 device_id):
        """ Initializer

        :param credentials: credentials filename. Should be defined in file config.txt
        :param language_code: language code e.g. en-US
        :param commands: dictionary of commands for the language
        :param device_model_id: model ID of the registered device
        :param device_id: ID or the registered device
        """
        self.language_code = language_code
        self.commands = commands.values()
        self.device_model_id = device_model_id
        self.device_id = device_id

        try:
            with open(credentials, 'r') as f:
                c = google.oauth2.credentials.Credentials(token=None,
                                                          **json.load(f))
                http_request = google.auth.transport.requests.Request()
                c.refresh(http_request)
                self.channel = google.auth.transport.grpc.secure_authorized_channel(
                    c, http_request, ASSISTANT_API_ENDPOINT)
        except Exception:
            logging.debug("Cannot connect to Google API")
            # FIX: bare ``raise`` re-raises the active exception without
            # re-binding it, keeping the original traceback intact.
            raise

        # FIX: the original wrapped these lines in
        # ``try: ... except Exception as e: raise e`` — a no-op wrapper
        # that only obscured the code; removed.
        audio_source = SoundDeviceStream()
        audio_sink = None
        self.conversation_stream = ConversationStream(
            audio_source, audio_sink)

        # Dialog context + Assistant stub over the stored channel.
        self.conversation_state = None
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.channel)
        self.GRPC_DEADLINE = TIMEOUT
        # Listener registries and run flag, filled in by callers.
        self.text_listeners = []
        self.start_conversation_listeners = []
        self.stop_conversation_listeners = []
        self.run_assistant = False
 def __init__(self):
     # Self-contained setup: resolves config paths relative to the project
     # root, then delegates credentials/audio/channel creation to private
     # helpers (their call order matters — each one sets attributes the
     # next consumes).
     root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
     self.language_code = 'en-US'
     self.device_config_path = os.path.join(root_dir, 'device_config.json')
     self.device_credentials_path = os.path.join(root_dir,
                                                 'credentials.json')
     self._set_credentials()
     self._load_device_config()
     self._create_conversation_stream()
     self.display = False
     self._set_http_request()
     # NOTE(review): helper name spells 'gprc' (sic); it is defined
     # elsewhere, so the call must keep the misspelling.
     self._create_gprc_channel()
     # Opaque dialog-context blob echoed between Assist() RPCs; the first
     # exchange starts a brand-new conversation.
     self.conversation_state = None
     self.is_new_conversation = True
     # Assistant stub over the channel created by _create_gprc_channel().
     self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
         self.channel)
     # gRPC deadline: 185 seconds (3 min + 5 s grace).
     self.deadline = 60 * 3 + 5
     self.device_handler = device_helpers.DeviceRequestHandler(
         self.device_id)
Exemple #26
0
 def _make_service(self, channel):
     """Return an EmbeddedAssistant gRPC stub bound to *channel*."""
     return embedded_assistant_pb2_grpc.EmbeddedAssistantStub(channel)
def assist(handler_input: HandlerInput, text_query: str) -> Response:
    """Proxy *text_query* to the Google Assistant and answer via Alexa.

    Streams the Assistant's audio reply into a local PCM/WAV file,
    re-encodes it as MP3, uploads it to S3 and returns an Alexa response
    that plays a short-lived signed URL to that MP3.  Conversation state
    is persisted in the Alexa session attributes so follow-up turns keep
    context.
    """
    _logger.info('Input to be processed is: %s', text_query)

    # Get constants
    api_endpoint = data.GOOGLE_ASSISTANT_API['api_endpoint']
    deadline_sec = data.DEFAULT_GRPC_DEADLINE

    # Create an authorized gRPC channel.
    credentials = skill_helpers.get_credentials(handler_input)
    http_request = Request()
    grpc_channel = secure_authorized_channel(credentials, http_request,
                                             api_endpoint)
    _logger.info('Connecting to %s', api_endpoint)

    # Create Assistant stub
    assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)

    # Initial state
    text_response = None
    mic_open = False

    # Open the response PCM file in which we are going to stream Assistant's response
    fp = open(data.RESPONSE_PCM_FILE, 'wb')

    # Init WAVE file parser
    wavep = wave.open(fp, 'wb')
    wavep.setsampwidth(data.DEFAULT_AUDIO_SAMPLE_WIDTH)
    wavep.setnchannels(1)
    wavep.setframerate(data.DEFAULT_AUDIO_SAMPLE_RATE)

    # The magic happens: stream the RPC responses, appending each audio
    # chunk to the WAV file and tracking dialog state as we go.
    for resp in assistant.Assist(
            _iter_assist_requests(handler_input, text_query), deadline_sec):
        if len(resp.audio_out.audio_data) > 0:
            _logger.info('Playing assistant response.')
            buf = resp.audio_out.audio_data
            buf = audio_helpers.align_buf(buf, data.DEFAULT_AUDIO_SAMPLE_WIDTH)
            wavep.writeframes(buf)
        if resp.dialog_state_out.conversation_state:
            # Persist the opaque context blob so the next turn can echo it.
            conversation_state = resp.dialog_state_out.conversation_state
            conversation_state = list(
                conversation_state) if conversation_state is not None else None
            _logger.debug('Updating conversation state.')
            skill_helpers.set_session_attribute(handler_input,
                                                'conversation_state',
                                                conversation_state)
        if resp.dialog_state_out.microphone_mode == _DIALOG_FOLLOW_ON:
            # Assistant expects another utterance: keep the Alexa session open.
            mic_open = True
            _logger.info('Expecting follow-on query from user.')
        elif resp.dialog_state_out.microphone_mode == _CLOSE_MICROPHONE:
            mic_open = False
        if resp.dialog_state_out.supplemental_display_text:
            text_response = resp.dialog_state_out.supplemental_display_text
            _logger.info('Supplemental display text: %s', text_response)

    _logger.info('Finished playing assistant response.')

    # TODO: info on audio file, error if response is empty
    wavep.close()
    fp.close()

    # Encode Assistant's response in an MP3 we can stream to Alexa
    audio_helpers.encode_from_pcm_to_mp3(data.RESPONSE_PCM_FILE,
                                         data.RESPONSE_MP3_FILE)

    # S3 bucket; the object key is per-device so concurrent users don't
    # overwrite each other's responses.
    bucket = os.environ['S3_BUCKET']
    key = skill_helpers.get_device_id(handler_input)

    # Upload the response MP3 to the bucket
    _s3.upload_file(data.RESPONSE_MP3_FILE, Bucket=bucket, Key=key)

    # Generate a short-lived signed url to the MP3
    params = {'Bucket': bucket, 'Key': key}
    url = _s3.generate_presigned_url(ClientMethod='get_object',
                                     Params=params,
                                     ExpiresIn=10)
    url = escape(url)

    # Create Alexa response; keep the session alive when the Assistant
    # asked a follow-on question (mic_open).
    response_builder = handler_input.response_builder
    response_builder.speak(f'<audio src="{url}"/>')
    if text_response:
        response_builder.set_card(
            SimpleCard(title='Google Assistant', content=text_response))
    response_builder.set_should_end_session(not mic_open)

    return response_builder.response
Exemple #28
0
    def __init__(self):
        """Standalone Assistant client: credentials, audio I/O and stub."""
        self.language_code = "en-US"
        self.device_model_id = "iot-a2-275604-iot-a3-yx3rus"
        self.device_id = DEVICE_ID
        self.display = None

        # Last recognized user utterance (filled in elsewhere).
        self.userSpeech = ""

        device_handler = device_helpers.DeviceRequestHandler(self.device_id)

        credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json')

        # Load OAuth 2.0 credentials.
        try:
            with open(credentials, 'r') as f:
                credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
                http_request = google.auth.transport.requests.Request()
                credentials.refresh(http_request)
        except Exception as e:
            # FIX: print() does not interpolate '%s'; format explicitly.
            print('Error loading credentials: %s' % e)
            print('Run google-oauthlib-tool to initialize '
                    'new OAuth 2.0 credentials.')
            sys.exit(-1)

        channel = google.auth.transport.grpc.secure_authorized_channel(credentials,
        http_request, 'embeddedassistant.googleapis.com')
        # FIX: same %s-in-print bug as above.
        print('Connecting to %s' % 'embeddedassistant.googleapis.com')

        # One sound-device stream shared as both source and sink.  The
        # original's double ``audio_device or SoundDeviceStream(...)``
        # dance created the stream exactly once and reused it; this
        # states that directly.
        audio_device = audio_helpers.SoundDeviceStream(
            sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
            sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
            block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
            flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        )
        audio_source = audio_device
        audio_sink = audio_device

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
            sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
        )

        # Opaque context blob from AssistResponse; echoed on follow-up
        # requests so the Assistant keeps state across Assist() RPCs.
        self.conversation_state = None
        # Force reset of first conversation.
        self.is_new_conversation = True

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            channel
        )
        # gRPC deadline: 185 seconds (3 min + 5 s grace).
        self.deadline = 60 * 3 + 5
Exemple #29
0
    def __init__(self):
        """Set up an authorized Google Assistant gRPC client.

        Loads OAuth 2.0 credentials from the google-oauthlib-tool app dir,
        opens an authorized gRPC channel to the Assistant API, loads (or
        registers) the device instance, and initializes per-conversation
        state.  On any unrecoverable setup failure the error is logged and
        the method returns early, leaving the instance partially
        initialized (callers should check for that).
        """
        self.api_endpoint = ASSISTANT_API_ENDPOINT
        self.credentials = os.path.join(
            click.get_app_dir('google-oauthlib-tool'), 'credentials.json')
        self.project_id = 'tpvsmartdemo'
        self.device_model_id = 'tpvsmartdemo-googlepi-7rwwpw'
        self.device_config = os.path.join(
            click.get_app_dir('googlesamples-assistant'), 'device_config.json')
        self.language_code = 'en-US'
        self.audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
        self.audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
        self.audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
        self.audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        self.audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
        self.grpc_deadline = DEFAULT_GRPC_DEADLINE

        # Setup logging.
        logging.basicConfig()
        self.logging = logging.getLogger('ASSISTANT')
        self.logging.setLevel(logging.DEBUG)

        # Opaque blob provided in AssistResponse that,
        # when provided in a follow-up AssistRequest,
        # gives the Assistant a context marker within the current state
        # of the multi-Assist()-RPC "conversation".
        # This value, along with MicrophoneMode, supports a more natural
        # "conversation" with the Assistant.
        self.conversation_state = None

        self.current_state = "Online"

        # Load OAuth 2.0 credentials.
        try:
            with open(self.credentials, 'r') as f:
                self.credentials = google.oauth2.credentials.Credentials(
                    token=None, **json.load(f))
                self.http_request = google.auth.transport.requests.Request()
                self.credentials.refresh(self.http_request)
        except Exception as e:
            self.logging.error('Error loading credentials: %s', e)
            self.logging.error('Run google-oauthlib-tool to initialize '
                               'new OAuth 2.0 credentials.')
            return

        # Create an authorized gRPC channel.
        self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
            self.credentials, self.http_request, self.api_endpoint)
        self.logging.info('Connecting to %s', self.api_endpoint)

        # Create Google Assistant API gRPC client.
        self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
            self.grpc_channel)

        # Load the device instance from disk, registering a new one on the
        # first run (or whenever the config file is missing/unreadable).
        try:
            with open(self.device_config) as f:
                self.device = json.load(f)
                self.device_id = self.device['id']
                self.device_model_id = self.device['model_id']
                self.logging.info("Using device model %s and device id %s",
                                  self.device_model_id, self.device_id)
        except Exception as e:
            self.logging.warning('Device config not found: %s' % e)
            self.logging.info('Registering device')
            # Fix: previously these misconfigurations were only logged and
            # registration proceeded anyway; abort setup instead.
            if not self.device_model_id:
                self.logging.error('Option --device-model-id required '
                                   'when registering a device instance.')
                return
            if not self.project_id:
                self.logging.error('Option --project-id required '
                                   'when registering a device instance.')
                return

            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (self.api_endpoint, self.project_id))
            self.device_id = str(uuid.uuid1())
            payload = {
                'id': self.device_id,
                'model_id': self.device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            self.session = google.auth.transport.requests.AuthorizedSession(
                self.credentials)
            r = self.session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                # Fix: bail out on failure instead of logging success and
                # persisting a config for a device that was never registered.
                self.logging.error('Failed to register device: %s', r.text)
                return

            self.logging.info('Device registered: %s', self.device_id)
            # parents=True so a missing app-dir parent doesn't raise.
            pathlib.Path(os.path.dirname(
                self.device_config)).mkdir(parents=True, exist_ok=True)
            with open(self.device_config, 'w') as f:
                json.dump(payload, f)

        # Fix: keep the handler on the instance (it was a dropped local).
        self.device_handler = device_helpers.DeviceRequestHandler(
            self.device_id)
        self.logging.info('Init Google Assistant Success')
        self.current_state = "Online"
        self.user_request = ""
        self.update_state("Online")
        self.update_data("")
def main(input_audio_file, output_audio_file):
    """File based sample for the Google Assistant API.

    Streams audio read from ``input_audio_file`` to the Assistant, plays
    the spoken response through the default audio output device, and
    forwards the transcript plus the Assistant's display text to a TCP
    endpoint (the module-level ``host``/``port``).  ``output_audio_file``
    is accepted for CLI compatibility but the response is played rather
    than written to it.

    Examples:
      $ python -m audiofileinput -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Create gRPC stubs.
    assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)

    audio = pyaudio.PyAudio()
    stream_out = audio.open(
        format=audio.get_format_from_width(2),
        channels=1,
        rate=16000, input=False, output=True)
    stream_out.start_stream()

    # Generate gRPC requests.
    def gen_assist_requests(input_stream):
        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            language_code=lang,
            conversation_state=b''
        )
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
                volume_percentage=100,
            ),
            dialog_state_in=dialog_state_in,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=device_id,
                device_model_id=device_model_id,
            )
        )
        # Send first AssistRequest message with configuration.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        while True:
            # Read user request from file.
            data = input_stream.read(block_size)
            if not data:
                break
            # Send following AssistRequest messages with audio chunks.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)

    # Fix: ts was unbound (NameError) if the display-text branch fired
    # before any speech_results arrived.
    ts = ''
    try:
        for resp in assistant.Assist(gen_assist_requests(input_audio_file),
                                     grpc_deadline):
            # Iterate on AssistResponse messages.
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
            if resp.speech_results:
                ts = ' '.join(r.transcript for r in resp.speech_results)
                logging.info('Transcript of user request: "%s".', ts)

            if resp.dialog_state_out.supplemental_display_text:
                logging.info('Assistant display text: "%s"',
                             resp.dialog_state_out.supplemental_display_text)
                # Forward the exchange to the display server; the context
                # manager guarantees the socket is closed even on error.
                with socket.socket() as s:
                    s.connect((host, port))
                    s.send(('I :' + ts + '<br/>Robot :' +
                            resp.dialog_state_out.supplemental_display_text
                            ).encode('UTF-8'))
            if len(resp.audio_out.audio_data) > 0:
                # Play the assistant response instead of writing it to
                # output_audio_file.
                stream_out.write(resp.audio_out.audio_data)
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                logging.info('Device request: %s', device_request)
    finally:
        # Fix: release audio resources even if the RPC raises.
        stream_out.stop_stream()
        stream_out.close()
        audio.terminate()