Example #1
0
def ticker_loop(delta_time):
    """Pump the asyncio event loop once per engine tick.

    Always returns True so the engine keeps the ticker registered.
    """
    try:
        # Calling stop() first makes run_forever() process the callbacks
        # that are currently ready and then return immediately.
        loop.stop()
        loop.run_forever()
    except Exception as exc:
        ue.log_error(exc)
    return True
Example #2
0
	def run(self):
		"""Stream one voice request to the Assistant and play back the reply.

		Returns:
			True when the Assistant expects a follow-on query from the user.
		"""
		follow_on = False

		ue_site.conversation_stream.start_recording()
		ue.log('Recording audio request.')

		# Iterate over the ConverseResponse proto messages streamed back
		# by the gRPC Google Assistant API.
		responses = ue_site.assistant.Converse(self.gen_converse_requests(),
											self.deadline)
		for resp in responses:
			if resp.error.code != code_pb2.OK:
				# Server-side failure: report it and abandon this turn.
				ue.log_error('Server error: ' + str(resp.error.message))
				break

			if resp.event_type == END_OF_UTTERANCE:
				# The user finished speaking; stop capturing audio.
				ue.log('End of audio request detected')
				ue_site.conversation_stream.stop_recording()

			if resp.result.spoken_request_text:
				# Speech-to-text transcript of what the user said.
				ue.log('Transcript of user request: ' +
							 str(resp.result.spoken_request_text))

			if len(resp.audio_out.audio_data) > 0:
				# Audio reply is ready: queue it for playback.
				ue_site.conversation_stream.write(resp.audio_out.audio_data)

			if resp.result.conversation_state:
				# Keep the updated dialog state for the next turn.
				self.conversation_state = resp.result.conversation_state

			if resp.result.volume_percentage != 0:
				# The Assistant requested a volume change.
				ue_site.conversation_stream.volume_percentage = (
					resp.result.volume_percentage
				)

			if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
				# Assistant expects the user to reply.
				follow_on = True
				ue.log('Expecting follow-on query from user.')
			elif resp.result.microphone_mode == CLOSE_MICROPHONE:
				# Conversation is complete.
				follow_on = False

		ue.log('Finished playing assistant response.')
		ue_site.conversation_stream.stop_playback()
		return follow_on
Example #3
0
    def write(self, buf):
        """Queue raw audio bytes for playback.

        Returns the number of bytes accepted, mirroring the file-like
        ``write`` contract expected by the conversation stream.
        """
        try:
            # Hand the bytes to Unreal's procedural sound wave, which
            # handles the actual buffering and playback.
            self.ue_procedural_audio_wave.queue_audio(buf)
        except Exception as err:
            ue.log_error("Could not write audio to buffer! Error: " + str(err))
        return len(buf)
    def converse(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False

        self.conversation_stream.start_recording()
        ue.log('Recording audio request.')

        def iter_converse_requests():
            # Log each outgoing request (minus the audio payload) and
            # start playback once the request stream is exhausted.
            for c in self.gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Converse(iter_converse_requests(),
                                            self.deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                # CONSISTENCY FIX: ue.log_error takes a single message
                # string elsewhere in this file; format eagerly instead of
                # passing lazy %-style arguments.
                ue.log_error('server error: ' + str(resp.error.message))
                break
            if resp.event_type == END_OF_UTTERANCE:
                # The user finished speaking; stop capturing audio.
                ue.log('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                ue.log('Transcript of user request: "' +
                       str(resp.result.spoken_request_text) + '".')
                ue.log('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                # Audio reply ready: queue it for playback.
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                # BUG FIX: was resp.result.spnoken_response_text (typo),
                # which raised AttributeError whenever the IFTTT TTS
                # transcript was populated.
                ue.log('Transcript of TTS response '
                       '(only populated from IFTTT): "' +
                       str(resp.result.spoken_response_text) + '".')
            if resp.result.conversation_state:
                # Keep the updated dialog state for the next turn.
                self.conversation_state = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                # The Assistant requested a volume change.
                self.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                ue.log('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
        ue.log('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
Example #5
0
def setup_assistant():
	""" This sets up the OAuth credentials for the Google Assistant.

	Returns:
		0 on success, -1 when credentials could not be loaded.

	Side effects:
		Sets the module globals ``creds``, ``assistant`` and ``msg_queue``.
	"""
	ue.log("Initializing Google Assistant.")
	# Initialize credentials (file lives next to the scripts).
	credentials = os.path.join(sys.path[0],
								common_settings.ASSISTANT_CREDENTIALS_FILENAME)

	# Both scopes are needed: Assistant for conversations, Pub/Sub for the
	# intent subscription used elsewhere in this module.
	scopes = [common_settings.ASSISTANT_OAUTH_SCOPE,
			common_settings.PUBSUB_OAUTH_SCOPE]

	# Load credentials.
	try:
		global creds
		creds = auth_helpers.load_credentials(credentials, scopes=scopes)
	except Exception:
		# Maybe we didn't load the credentials yet?
		# This could happen on first run
		client_secret = os.path.join(sys.path[0], 'client_secrets.json')
		# NOTE(review): the interactive flow requests only the Assistant
		# scope — confirm whether the Pub/Sub scope must be granted here
		# too for the subscription to authenticate.
		creds = auth_helpers.credentials_flow_interactive(client_secret, common_settings.ASSISTANT_OAUTH_SCOPE)
		auth_helpers.save_credentials(credentials, creds)
		try:
			# CONSISTENCY FIX: reload with the same scopes as the initial
			# load above; previously this retry asked only for the
			# Assistant scope, yielding narrower credentials.
			creds = auth_helpers.load_credentials(credentials, scopes=scopes)
		except Exception as e:
			ue.log_error('Error loading credentials: ' + str(e))
			ue.log_error('Run auth_helpers to initialize new OAuth2 credentials.')
			# Return invalid status code
			return -1

	# Define endpoint
	# This might where you can inject custom API.AI behaviors?
	api_endpoint = ASSISTANT_API_ENDPOINT

	# Create an authorized gRPC channel.
	grpc_channel = auth_helpers.create_grpc_channel(
		api_endpoint, creds
	)
	ue.log('Connecting to '+ str(api_endpoint))

	global assistant
	assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

	global msg_queue
	msg_queue = []

	return 0 # Initialized Google Assistant successfully
Example #6
0
  def __init__(self, msg_queue):
    """Create the Pub/Sub pull subscription and keep the message queue.

    Args:
      msg_queue: queue used to hand intents back to the game thread.
    """
    Thread.__init__(self)

    self.shutdown_flag = Event()
    # BUG FIX: the queue was previously dropped on the floor, leaving
    # self.msg_queue undefined for consumers (e.g. run()'s handlers).
    self.msg_queue = msg_queue

    # Create a new pull subscription on the given topic
    pubsub_client = pubsub.Client(project=PUBSUB_PROJECT_ID, credentials=creds)
    topic_name = 'unreal_google_assistant'
    topic = pubsub_client.topic(topic_name)

    subscription_name = 'UnrealGoogleAssistantSub'
    self.subscription = topic.subscription(subscription_name)
    try:
      self.subscription.create()
      ue.log('Subscription created')
    except Exception as e:
      # NOTE(review): any create() failure lands here, not only the
      # already-exists case — the message may be misleading for e.g.
      # auth or network errors.
      ue.log_error('Subscription already exists! '+str(e))
Example #7
0
  def run(self):
    """ Poll for new messages from the pull subscription and dispatch
    actions based on the 'intent' field of each JSON payload. """

    while True:
      # Pull without blocking so the loop stays responsive.
      results = self.subscription.pull(return_immediately=True)

      for ack_id, message in results:

          # convert bytes to string and slice off the b'...' wrapper
          # http://stackoverflow.com/questions/663171/is-there-a-way-to-substring-a-string-in-python
          json_string = str(message.data)[3:-2]
          json_string = json_string.replace('\\\\', '')
          ue.log(json_string)

          # create dict from json string
          try:
              json_obj = json.loads(json_string)
          except Exception as e:
              # BUG FIX: previously execution fell through with json_obj
              # undefined (NameError on the next line); skip unparseable
              # messages instead. Also format the message eagerly —
              # ue.log_error takes a single string in this file.
              ue.log_error('JSON Error: ' + str(e))
              continue

          # get intent from json
          intent = json_obj['intent']
          ue.log('pub/sub: ' + intent)

          # perform action based on intent
          if intent == 'move_character':
              # BUG FIX: the original line was missing its closing
              # parenthesis (SyntaxError) and mixed tab indentation.
              ue.log(str(json_obj['move']))

      # ack received messages so they are not re-delivered
      if results:
        self.subscription.acknowledge([ack_id for ack_id, message in results])

      time.sleep(0.25)
 def tick(self, delta_time):
     """Count down a one-second timer; log and restart it each time it elapses."""
     self.timer -= delta_time
     if self.timer > 0:
         return
     ue.log_error('1 second elapsed !')
     self.timer = 1.0
 def tick(self, delta_time):
     """Per-frame callback: decrement self.timer by delta_time and, once it
     reaches zero, emit a log line and restart the one-second countdown."""
     remaining = self.timer - delta_time
     if remaining <= 0:
         # NOTE(review): log_error is used for an informational message —
         # presumably for visibility in the editor log; confirm.
         ue.log_error('1 second elapsed !')
         remaining = 1.0
     self.timer = remaining
 def is_grpc_error_unavailable(e):
     """Return True (after logging) when *e* is a gRPC RpcError whose
     status code is UNAVAILABLE; otherwise return False."""
     if not isinstance(e, grpc.RpcError):
         return False
     if e.code() != grpc.StatusCode.UNAVAILABLE:
         return False
     ue.log_error('grpc unavailable error: %s', e)
     return True
    def begin_play(self):
        """Samples for the Google Assistant API.

        Examples:
          Run the sample with microphone input and speaker output:

            $ python -m googlesamples.assistant

          Run the sample with file input and speaker output:

            $ python -m googlesamples.assistant -i <input file>

          Run the sample with file input and output:

            $ python -m googlesamples.assistant -i <input file> -o <output file>
        """
        # NOTE(review): this method references several names that are not
        # defined in this scope (verbose, credentials, kwargs, api_endpoint,
        # input_audio_file, output_audio_file, audio_sample_rate,
        # audio_sample_width, audio_block_size, audio_flush_size,
        # audio_iter_size). It appears adapted from the googlesamples CLI
        # main(); as written these lookups would raise NameError — confirm
        # where they are meant to come from.
        ue.log('Initializing Google Samples API.')
        # Setup logging.
        logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception as e:
            #logging.error('Error loading credentials: %s', e)
            #logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
            ue.log_error('Error loading credentials: %s', e)
            ue.log_error(
                'Run auth_helpers to initialize new OAuth2 credentials.')
            return

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(
            api_endpoint,
            creds,
            ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
            grpc_channel_options=kwargs.get('grpc_channel_option'))
        ue.log('Connecting to %s', api_endpoint)

        # Configure audio source and sink.
        # When an input file is given, read audio from it; otherwise fall
        # back to (and lazily create) the shared sound-device stream.
        audio_device = None
        if input_audio_file:
            audio_source = audio_helpers.WaveSource(
                open(input_audio_file, 'rb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_source = audio_device = (audio_device
                                           or audio_helpers.SoundDeviceStream(
                                               sample_rate=audio_sample_rate,
                                               sample_width=audio_sample_width,
                                               block_size=audio_block_size,
                                               flush_size=audio_flush_size))
        # Same pattern for the sink: file when given, else the shared
        # sound-device stream (created at most once overall).
        if output_audio_file:
            audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_sink = audio_device = (audio_device
                                         or audio_helpers.SoundDeviceStream(
                                             sample_rate=audio_sample_rate,
                                             sample_width=audio_sample_width,
                                             block_size=audio_block_size,
                                             flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        # NOTE(review): conversation_stream is a local that is never stored
        # or used after this point in the visible code — confirm whether
        # this method is truncated.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )
    def begin_play(self):
        """Initialize credentials, audio streams and the Assistant client.

        Loads (or interactively creates) OAuth2 credentials, opens an
        authorized gRPC channel to the Assistant API, wires one
        sound-device stream in as both audio source and sink of a
        ConversationStream, then starts a conversation.
        """
        ue.log('Initializing Google Samples API.')

        # The credentials file sits next to the scripts.
        credentials = os.path.join(
            sys.path[0], common_settings.ASSISTANT_CREDENTIALS_FILENAME)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception:
            # First run: walk the user through the interactive OAuth flow,
            # persist the result, then try loading once more.
            creds = auth_helpers.credentials_flow_interactive(
                credentials, common_settings.ASSISTANT_OAUTH_SCOPE)
            auth_helpers.save_credentials(credentials, creds)
            try:
                creds = auth_helpers.load_credentials(
                    credentials,
                    scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
            except Exception as e:
                ue.log_error('Error loading credentials: ' + str(e))
                ue.log_error(
                    'Run auth_helpers to initialize new OAuth2 credentials.')
                return

        ue.log('Begin play done!')

        # Define endpoint
        # This might where you can inject custom API.AI behaviors?
        api_endpoint = ASSISTANT_API_ENDPOINT

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(api_endpoint, creds)
        ue.log('Connecting to ' + str(api_endpoint))

        # Audio parameters come straight from the sample defaults.
        sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
        sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
        iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
        block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE

        # A single sound-device stream serves as both the microphone
        # source and the speaker sink.
        audio_device = audio_helpers.SoundDeviceStream(
            sample_rate=sample_rate,
            sample_width=sample_width,
            block_size=block_size,
            flush_size=flush_size)
        audio_source = audio_device
        audio_sink = audio_device

        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=iter_size,
            sample_width=sample_width,
        )

        ue.log('Audio device: ' + str(audio_device))

        self.assistant = SampleAssistant(conversation_stream, grpc_channel,
                                         common_settings.DEFAULT_GRPC_DEADLINE)
        self.assistant.converse()