Example #1
    def converse(self):
        """Send a voice request to the Assistant and playback the response.

        Records from self.conversation_stream, streams the audio to the
        Assistant API, plays back the audio replies, and collects
        transcripts/non-audio responses into self.chatbox.

        Returns: True if conversation should continue.
        """
        continue_conversation = False

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_converse_requests():
            # Log each outgoing request (minus the audio payload), then
            # switch the stream to playback once the requests are exhausted.
            for c in self.gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Converse(iter_converse_requests(),
                                            self.deadline):
            assistant_helpers.log_converse_response_without_audio(resp)

            if len(resp.audio_out.audio_data) == 0:
                # Keep non-audio responses for later inspection.
                logging.debug('Non-audio response: %s', resp)
                self.chatbox.append(resp)
                logging.debug('chatbox length: %d', len(self.chatbox))
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                # BUG FIX: previously appended to the bare name `chatbox`
                # (NameError); the transcript belongs on self.chatbox, as
                # the non-audio branch above already does.
                self.chatbox.append(resp.result.spoken_request_text)
                logging.debug('chatbox length: %d', len(self.chatbox))
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                # Persist state so the next request continues this dialog.
                self.conversation_state = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                self.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
Example #2
  def startGH(self, entity, attribute, old, new, kwargs):
    """AppDaemon state callback: run Assistant conversations in a loop.

    Repeats record/converse/playback cycles until the Assistant closes
    the microphone, then turns *entity* off.
    """
    wait_for_user = False
    while not wait_for_user:
      self.conversation_stream.start_recording()
      self.log('Recording audio request.')

      def gen_converse_requests():
        # First request carries the config (audio formats + any saved
        # conversation state); the rest stream microphone audio.
        converse_state = None
        if self.conversation_state_bytes:
            converse_state = embedded_assistant_pb2.ConverseState(
                conversation_state=self.conversation_state_bytes)
        config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=int(self.audio_sample_rate)),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=int(self.audio_sample_rate),
                volume_percentage=self.volume_percentage),
            converse_state=converse_state)
        yield embedded_assistant_pb2.ConverseRequest(config=config)
        for data in self.conversation_stream:
           yield embedded_assistant_pb2.ConverseRequest(audio_in=data)

      def iter_converse_requests():
        for c in gen_converse_requests():
            assistant_helpers.log_converse_request_without_audio(c)
            yield c
        self.conversation_stream.start_playback()

      for resp in self.assistant.Converse(iter_converse_requests(), self.grpc_deadline):
        assistant_helpers.log_converse_response_without_audio(resp)
        if resp.error.code != code_pb2.OK:
            # NOTE(review): returns with recording/playback still active;
            # confirm whether the streams should be stopped here.
            self.error('server error: ' + resp.error.message)
            return
        if resp.event_type == self.END_OF_UTTERANCE:
            self.log('End of audio request detected')
            self.conversation_stream.stop_recording()
        if resp.result.spoken_request_text:
            self.log('Transcript of user request: ' + resp.result.spoken_request_text)
            self.log('Playing assistant response.')
        if len(resp.audio_out.audio_data) > 0:
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.result.spoken_response_text:
            self.log('Transcript of TTS response (only populated from IFTTT): ' + resp.result.spoken_response_text)
        if resp.result.conversation_state:
            # BUG FIX: was assigned to a local, so the returned state was
            # discarded; persist it for the next ConverseState above.
            self.conversation_state_bytes = resp.result.conversation_state
        if resp.result.volume_percentage != 0:
            # BUG FIX: was assigned to a local, so the requested volume
            # never reached the next ConverseConfig; store it on self.
            self.volume_percentage = resp.result.volume_percentage
            self.log('Volume should be set to ' + str(self.volume_percentage))
        if resp.result.microphone_mode == self.DIALOG_FOLLOW_ON:
            wait_for_user = False
            self.log('Expecting follow-on query from user.')
        elif resp.result.microphone_mode == self.CLOSE_MICROPHONE:
            wait_for_user = True
            self.log("Closing the microphone now.")
      self.log('Finished playing assistant response.')
      self.conversation_stream.stop_playback()
    self.turn_off(entity)
Example #3
    def converse(self):
        """Send a voice request to the Assistant and playback the response.

        Also scans the recognized transcript for local 'trigger' commands
        (GPIO device on/off, Pi shutdown) and answers those with canned
        audio clips instead of the Assistant's reply.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        # Feedback chime: signal that recording is about to start.
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_converse_requests():
            # Log each outgoing request (minus audio), then switch the
            # stream to playback once the requests are exhausted.
            for c in self.gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Converse(iter_converse_requests(),
                                            self.deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                usr = resp.result.spoken_request_text
                # Lowercase once instead of re-computing per comparison;
                # the redundant .lower() on constant literals is dropped.
                usr_l = str(usr).lower()
                # NOTE(review): the 'trigger' keyword check is
                # case-sensitive (unlike the ones below) — confirm intent.
                if 'trigger' in str(usr):

                    if 'shut down' in usr_l:
                        subprocess.Popen([
                            "aplay",
                            "/home/pi/GassistPi/sample-audio-files/Pi-Close.wav"
                        ],
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                        # Let the farewell clip finish before powering off.
                        time.sleep(10)
                        os.system("sudo shutdown -h now")
                        break
                    else:
                        # Match each configured device name against the
                        # transcript and drive its GPIO pin accordingly.
                        for num, name in enumerate(var):
                            if name.lower() in usr_l:
                                pinout = gpio[num]
                                if 'on' in usr_l:
                                    GPIO.output(pinout, 1)
                                    subprocess.Popen([
                                        "aplay",
                                        "/home/pi/GassistPi/sample-audio-files/Device-On.wav"
                                    ],
                                                     stdin=subprocess.PIPE,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.PIPE)
                                elif 'off' in usr_l:
                                    GPIO.output(pinout, 0)
                                    subprocess.Popen([
                                        "aplay",
                                        "/home/pi/GassistPi/sample-audio-files/Device-Off.wav"
                                    ],
                                                     stdin=subprocess.PIPE,
                                                     stdout=subprocess.PIPE,
                                                     stderr=subprocess.PIPE)
                                return continue_conversation
                else:
                    # NOTE(review): this skips the rest of THIS response's
                    # processing (audio playback, state/volume/mic-mode
                    # updates) for any utterance without 'trigger'.
                    # Preserved as-is, but it looks suspicious — confirm.
                    continue
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                self.conversation_state = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                self.conversation_stream.volume_percentage = (
                    resp.result.volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
Example #4
    def converse(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        print("\nTOP OF CONVERSE FUNCTION\n")
        # None on the first pass, so the file-based stream setup below runs
        # before the first converse cycle.
        continue_conversation = None
        # NOTE(review): this loop has no break or return, so despite the
        # docstring the method never actually returns — confirm intent.
        while True:

            if not continue_conversation:
                # Build a wave-file source and a sound-device sink for a new
                # conversation.
                # NOTE(review): afile, asink, and convstream are never used —
                # self.conversation_stream is set to self.csf instead. Looks
                # like dead code or a lost assignment; confirm which stream
                # was intended.
                afile = audio_helpers.WaveSource(open(iaud, 'rb'),
                                                 sample_rate=audsw,
                                                 sample_width=audsw)
                asink = (audio_helpers.SoundDeviceStream(sample_rate=audsr,
                                                         sample_width=audsw,
                                                         block_size=audbs,
                                                         flush_size=audfs))
                convstream = audio_helpers.ConversationStream(
                    source=afile,
                    sink=asink,
                    iter_size=auditer,
                    sample_width=audsw)
                self.conversation_stream = self.csf

            # Presumably blocks until a hotword/trigger is detected — the
            # helper is defined elsewhere; verify.
            fdetect()

            self.conversation_stream.start_recording()
            logging.info('Recording audio request.')

            def iter_converse_requests():
                # Log each outgoing request (minus audio), then switch the
                # stream to playback once the requests are exhausted.
                for c in self.gen_converse_requests():
                    assistant_helpers.log_converse_request_without_audio(c)
                    yield c
                self.conversation_stream.start_playback()

            # This generator yields ConverseResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Converse(iter_converse_requests(),
                                                self.deadline):

                assistant_helpers.log_converse_response_without_audio(resp)

                if resp.error.code != code_pb2.OK:
                    logging.error('server error: %s', resp.error.message)
                    break

                if resp.event_type == END_OF_UTTERANCE:
                    logging.info('End of audio request detected')
                    self.conversation_stream.stop_recording()
                    # Switch to the microphone stream for the remainder of
                    # the conversation.
                    self.conversation_stream = self.conversation_stream_mic

                if resp.result.spoken_request_text:
                    logging.info('Transcript of user request: "%s".',
                                 resp.result.spoken_request_text)
                    logging.info('Playing assistant response.')

                if len(resp.audio_out.audio_data) > 0:
                    self.conversation_stream.write(resp.audio_out.audio_data)

                if resp.result.spoken_response_text:
                    logging.info(
                        'Transcript of TTS response '
                        '(only populated from IFTTT): "%s".',
                        resp.result.spoken_response_text)

                if resp.result.conversation_state:
                    # Persist state so the next request continues the dialog.
                    self.conversation_state = resp.result.conversation_state

                if resp.result.volume_percentage != 0:
                    self.conversation_stream.volume_percentage = (
                        resp.result.volume_percentage)

                if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                    continue_conversation = True
                    logging.info('Expecting follow-on query from user.')

                elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                    continue_conversation = False

            logging.info('Finished playing assistant response.')
            self.conversation_stream.stop_playback()