Example #1
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            dialog_state_in = embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code, conversation_state=b'')
            if self.conversation_state:
                dialog_state_in.conversation_state = self.conversation_state
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=dialog_state_in,
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        display_text = None
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text
        return display_text
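The conversation_state returned in dialog_state_out is stored on the object and replayed into the next DialogStateIn, which is what keeps follow-up questions contextual. A minimal usage sketch, assuming this method lives on a wrapper class like the SampleTextAssistant from the official textinput sample (the constructor arguments here are illustrative, not the author's):

    # Two calls on the same instance: the second query can refer back to the
    # first because self.conversation_state is carried between requests.
    assistant = SampleTextAssistant(language_code='en-US',
                                    device_model_id='my-model-id',
                                    device_id='my-device-id',
                                    channel=grpc_channel,
                                    deadline_sec=185)
    print(assistant.assist(text_query='who wrote The Hobbit?'))
    print(assistant.assist(text_query='when was he born?'))  # follow-up turn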
Example #2
    def assistant_triggered(self):
        while True:
            self.conversation_stream.start_recording()
            # Read one chunk and treat anything other than pure silence
            # (all zero bytes) as sound. The original compared a 10-byte read
            # against 20 zero bytes, which could never match, and printed a
            # second, different read.
            chunk = self.conversation_stream.read(10)
            if chunk != b'\x00' * len(chunk):
                print(chunk)
                logging.info('Heard Sound')

                def iter_log_assist_requests():
                    for c in self.gen_assist_requests():
                        assistant_helpers.log_assist_request_without_audio(c)
                        yield c
                    # logging.debug('Reached end of AssistRequest iteration.')

                # This generator yields AssistResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Assist(iter_log_assist_requests(),
                                                  self.deadline):
                    assistant_helpers.log_assist_response_without_audio(resp)
                    if resp.event_type == END_OF_UTTERANCE:
                        # logging.info('End of audio request detected.')
                        logging.info('Stopping recording.')
                        self.conversation_stream.stop_recording()
                        break
                    if resp.speech_results:
                        transcript = ' '.join(r.transcript
                                              for r in resp.speech_results)
                        logging.info('Transcript of user request: "%s".',
                                     transcript)
                        if any(word.lower() in transcript.lower()
                               for word in hot_phrases):
                            print('message==|DTriggered')
                            sys.stdout.flush()
                            return
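The all-zero comparison above is a very coarse gate: any nonzero byte, including line noise, counts as "sound". A hedged alternative sketch that gates on RMS level instead, assuming 16-bit little-endian PCM as the LINEAR16 config elsewhere in these examples suggests (audioop is in the standard library through Python 3.12):

    import audioop

    def is_sound(chunk: bytes, threshold: int = 500) -> bool:
        """Return True if the RMS level of a 16-bit PCM chunk exceeds threshold."""
        return audioop.rms(chunk, 2) > threshold  # 2 = bytes per sample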
Example #3
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
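microphone_mode is what drives multi-turn dialogs here: DIALOG_FOLLOW_ON means the Assistant expects another utterance, CLOSE_MICROPHONE ends the exchange. A hedged sketch of the calling loop, modeled on the pushtotalk sample pattern (the assistant name is assumed to expose the assist() method above):

    while True:
        input('Press Enter to send a new request... ')
        while assistant.assist():  # True while microphone_mode == DIALOG_FOLLOW_ON
            pass                   # immediately record the follow-on utterance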
Example #4
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = None
        html_response = None
        device_actions_futures = []

        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            concurrent.futures.wait(device_actions_futures)

        return text_response, html_response
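screen_out.data arrives as raw HTML bytes when a display is registered. A hedged sketch of consuming the returned tuple, writing the HTML out for a browser (the file name is arbitrary):

    text, html = assistant.assist(text_query='weather in Paris')
    if html:
        with open('assistant-screen-out.html', 'wb') as f:
            f.write(html)  # raw HTML bytes from screen_out.data
    elif text:
        print(text)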
Example #5
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        # Start and immediately stop recording so the conversation stream is
        # opened and ready to play back the Assistant's audio response.
        self.conversation_stream.start_recording()
        self.conversation_stream.stop_recording()

        # The language is hardcoded to US English for text-to-speech.
        def iter_assist_requests():
            dialog_state_in = embedded_assistant_pb2.DialogStateIn(
                language_code='en-US',
                conversation_state=b''
            )
            if self.conversation_state:
                dialog_state_in.conversation_state = self.conversation_state
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=dialog_state_in,
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req
            self.conversation_stream.start_playback()

        display_text = None
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        self.conversation_stream.close()
        return display_text
Example #6
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        def get_assistant():
            # Create an authorized gRPC channel.
            http_request = google.auth.transport.requests.Request()
            self.credentials.refresh(http_request)
            grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
                self.credentials, http_request, ASSISTANT_API_ENDPOINT)

            _LOGGER.debug('Connecting to %s', ASSISTANT_API_ENDPOINT)
            return embedded_assistant_pb2_grpc.EmbeddedAssistantStub(
                grpc_channel)

        text_response = None
        html_response = None
        assistant = get_assistant()
        for resp in assistant.Assist(iter_assist_requests(), self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
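get_assistant() assumes self.credentials holds refreshable OAuth2 user credentials. A hedged sketch of loading them, following the pattern the official samples use for the JSON file written by google-oauthlib-tool (the path is a placeholder):

    import json

    import google.oauth2.credentials

    with open('/path/to/credentials.json', 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))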
Example #7
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            # Log the outgoing assistant request (audio omitted).
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = None
        html_response = None
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            # Log the incoming assistant response (audio omitted).
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
Example #8
File: assist.py  Project: algobot76/firebot
    def assist_text(self, text_query):
        """Send a text request to the Assistant and receive text back.
        """
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = 'Google Assistant gave no response'
        html_response = None
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        if any(p in text_response.lower()
               for p in ['public ip', 'ip address', '::; 1']):
            text_response = 'I need permission to display that information'
        return text_response, html_response
Example #9
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        # Whether the user's speech triggered a custom (local) action.
        isUserAction = False
        led.turnOn(Colors.BLUE)

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                # Strip spaces so the transcript matches the phrases below.
                speech = (''.join(r.transcript
                                  for r in resp.speech_results)).replace(' ', '')
                logging.info('Transcript of user request: "%s".', speech)

                # Korean voice commands: "lights off" variants and LED colors.
                if speech in ['전등꺼', '불꺼', '꺼주세요', '빨간색', '초록색', '흰색']:
                    isUserAction = True

                    if speech in ['전등꺼', '불꺼', '꺼주세요']:  # "turn the light off"
                        led.turnOff()
                        self.currentColor = None
                    if speech == '빨간색':  # "red"
                        led.turnOn(Colors.RED)
                        self.currentColor = Colors.RED
                    if speech == '초록색':  # "green"
                        led.turnOn(Colors.GREEN)
                        self.currentColor = Colors.GREEN
                    if speech == '흰색':  # "white"
                        led.turnOn(Colors.WHITE)
                        self.currentColor = Colors.WHITE
            if len(resp.audio_out.audio_data) > 0 and not isUserAction:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        # Restore the LED to its last commanded color (or turn it off).
        if self.currentColor is None:
            led.turnOff()
        else:
            led.turnOn(self.currentColor)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
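led and Colors come from the author's GPIO helper, which is not shown. A hypothetical stand-in that satisfies the interface used above (real wiring code would drive GPIO pins rather than print):

    from enum import Enum

    class Colors(Enum):
        RED = 'red'
        GREEN = 'green'
        BLUE = 'blue'
        WHITE = 'white'

    class led:
        """Namespace stub matching the led.turnOn(...) / led.turnOff() calls."""
        @staticmethod
        def turnOn(color):
            print('LED on:', color.value)

        @staticmethod
        def turnOff():
            print('LED off')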
Example #10
    def assist(self, commands=None, is_respon=True):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        moving = False
        device_actions_futures = []
        # Running speech-to-text transcript (stt_list is kept from the
        # original but never populated in this snippet).
        stt_list = []
        stt_tmp = ''
        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)

            # voice recognition end
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            # Speech-to-text: keep the running transcript and check it
            # against the caller-supplied command words.
            if resp.speech_results:
                stt_tmp = ' '.join(r.transcript for r in resp.speech_results)
                if commands is not None:
                    for command in commands:
                        if command in stt_tmp:
                            moving = True
                            break
            # Playback is suppressed when a command matched (moving) or when
            # the caller passed is_respon=False.
            if not moving and is_respon:
                if len(resp.audio_out.audio_data) > 0:
                    if not self.conversation_stream.playing:
                        self.conversation_stream.stop_recording()
                        self.conversation_stream.start_playback()
                        logging.info('Playing assistant response.')
                    self.conversation_stream.write(resp.audio_out.audio_data)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state

            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage

            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()

        return continue_conversation, stt_tmp
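A hedged usage sketch for this variant: the caller inspects the returned transcript to do its own command handling, and can mute the spoken reply entirely (every name besides assist() is assumed):

    continue_conversation, transcript = assistant.assist(
        commands=['forward', 'stop'], is_respon=False)
    if 'forward' in transcript:
        print('driving forward')  # caller-side handling of the matched command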
Example #11
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        face_flag = False

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()

            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                string = resp.dialog_state_out.supplemental_display_text
                target_string = "face recognition"
                if target_string in string.lower():
                    # Only set a flag here: running the recognition script is
                    # deferred until playback finishes (see the end of this
                    # method and the subprocess sketch after this example).
                    logging.info('Face recognition requested.')
                    face_flag = True
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        if face_flag:
            face_flag = False
            logging.info('Executing face recognition.')
            os.system('./my_recog_run.sh')
        self.conversation_stream.stop_playback()
        return continue_conversation
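os.system spawns a fresh shell per call, which is why the original snippet's commented-out attempt to cd into the module directory before running the script could never work. A hedged sketch using subprocess instead (the module path /home/pi/MagicMirror/modules/third_party/FaceRecognition is taken from those commented-out experiments):

    import subprocess

    # Run the recognition script with an explicit working directory.
    subprocess.run(
        ['./my_recog_run.sh'],
        cwd='/home/pi/MagicMirror/modules/third_party/FaceRecognition',
        check=False)  # set check=True to raise on a non-zero exit status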
Example #12
    def assist(self, text_query=None):
        """Send a voice request to the Assistant and playback the response.

        :returns: True if conversation should continue.
        :rtype: boolean
        """
        continue_conversation = False
        device_actions_futures = []

        # Recording starts even when a text_query is given; in that case the
        # captured audio is never sent, since iter_log_assist_requests yields
        # only the single config request.
        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            if text_query:
                config = embedded_assistant_pb2.AssistConfig(
                    audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                        encoding='LINEAR16',
                        sample_rate_hertz=16000,
                        volume_percentage=0,
                    ),
                    dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                        language_code=self.language_code,
                        conversation_state=self.conversation_state,
                        is_new_conversation=self.is_new_conversation,
                    ),
                    device_config=embedded_assistant_pb2.DeviceConfig(
                        device_id=self.device_id,
                        device_model_id=self.device_model_id,
                    ),
                    text_query=text_query,
                )
                self.is_new_conversation = False
                req = embedded_assistant_pb2.AssistRequest(config=config)
                assistant_helpers.log_assist_request_without_audio(req)
                yield req
            else:
                for c in self.gen_assist_requests():
                    assistant_helpers.log_assist_request_without_audio(c)
                    yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
Example #13
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.

        previous_event_type = 0
        for resp in self.assistant.Assist(iter_log_assist_requests(), self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                print('End of audio request detected.')
                print('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                # Only act on the transcript once the previous event closed
                # the audio request, i.e. the utterance is complete.
                if previous_event_type == END_OF_UTTERANCE:
                    print(resp.speech_results[0].transcript)
                    transcript = str(resp.speech_results[0].transcript).lower()

                    [query, itemType] = audioIsSpotifySearch(transcript)
                    if query != "":
                        data = {"query": query, "type":itemType}
                        client.publish("smartmirror/search", str(data))
                        break
                    elif transcript.find("skip song") != -1:
                        client.publish("smartmirror/skip", "")
                        break
                    elif transcript.find("pause music") != -1:
                        client.publish("smartmirror/pause", "")
                        break
                    elif transcript.find("play music") != -1:
                        client.publish("smartmirror/resume", "")
                        break

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    print('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)
            
            previous_event_type = resp.event_type

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
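client and audioIsSpotifySearch are names from elsewhere in the author's project. A hedged sketch of the MQTT setup this snippet relies on, using paho-mqtt (the broker address is an assumption; the smartmirror/* topics are the author's):

    import paho.mqtt.client as mqtt

    client = mqtt.Client()
    client.connect('localhost', 1883)  # broker host/port are assumptions
    client.loop_start()                # background thread services publish()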
Example #14
    def assist(self):
        """Send a voice request to the Assistant and playback the response.
        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('무엇을 도와드릴까요? :)')  # "What can I help you with? :)"

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                myscript = ''.join(r.transcript for r in resp.speech_results)
                logging.info('Transcript of user request: "%s".', myscript)

                # Each branch below matches a Korean voice phrase and
                # publishes the mapped MQTT topic/payload; the first match
                # returns immediately. See the table-driven sketch after
                # this example.
                # auto_control
                if '자동 제어' in myscript:
                    client.publish("home/auto", "1")
                    return continue_conversation
                if '수동 제어' in myscript:
                    client.publish("home/auto", "0")
                    return continue_conversation

                # bathroom_control
                if '화장실 불 켜' in myscript:
                    client.publish("home/bathroom_state/led", "1")
                    return continue_conversation
                if '화장실 불 꺼' in myscript:
                    client.publish("home/bathroom_state/led", "0")
                    return continue_conversation

                # room_control
                if '방 불 켜' in myscript:
                    client.publish("home/room_state/room_led", "1")
                    return continue_conversation
                if '방 불 꺼' in myscript:
                    client.publish("home/room_state/room_led", "0")
                    return continue_conversation

                # livingroom_control
                if '거실 불 켜' in myscript:
                    client.publish("home/livingroom_state/livingroom_led", "1")
                    return continue_conversation
                if '거실 불 꺼' in myscript:
                    client.publish("home/livingroom_state/livingroom_led", "0")
                    return continue_conversation

                if '블라인드 열어' in myscript:
                    client.publish("home/livingroom_state/blind", "1")
                    return continue_conversation
                if '블라인드 닫아' in myscript:
                    client.publish("home/livingroom_state/blind", "0")
                    return continue_conversation

                if '에어컨 켜' in myscript:
                    client.publish("home/livingroom_state/aircon", "1")
                    return continue_conversation
                if '에어컨 꺼' in myscript:
                    client.publish("home/livingroom_state/aircon", "0")
                    return continue_conversation

                if '히터 켜' in myscript:
                    client.publish("home/livingroom_state/heater", "1")
                    return continue_conversation
                if '히터 꺼' in myscript:
                    client.publish("home/livingroom_state/heater", "0")
                    return continue_conversation

                if '제습기 켜' in myscript:
                    client.publish("home/livingroom_state/airdry", "1")
                    return continue_conversation
                if '제습기 꺼' in myscript:
                    client.publish("home/livingroom_state/airdry", "0")
                    return continue_conversation

                if '환풍기 켜' in myscript:
                    client.publish("home/livingroom_state/fan", "1")
                    return continue_conversation
                if '환풍기 꺼' in myscript:
                    client.publish("home/livingroom_state/fan", "0")
                    return continue_conversation

                # entrance_control
                if '차고 문 열어' in myscript:
                    client.publish("home/entrance_state/garage_door", "1")
                    return continue_conversation
                if '차고 문 닫아' in myscript:
                    client.publish("home/entrance_state/garage_door", "0")
                    return continue_conversation

                if '현관문 열어' in myscript:
                    client.publish("home/entrance_state/door", "1")
                    return continue_conversation
                if '현관문 닫아' in myscript:
                    client.publish("home/entrance_state/door", "0")
                    return continue_conversation

                if '정원 불 켜' in myscript:
                    client.publish("home/entrance_state/garden_led", "1")
                    return continue_conversation
                if '정원 불 꺼' in myscript:
                    client.publish("home/entrance_state/garden_led", "0")
                    return continue_conversation

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
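The long if-chain maps each Korean voice command onto one MQTT topic/payload pair, so it collapses naturally into a table. A hedged refactor sketch, behavior-preserving under the same top-to-bottom, first-match-wins order (only a few phrases shown; the rest follow the same pattern):

    COMMANDS = {
        '자동 제어': ('home/auto', '1'),                    # "auto control"
        '수동 제어': ('home/auto', '0'),                    # "manual control"
        '화장실 불 켜': ('home/bathroom_state/led', '1'),   # "bathroom light on"
        '화장실 불 꺼': ('home/bathroom_state/led', '0'),   # "bathroom light off"
        # ... remaining phrase -> (topic, payload) pairs elided ...
    }

    # Drop-in replacement for the chain, inside `if resp.speech_results:`.
    for phrase, (topic, payload) in COMMANDS.items():
        if phrase in myscript:
            client.publish(topic, payload)
            return continue_conversation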
Example #15
    def assist(self):
        """Send a voice request to the Assistant and play back the response.

        Returns: True if conversation should continue.
        """
        # The docstring must be the first statement in the function; in the
        # original it sat below these global declarations and was silently
        # discarded.
        global spokenAnswerOld
        global spokenAnswer
        global followUp
        global followUpSentence
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))

                # Store the complete spoken text for use as input to the NLP
                # model.
                self.storeAsText(resp.speech_results)

            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text
                spokenAnswer = display_text

            if len(resp.audio_out.audio_data) > 0:

                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()

                    self.spokenQuestion = str(self.spokenQuestion)

                    # I only want the text
                    self.spokenQuestion = re.findall(r'"(.+?)"',
                                                     self.spokenQuestion)

                    text = str(self.spokenQuestion[0]).lower()

                    # Sometimes the SDK can't recognize "Reachy"
                    matches = [
                        "reachy", "richie", "reach", "ritchie", "re", "read",
                        "rich", 'rici', 'robot', 'robots'
                    ]
                    text = "\"" + text + "\""
                    if (text != ""):
                        results = predictor.predict(sentence=text)

                        if followUp:
                            if text == "\"yes\"":
                                if (followUpSentence == "cross"):
                                    self.cross_arms(robot)
                                if (followUpSentence == "raise"):
                                    self.raise_hand(robot)

                                followUp = False
                        else:
                            #If our NLP model can classify the sentence
                            if (results["verbs"]):
                                semantic_labeling = results["verbs"][0][
                                    "description"]
                                print(semantic_labeling)
                                subject = self.find_between(
                                    semantic_labeling, "[ARG0: ", "]")
                                verb = self.find_between(
                                    semantic_labeling, "[V: ", "]")
                                part = self.find_between(
                                    semantic_labeling, "[ARG1: ", "]")
                                print(part)

                                #If we recognize a sentence to move on
                                if verb in dict_bewegen:
                                    #If Reachy is the subject or the object;
                                    #the original `subject or part in matches`
                                    #only tested subject for truthiness, so it
                                    #matched whenever a subject was found.
                                    if subject in matches or part in matches:
                                        if (verb == "cross"):
                                            self.cross_arms(robot)

                                        if (verb == "raise"):
                                            self.raise_hand(robot)

                                    #But Reachy is not the subject...
                                    else:
                                        synthesizer.speak_text_async(
                                            "Do you want me to %s %s" %
                                            (verb, part))
                                        followUpSentence = verb
                                        followUp = True

                                #Just play the Google Assistant SDK's answer
                                else:
                                    if (spokenAnswer == ""):
                                        synthesizer.speak_text_async(
                                            "I can't answer to that question, i'm sorry."
                                        )
                                    else:
                                        synthesizer.speak_text_async(
                                            str(spokenAnswer))

                        logging.info('Playing assistant response.')

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
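predictor, synthesizer, robot, dict_bewegen, and find_between() are all defined elsewhere in the author's project. For illustration, a plausible sketch of the find_between() helper used to pull arguments out of the semantic-role labeling output (hypothetical; the author's version is not shown):

    def find_between(s: str, first: str, last: str) -> str:
        """Return the substring between the first occurrences of `first`
        and `last` in s, or '' if either marker is missing."""
        try:
            start = s.index(first) + len(first)
            return s[start:s.index(last, start)]
        except ValueError:
            return ''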
Example #16
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        print("Recording...")
        sys.stdout.flush()

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        # looptime counts responses after END_OF_UTTERANCE so the
        # supplemental display text is emitted exactly once; initialize it
        # here so the first response cannot hit an unbound local.
        looptime = 0
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                print('end')
                sys.stdout.flush()
                self.conversation_stream.stop_recording()
                looptime = 0
            if resp.speech_results:
                looptime = 1
                print('~in ' + ' '.join(r.transcript
                                        for r in resp.speech_results))
                sys.stdout.flush()
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    #logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if looptime == 2:
                data = '~out ' + resp.dialog_state_out.supplemental_display_text
                dataencode = data.encode("utf-8")
                print(dataencode)
                sys.stdout.flush()
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            looptime = looptime + 1
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        #print('Finished playing assistant response')
        sys.stdout.flush()
        self.conversation_stream.stop_playback()
        return continue_conversation
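Example #16 tags its stdout lines ('~in', '~out', 'end') so a parent process can parse them. A minimal sketch of such a hypothetical parent; the child script name is illustrative only:

import subprocess

# Launch the assistant child process (script name is hypothetical).
proc = subprocess.Popen(['python3', 'assistant_child.py'],
                        stdout=subprocess.PIPE, text=True)
for line in proc.stdout:
    line = line.rstrip('\n')
    if line.startswith('~in '):        # transcript of the user request
        print('user:', line[4:])
    elif line == 'end':                # end-of-utterance marker
        print('-- utterance finished --')
    elif '~out ' in line:              # display text; note the snippet prints
        print('assistant:', line)      # this line as an encoded bytes repr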
예제 #17
0
    def assist(self, text_query, flag):
        """Send a voice or text request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """

        continue_conversation = False
        device_actions_futures = []
        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')
        display_text = None

        def iter_log_assist_requests(text_query, flag):
            for c in self.gen_assist_requests(text_query, flag):
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(
                iter_log_assist_requests(text_query, flag), self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text
                print(display_text)
            if resp.speech_results:
                voice_query = ' '.join(r.transcript
                                       for r in resp.speech_results)
                print(voice_query)
                if '이게 뭐야' in voice_query:
                    # vision code
                    self.conversation_stream.stop_playback()
                    res_vision = myvision.getText(1)
                    return self.assist(res_vision, 1)
                # self.assistant.assist('a',0)
                if '이게 누구야' in voice_query:
                    # vision code
                    self.conversation_stream.stop_playback()
                    res_vision = myvision.getText(0)
                    return self.assist(res_vision, 1)

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
예제 #18
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        text_response = None
        user_input = None
        rthing = None

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            #print (resp.device_action.device_request_json)
            #print (type(resp.device_action.device_request_json))
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                user_input = ' '.join(r.transcript for r in resp.speech_results)
                print('Transcript of user request: "%s".' % user_input)
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    
                    #self.conversation_stream.start_playback()
                    #print('Playing assistant response.')
                    #print (resp)
                #self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                print('Updating conversation state.')
                #self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                print('Setting volume to %s%%' % volume_percentage)
                #self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False

            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text

            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
                    print (device_actions_futures)
            # if self.display and resp.screen_out.data:
            #     system_browser = browser_helpers.system_browser
            #     system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        #self.conversation_stream.stop_playback()
        self.conversation_stream.close()
        return {"continue" : continue_conversation, "response" : text_response, "input" : user_input}
예제 #19
0
    def assist(self, text_query):
        """Send a request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        if self.MODE == 0 or self.MODE == 1:

            def iter_log_assist_requests():
                for c in self.gen_text_assist_requests(text_query):
                    assistant_helpers.log_assist_request_without_audio(c)
                    yield c
                logging.debug('Reached end of text AssistRequest iteration.')
        elif self.MODE == 2:
            self.conversation_stream.start_recording()
            logging.info('Recording audio request.')

            def iter_log_assist_requests():
                for c in self.gen_voice_assist_requests():
                    assistant_helpers.log_assist_request_without_audio(c)
                    yield c
                logging.debug('Reached end of audio AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        text_response = None
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
                #print(str(conversation_state))
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return text_response, continue_conversation
예제 #20
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        self.conversation_stream.start_recording()
        # logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            # logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        recording = True
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            # print("message==|D" + len(resp))
            # sys.stdout.flush()
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                # logging.info('End of audio request detected.')
                # logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
                recording = False
            if resp.speech_results and recording:
                print('transcript==|D',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                sys.stdout.flush()
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    # logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
                # print('AUDIO==|D' + str(resp.audio_out.audio_data))
                # sys.stdout.flush()
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                # logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                # logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                # logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.screen_out.data:
                # print(resp.screen_out.data)
                filePath = os.path.join(os.getcwd(), 'modules/MMM-GoogleAssistant/results.html')
                html = resp.screen_out.data.decode('utf-8')
                with open(filePath, 'w') as doc:
                    try:
                        startIndex = re.search(r'<div class="popout-content" id="assistant-card-content"', html).start()
                        endIndex = re.search(r'<div class="popout-overflow-shadow-down"', html).start()
                        html_answer = html[startIndex:endIndex]
                    except AttributeError:  # markers not found; keep the full page
                        html_answer = html
                    doc.write(html_answer)
                print('HTML==|D' + filePath)
                sys.stdout.flush()
                # webbrowser.open_new_tab('file:///Users/pareekravi/Desktop/results.html')
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                pass
                # system_browser = browser_helpers.system_browser
                # system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            # logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        print('message==|DFinished playing assistant response.')
        sys.stdout.flush()
        self.conversation_stream.stop_playback()
        return continue_conversation
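The regex slicing in example #20 cuts the assistant-card markup out of the screen_out HTML. The same extraction can be sketched with str.find, which makes the missing-marker fallback explicit (marker strings copied from the snippet above):

def extract_card(html: str) -> str:
    """Return the assistant-card fragment, or the full page as a fallback."""
    start = html.find('<div class="popout-content" id="assistant-card-content"')
    end = html.find('<div class="popout-overflow-shadow-down"')
    if start != -1 and end != -1 and start < end:
        return html[start:end]
    return html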
예제 #21
0
    def assist(self):
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        GPIO.output(GPIO_LED, True)
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        GPIO.output(GPIO_LED, False)
        return continue_conversation
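Example #21 only toggles GPIO_LED around the conversation; the pin setup happens elsewhere in that project. A minimal RPi.GPIO setup sketch under that assumption (the BCM pin number is illustrative):

import RPi.GPIO as GPIO

GPIO_LED = 18  # hypothetical BCM pin for the status LED

GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_LED, GPIO.OUT, initial=GPIO.LOW)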
예제 #22
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.
        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        global dimstart
        global pin_num
        
        dimstart = 1 #Start dimming LED to signal ready status
        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')
        t1 = threading.Thread(target=leddim,args=(5,)) #Dim second pin
        t1.start()

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
                dimstart = 0 #Stop dimming
            #speech text
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                logging.info('Playing assistant response.')
                #Possible text from google
                print(resp.dialog_state_out.supplemental_display_text)
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
                print('stop conversation')
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        
        self.conversation_stream.stop_playback()
        print('stopped playback')
        #Need to close stream in order to release audio back to speech recog program
        if continue_conversation == False: self.conversation_stream.close()
        return continue_conversation
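The leddim helper that example #22 runs in a thread is not shown. One plausible shape, sketched with RPi.GPIO software PWM and the global dimstart flag the snippet uses; this is an assumption, not the original implementation:

import time

import RPi.GPIO as GPIO

def leddim(pin):
    """Fade an LED up and down until the global dimstart flag is cleared."""
    pwm = GPIO.PWM(pin, 100)  # 100 Hz software PWM
    pwm.start(0)
    while dimstart:
        for duty in list(range(0, 101, 5)) + list(range(100, -1, -5)):
            if not dimstart:
                break
            pwm.ChangeDutyCycle(duty)
            time.sleep(0.02)
    pwm.stop()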
예제 #23
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

		Returns: True if conversation should continue.
		"""
        user_query = ""
        global updt_time, resp_text, mute, startMouth, mouthMove
        t2status = False

        def mouthMove():
            #~ global startMouth
            while True:
                #~ print "mouth"
                TezHead.SetMouth(0xFFF10E00)
                time.sleep(0.1)
                TezHead.SetMouth(0xFFF1F10E)
                time.sleep(0.1)
                TezHead.SetMouth(0xFFFF0E00)
                time.sleep(0.1)
                TezHead.SetMouth(0x00FF0000)
                time.sleep(0.1)

        t2 = Process(target=mouthMove)
        continue_conversation = True
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
                if mute == False and user_query != "":
                    print(user_query)
                    socket.emit('event-user-message', user_query)
                user_query = ""

            if resp.speech_results:
                for r in resp.speech_results:
                    user_query = r.transcript

            if len(resp.audio_out.audio_data) > 0 and mute == False:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    t2.start()
                    t2status = True
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                    updt_time = time.time()
                    mute = False
                self.conversation_stream.write(resp.audio_out.audio_data)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                resp_text = resp.dialog_state_out.supplemental_display_text
                updt_time = time.time()

                if mute == False:
                    socket.emit('event-robot-message', resp_text)
                    logging.info('Resp: %s', resp_text)

            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = True
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        if t2status == True:
            print "stopped"
            time.sleep(0.5)
            t2.terminate()
            TezHead.SetMouth(0x110E00)
        logging.info('Finished playing assistant response.')

        #mute=False
        self.conversation_stream.stop_playback()
        time.sleep(0.5)

        return continue_conversation
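Example #23 emits events on a global socket object that is not shown. A minimal sketch of how such a client could be set up, assuming python-socketio and a local server URL (both are assumptions):

import socketio

socket = socketio.Client()
socket.connect('http://localhost:3000')  # hypothetical UI server

# The snippet then forwards transcripts and replies as events:
socket.emit('event-user-message', 'hello robot')
socket.emit('event-robot-message', 'hello human')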
예제 #24
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))


#We did it... we can do this..

                if '또또야' in ' '.join(r.transcript for r in resp.speech_results):

                    if '불 켜 줄래'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('led on')
                        GPIO.output(21,True)
                        GPIO.output(20,True)
                        return continue_conversation


                    if '불 꺼 줄래'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('led off')
                        GPIO.output(21,False)
                        GPIO.output(20,False)
                        return continue_conversation


                    if '창문 닫아 줄래'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('window close')
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(0)
                        try:
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(15) 
                        except:
                          p.stop()
                          GPIO.cleanup()
                        return continue_conversation


                    if '창문 열어 줄래'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('window open')
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(15)
                        try:
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(2.5)
                        except:
                          p.stop()

                        return continue_conversation


                    if '집이야' in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('home in')
                        GPIO.output(21,True)
                        GPIO.output(20,True)
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(15)
                        try:
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(2.5)
                        except:
                          p.stop()

                        return continue_conversation


                    if '다녀올게'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('home out')
                        GPIO.output(21,False)
                        GPIO.output(20,False)
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(0)
                        try:
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(15) 
                        except:
                          p.stop()
                          GPIO.cleanup()


                        return continue_conversation


                    if '모터 돌려' in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('motor on')
                        for pin in control_pins:
                          GPIO.setup(pin, GPIO.OUT)
                        halfstep_seq = [
                          [1,0,0,0],
                          [1,1,0,0],
                          [0,1,0,0],
                          [0,1,1,0],
                          [0,0,1,0],
                          [0,0,1,1],
                          [0,0,0,1],
                          [1,0,0,1]
                        ]
                        for i in range(1024):
                          for halfstep in range(8):
                            for pin in range(4):
                              GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
                            time.sleep(0.001)
                        return continue_conversation

                if '鈴木さん' in ' '.join(r.transcript for r in resp.speech_results):

                    if '電気つけてくれ'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('led on')
                        GPIO.output(21,True)
                        GPIO.output(20,True)
                        return continue_conversation


                    if '電気消してくれ'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('led off')
                        GPIO.output(21,False)
                        GPIO.output(20,False)
                        return continue_conversation


                    if '窓を閉めて'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('window close')
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(0)
                        try:
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(15) 
                        except:
                          p.stop()
                          GPIO.cleanup()
                        return continue_conversation


                    if '窓を開けて'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('window open')
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(15)
                        try:
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(2.5)
                        except:
                          p.stop()

                        return continue_conversation


                    if 'ただいま' in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('home in')
                        GPIO.output(21,True)
                        GPIO.output(20,True)
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(15)
                        try:
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(2.5)
                        except:
                          p.stop()

                        return continue_conversation


                    if '行ってくる'  in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('home out')
                        GPIO.output(21,False)
                        GPIO.output(20,False)
                        p = GPIO.PWM(servoPIN, 50)
                        p.start(0)
                        try:
                          p.ChangeDutyCycle(5)
                          time.sleep(1)
                          p.ChangeDutyCycle(7.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(10)
                          time.sleep(1)
                          p.ChangeDutyCycle(12.5)
                          time.sleep(1)
                          p.ChangeDutyCycle(15) 
                        except:
                          p.stop()
                          GPIO.cleanup()


                        return continue_conversation


                    if 'モーター回す' in ' '.join(r.transcript for r in resp.speech_results):
                        logging.info('motor on')
                        for pin in control_pins:
                          GPIO.setup(pin, GPIO.OUT)
                        halfstep_seq = [
                          [1,0,0,0],
                          [1,1,0,0],
                          [0,1,0,0],
                          [0,1,1,0],
                          [0,0,1,0],
                          [0,0,1,1],
                          [0,0,0,1],
                          [1,0,0,1]
                        ]
                        for i in range(1024):
                          for halfstep in range(8):
                            for pin in range(4):
                              GPIO.output(control_pins[pin], halfstep_seq[halfstep][pin])
                            time.sleep(0.001)
                        return continue_conversation

#We... we'll make it..

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)


        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
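The example above repeats the same five-step servo ramp in six places. A sketch of a factored helper with the duty-cycle values copied from the snippet; in that wiring, sweeping the duty cycle up closes the window and sweeping it down opens it:

import time

import RPi.GPIO as GPIO

def sweep_servo(pin, closing):
    """Step a hobby servo through the duty cycles used in the snippet."""
    duties = [5, 7.5, 10, 12.5, 15] if closing else [12.5, 10, 7.5, 5, 2.5]
    p = GPIO.PWM(pin, 50)  # 50 Hz servo control signal
    p.start(duties[0])
    try:
        for duty in duties:
            p.ChangeDutyCycle(duty)
            time.sleep(1)
    finally:
        p.stop()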
예제 #25
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        global spokenAnswer
        global followUp
        global followUpSentence
        global canPlay
        global couldAnswer
        couldAnswer = False
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))

                # Store the question/statement we asked ourselves in the spokentext variable
                self.storeAsText(resp.speech_results)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state

            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text

                # Capture the Google Assistant's own answer
                spokenAnswer = display_text
                # Only set to True once the Assistant could actually produce an answer
                couldAnswer = True

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    # The TTS will be allowed to speak...
                    canPlay = True

                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    speaking_text = ""

                    # Retrieve the question we asked ourselves
                    self.spokenQuestion = re.findall(r'"(.+?)"',
                                                     str(self.spokenQuestion))
                    question = str(self.spokenQuestion[0]).lower()

                    # Which language will the assistant answer in?
                    language = detect_language(spokenAnswer)

                    # Is the utterance a statement or a question?
                    type = selector.predict(vectorizer.transform([question]))

                    # Names the robot answers to
                    matches = ['leonard', 'lennert']

                    # If the audio recording is not empty
                    if (question != ""):
                        # Let our SRL model label the question
                        labeling = predictor.predict(sentence=question)

                        # If the question is a follow-up to the previous one
                        if (followUp == True):
                            # If the user says yes
                            if (question == "yes"):
                                # Cross..
                                if (followUpSentence == "cross"):
                                    self.cross_arms(robot)
                                    # Set canPlay to False here, otherwise the robot would speak twice
                                    canPlay = False
                                # Raise...
                                if (followUpSentence == "raise"):
                                    self.raise_hand(robot)
                                    # Set canPlay to False here, otherwise the robot would speak twice
                                    canPlay = False

                                followUp = False
                                break

                            else:
                                followUp = False
                                break

                        else:
                            # If our SRL model can recognize structures in the sentence...
                            for item in labeling["verbs"]:
                                # If the question contains a verb we want to intercept
                                if (item["verb"] in list_bewegen):
                                    # Label via SRL
                                    subject = self.find_between(
                                        item["description"], "[ARG0: ", "]")
                                    subject2 = self.find_between(
                                        item["description"], "[ARGM-DIS: ",
                                        "]")
                                    verb = self.find_between(
                                        item["description"], "[V: ", "]")
                                    part = self.find_between(
                                        item["description"], "[ARG1: ", "]")

                                    #Are we talking to the robot?
                                    if (subject in matches
                                            or subject2 in matches):
                                        if (verb == "cross"):
                                            # If the labeled part is in our list
                                            if ([
                                                    ele for ele in list_parts
                                                    if (ele in part)
                                            ]):
                                                self.cross_arms(robot)

                                        if (verb == "raise"):
                                            # If the labeled part is in our list
                                            if ([
                                                    ele for ele in list_parts
                                                    if (ele in part)
                                            ]):
                                                self.raise_hand(robot)
                                    else:
                                        if (str(type[0]) == "ynQuestion" or
                                                str(type[0]) == "whQuestion"):
                                            speaking_text = "Do you want me to %s %s" % (
                                                verb, part)
                                            #We willen een antwoordo op deze vraag in volgende audio
                                            followUpSentence = verb
                                            #Daarom zetten we followup op true
                                            followUp = True
                                        else:
                                            #Het is een gewone mededeling met een werkwoord die wij willen opvangen
                                            speaking_text = "I can't understand that, i'm sorry"
                                else:
                                    #Geen werkwoord dat we willen opvangen: google assistant antwoord met zijn zelf gevonden antwoord
                                    speaking_text = str(spokenAnswer)
                    else:
                        #Mocht er een lege audio opname zijn
                        speaking_text = "I can't understand that, i'm sorry"

                    # If the assistant is allowed to speak
                    if (canPlay == True):
                        self.speak_message(language, speaking_text)
                        logging.info('Playing assistant response.')

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
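detect_language in the example above is not shown; one way it could be implemented, assuming the langdetect package (an assumption, not the project's actual helper):

from langdetect import detect

def detect_language(text):
    """Guess the ISO 639-1 language code of `text`, defaulting to English."""
    try:
        return detect(text)  # e.g. 'en' or 'nl'
    except Exception:
        return 'en'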
예제 #26
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        counter = 0
        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                resp_str = ' '.join(r.transcript for r in resp.speech_results)
                logging.info('Transcript of user request: "%s".', resp_str)
                try:
                    wiki_response = entitywiki.getWiki(resp_str)
                except Exception:
                    wiki_response = ''
                if wiki_response is None:
                    logging.info('no wiki')
                else:
                    if len(wiki_response) > 2:
                        if (counter == 0):
                            texttospeech.synthesize_ssml(
                                "<speak> {} </speak>".format(wiki_response))
                            counter += 1
                            temp_data = resp.audio_out.audio_data
                            resp.audio_out.audio_data = open('output',
                                                             'rb').read()

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)
            if counter == 1:
                # The substituted Wikipedia answer has been queued for
                # playback; stop consuming further responses.
                break
        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
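
A note on the trick in 예제 #26: the Wikipedia answer is synthesized locally and then simply overwrites resp.audio_out.audio_data, so the normal playback branch streams the substituted audio instead of the Assistant's. A minimal sketch of that substitution step, assuming (as the example does) a texttospeech.synthesize_ssml() helper that writes its audio to a file named 'output' in the encoding the conversation stream expects:

def substitute_response_audio(resp, answer_text, output_path='output'):
    # Synthesize the answer locally; texttospeech.synthesize_ssml is
    # the example's own helper, assumed to write to output_path.
    texttospeech.synthesize_ssml('<speak> {} </speak>'.format(answer_text))
    # Overwrite the Assistant's audio so the playback branch streams
    # the substituted answer instead.
    with open(output_path, 'rb') as audio_file:
        resp.audio_out.audio_data = audio_file.read()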
예제 #27
    def assist(self, quiz, continue_quiz):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        end_of_user_request = False
        
        # If an mpv process is already running, terminate it before
        # starting another one.
        if player:
            player.terminate()

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            # Start playback once all request messages have been sent.
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                self.conversation_stream.stop_recording()
                end_of_user_request = True
            if resp.speech_results:
                # Intercept the request with a local search (e.g. YouTube)
                # before the Assistant's own response is played.
                if end_of_user_request:
                    request = ' '.join(r.transcript
                                      for r in resp.speech_results)
                    # If we are in the quiz game, check the spoken answer.
                    if quiz:
                        logging.info('quiz: %s', quiz)
                        logging.info('continue_quiz: %s', continue_quiz)
                        if request.lower() == "un gland":
                            logging.debug('Correct answer')
                        else:
                            logging.debug('Wrong answer')
                        continue_quiz = True
                        logging.info('Finished playing assistant response.')
                        self.conversation_stream.stop_playback()
                        continue_conversation = True
                        return continue_conversation, quiz, continue_quiz
                    
                    # Search the request for a known keyword.
                    search_response = search.word_search(request)
                    
                    if search_response:
                        # If an mpv command was recognized, execute it.
                        if search_response[1] == 'command':
                            logging.info('MPV command called: "%s"',
                                         search_response[0])
                            if search_response[0] == 'suivant':
                                player.playlist_next('weak')
                            logging.info('Finished playing assistant response.')
                            self.conversation_stream.stop_playback()
                            return continue_conversation, quiz, continue_quiz
                        # If the quiz game was requested, start or resume it.
                        logging.info('quiz: %s', quiz)
                        logging.info('continue_quiz: %s', continue_quiz)
                        logging.info('search_response: %s',
                                     search_response[1].lower())
                        if search_response[1] == 'quiz':
                            logging.info('Finished playing assistant response.')
                            self.conversation_stream.stop_playback()
                            quiz = True
                            if continue_quiz:
                                logging.info('Launching quiz game.')
                                quiz_game.read_question(1)
                                quiz_game.read_reponses(1)
                                continue_quiz = False
                            else:
                                # Compare the spoken answer with the expected
                                # one (the original compared search_response[1],
                                # which is always 'quiz' in this branch).
                                if request.lower() == "un gland":
                                    logging.debug('Correct answer')
                                    playback.playback_audio_file('/home/pi/GoogleEnv/lib/python3.5/site-packages/childassistant/quiz/sons/applaude.mp3')
                                else:
                                    logging.debug('Wrong answer')
                                    playback.playback_audio_file('/home/pi/GoogleEnv/lib/python3.5/site-packages/childassistant/quiz/sons/huee.mp3')
                                continue_quiz = True

                            continue_conversation = True
                            return continue_conversation, quiz, continue_quiz
                        logging.info('Finished playing assistant response.')
                        self.conversation_stream.stop_playback()
                        play_audio(search_response[0], search_response[1])
                        return continue_conversation, quiz, continue_quiz
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
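
예제 #27 treats search.word_search() as returning a (value, kind) pair that the loop dispatches on ('command', 'quiz', or a playable result). The module itself is not shown; a hypothetical stand-in with that contract might look like this (the keyword table and return shape are assumptions, not the real module):

# Hypothetical stand-in for search.word_search().
MPV_COMMANDS = {'suivant', 'stop', 'pause'}

def word_search(request):
    words = request.lower().split()
    for word in words:
        if word in MPV_COMMANDS:
            return (word, 'command')
    if 'quiz' in words:
        return (request, 'quiz')
    return None  # no keyword matched: let the Assistant answer normally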
예제 #28
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        def set_everloop_color(red, green, blue, white):
            # One RGBW byte quadruple per LED; the MATRIX everloop ring
            # has 35 LEDs. The with-block closes the device file itself.
            color_array = bytearray()
            for _ in range(35):
                color_array += bytearray([red, green, blue, white])
            with open('/dev/matrixio_everloop', 'wb') as bin_file:
                bin_file.write(color_array)

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')
        set_everloop_color(0, 0, 10, 0)  # blue

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')

                set_everloop_color(0, 0, 0, 0)  # black

                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
                set_everloop_color(0, 10, 0, 0)  # green

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                    set_everloop_color(0, 0, 0, 10)  # white
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        set_everloop_color(0, 0, 0, 0)  # black
        self.conversation_stream.stop_playback()
        return continue_conversation
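
The everloop helper in 예제 #28 writes one RGBW byte quadruple per LED to the matrixio kernel device. A slightly generalized sketch that takes per-LED colors instead of a single color (the 35-LED count and device path are taken from the example; the rest is an assumption):

NUM_LEDS = 35  # everloop ring size used in the example above

def set_everloop_pattern(colors):
    # colors: list of (r, g, b, w) tuples; missing entries are turned off.
    frame = bytearray()
    for i in range(NUM_LEDS):
        r, g, b, w = colors[i] if i < len(colors) else (0, 0, 0, 0)
        frame += bytearray([r, g, b, w])
    with open('/dev/matrixio_everloop', 'wb') as bin_file:
        bin_file.write(frame)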
예제 #29
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        last_request = None
        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()

            if resp.speech_results:
                last_request = ' '.join(r.transcript
                                        for r in resp.speech_results)
                logging.info('Transcript of user request: "%s".', last_request)

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state

            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage

            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')

            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False

            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                logging.info('Device request: %s', device_request)
                fs = self.device_handler(device_request)

                if fs:
                    device_actions_futures.extend(fs)

            if self.display and resp.screen_out.data:
                if len(device_actions_futures):
                    # Custom device actions were triggered; ignore the
                    # Assistant's screen output.
                    continue

                # Show the Assistant's HTML response (for big screens).
                # A mobile-formatted preview could instead be rendered
                # asynchronously from last_request (see the sketch after
                # this example).
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
                logging.info('Supplemental text: %s', text_response)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
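
The mobile-preview idea mentioned in the comment above can be run off the main loop so response audio is never blocked. A minimal sketch of that pattern, where display_preview is a hypothetical method suggested by the example, not part of the SDK:

import threading

def show_preview_async(assistant, last_request):
    # Render the preview on a daemon thread so conversation_stream
    # playback continues uninterrupted; assistant.display_preview is
    # assumed to exist, as hinted in 예제 #29.
    if last_request:
        threading.Thread(
            target=assistant.display_preview,
            args=[last_request],
            daemon=True).start()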
예제 #30
    def assist(self):
        """Send a voice request to the Assistant and playback the response.
        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
                myscript = ''.join(r.transcript for r in resp.speech_results)
                print(myscript)
                # Select the BCM pin-numbering scheme.
                GPIO.setmode(GPIO.BCM)
                # Configure the GPIO pin that drives the light.
                GPIO.setup(23, GPIO.OUT, initial=GPIO.LOW)

                if myscript in ('불 켜', '켜', '불 켜 줘'):
                    GPIO.output(23, 1)
                elif myscript in ('불 꺼', '꺼', '불 꺼 줘'):
                    GPIO.output(23, 0)

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
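
예제 #30 calls GPIO.setmode() and GPIO.setup() inside the response loop, once per speech result. A sketch of the more usual shape, with the one-time setup hoisted out and the phrases mapped to pin states (pin 23 and the Korean phrases are taken from the example; the structure is an assumption):

import RPi.GPIO as GPIO

LIGHT_PIN = 23
ON_PHRASES = {'불 켜', '켜', '불 켜 줘'}
OFF_PHRASES = {'불 꺼', '꺼', '불 꺼 줘'}

def setup_light():
    # Run once at startup instead of on every speech result.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(LIGHT_PIN, GPIO.OUT, initial=GPIO.LOW)

def handle_transcript(transcript):
    if transcript in ON_PHRASES:
        GPIO.output(LIGHT_PIN, GPIO.HIGH)
    elif transcript in OFF_PHRASES:
        GPIO.output(LIGHT_PIN, GPIO.LOW)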