Example #1
def train_ga(persons):
    """Pause media, ask the Assistant to register new people, and retrain the face model."""
    global spotify_status

    logger.info("Ask RecognizeMe to train for new person")

    spotify_status = mirror_spotify_status()
    mirror_spotify("Pause Spotify")
    stop_youtube(True)

    res_dict = ga_handler.recognize_me("meet a new friend " + ",".join(persons))

    logger.info(res_dict)

    if "new_person" in res_dict and "response" in res_dict:
        if res_dict["response"] == "True":
            new_person = res_dict["new_person"]
            logger.info("Take pictures for {}".format(new_person))
            recognizer.build_face_dataset(new_person)
            logger.info("Extract embeddings for {}".format(new_person))
            face_trainer.extract_embeddings()
            logger.info("Train model to add {}".format(new_person))
            face_trainer.train_model()

            # reload model after training
            recognizer.reset_model()

    elif "skip_fallback" not in res_dict:
        ga_handler.call("stop")

    if spotify_status:
        mirror_spotify("Play Spotify")
Example #2
def stop_youtube(all=False):
    """Hide the YouTube module; resume Spotify unless `all` media should stay paused."""
    global spotify_status, keep_screen_on

    keep_screen_on = False

    mirror_youtube("false")
    if spotify_status and not all:
        mirror_spotify("Play Spotify")
Example #3
def play_youtube(url):
    """Pause Spotify if it is playing, then show the YouTube module on the mirror."""
    global spotify_status, keep_screen_on

    logger.info("Play youtube on MM")
    keep_screen_on = True

    spotify_status = mirror_spotify_status()
    if spotify_status:
        mirror_spotify("Pause Spotify")

    mirror_youtube("true", url)
Example #4
def wake_ga(lang="en-US"):
    """Handle a wake-word event: pause media, run one Assistant turn, then resume."""
    global spotify_status, spotify_changed

    logger.info("Wakeword detected within timeframe")

    spotify_status = mirror_spotify_status()
    if spotify_status:
        mirror_spotify("Pause Spotify")
    stop_youtube(True)

    response = ga_handler.call("ok google", lang)
    logger.info(response)

    if spotify_status and not spotify_changed:
        mirror_spotify("Play Spotify")
Example #5
    def assist(self, text_query=None, language_code='en-US', display=None):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        text_response = None
        continue_conversation = False
        give_audio = True
        device_actions_futures = []
        user_response = None
        self.language_code = language_code
        if display:
            self.display = display == "True"

        def iter_log_assist_requests():
            for c in self.gen_assist_requests(text_query):
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logger.debug('Reached end of AssistRequest iteration.')

        if text_query is None:
            self.conversation_stream.start_recording()
            logger.info('Recording audio request.')
            os.system("aplay resources/soundwav/start.wav")

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)

            if text_query is None:
                if resp.event_type == self.END_OF_UTTERANCE:
                    logger.info('End of audio request detected.')
                    logger.info('Stopping recording.')
                    # Stop recording before any early return so the microphone
                    # is always released once the utterance has ended.
                    self.conversation_stream.stop_recording()
                    mirror_call("user_reply", user_response)

                    # If the transcript was a Spotify command that the mirror
                    # handled itself, end the conversation without a reply.
                    if mirror_spotify(user_response, True):
                        return False, "None"
                if resp.speech_results:
                    user_response = ' '.join(r.transcript
                                             for r in resp.speech_results)
                    logger.info('Transcript of user request: "%s".',
                                user_response)
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text

            if text_response is not None:

                # The supplemental text encodes key/value pairs as
                # "key1:value1;key2:value2"; split on the first ":" only so
                # values that themselves contain colons are kept intact.
                s = text_response.split(";")
                res_dct = {
                    x.split(":", 1)[0]: x.split(":", 1)[1]
                    for x in s if ":" in x
                }

                if "RecognizeMe_talk" in res_dct:
                    if res_dct["RecognizeMe_talk"] == "False":
                        give_audio = False
                    else:
                        give_audio = True

            if len(resp.audio_out.audio_data) > 0 and give_audio:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logger.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logger.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logger.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == self.DIALOG_FOLLOW_ON:
                continue_conversation = True
                logger.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == self.CLOSE_MICROPHONE:
                continue_conversation = False
            # if resp.device_action.device_request_json:
            #     device_request = json.loads(
            #         resp.device_action.device_request_json
            #     )
            #     fs = self.device_handler(device_request)
            #     if fs:
            #         device_actions_futures.extend(fs)
            if resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if device_actions_futures:
            logger.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logger.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()

        if text_response:
            logger.info(text_response)

        return continue_conversation, text_response
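
Because assist returns (continue_conversation, text_response), a caller can
keep the conversation open across follow-on queries. A minimal driver loop,
assuming `assistant` is an instance of the class this method belongs to:

keep_going = True
while keep_going:
    # One voice turn; pass text_query="..." for a typed request instead.
    keep_going, text = assistant.assist()
    if text:
        logger.info("Assistant said: %s", text)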