def get_response(self, state: State) -> ResponseGeneratorResult:
        """Respond with a ShowerThoughts thread about the current entity, if one exists."""

        # Without a current entity there is nothing to fetch a thread for
        cur_entity = self.state_manager.current_state.entity_tracker.cur_entity
        if cur_entity is None:
            return emptyResult(state)

        topic = cur_entity.common_name  # lowercase
        logger.primary_info(
            'Chose this topic for Showerthoughts: {}'.format(topic))

        thread = self.get_showerthoughts_result(state, topic)
        if not thread:
            # No thread found for this topic: give no response
            return emptyResult(state)

        logger.primary_info(
            'Chose this ShowerThought thread: {}'.format(thread))
        response_text = random.choice(INFORM_SHOWER_THOUGHTS).format(thread['title'])
        return ResponseGeneratorResult(
            text=response_text,
            priority=ResponsePriority.CAN_START,
            needs_prompt=True,
            state=state,
            cur_entity=cur_entity,
            conditional_state=ConditionalState(used_thread_id=thread['threadId']))
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Detect closing intent and handle the user's answer to the exit-confirmation question."""

        # We haven't asked the exit-confirmation question yet: look for closing intent
        if not state['has_just_asked_to_exit']:
            if self.user_trying_to_stop():
                # Ask the user to confirm that they really want to stop
                return ResponseGeneratorResult(
                    text=random.choice(CLOSING_CONFIRMATION_QUESTION),
                    priority=ResponsePriority.FORCE_START, needs_prompt=False,
                    state=state, cur_entity=None,
                    conditional_state={'has_just_asked_to_exit': True})
            return emptyResult(state)

        # We asked the confirmation question last turn: interpret the answer
        current_state = self.state_manager.current_state

        # "No" answer: the user wants to keep talking, so continue and request a prompt
        wants_to_continue = current_state.dialog_act['is_no_answer'] or \
            ClosingNegativeConfirmationTemplate().execute(current_state.text) is not None
        if wants_to_continue:
            return ResponseGeneratorResult(
                text=random.choice(CLOSING_CONFIRMATION_CONTINUE),
                priority=ResponsePriority.STRONG_CONTINUE, needs_prompt=True,
                state=state, cur_entity=None,
                conditional_state={'has_just_asked_to_exit': False})

        # "Yes" answer: the user confirmed they want to end the conversation
        wants_to_stop = current_state.dialog_act['is_yes_answer'] or \
            ClosingPositiveConfirmationTemplate().execute(current_state.text) is not None
        if wants_to_stop:
            return ResponseGeneratorResult(
                text=CLOSING_CONFIRMATION_STOP,
                priority=ResponsePriority.STRONG_CONTINUE, needs_prompt=False,
                state=state, cur_entity=None,
                conditional_state={'has_just_asked_to_exit': False})

        # Answer unclear: allow another RG to handle the turn
        return emptyResult(state)
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Offer a GPT2ED fallback response unless another RG should own this turn."""
        current_state = self.state_manager.current_state
        cur_entity = current_state.entity_tracker.cur_entity

        # Skip when there is a current entity that isn't a category
        if cur_entity is not None and not cur_entity.is_category:
            logger.info("entity_tracker.cur_entity exists and is not a category, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # Skip on the first turn, or while the LAUNCH RG still has a treelet to run
        launch_active = (self.state_manager.last_state_active_rg == 'LAUNCH' and
                         self.state_manager.last_state.response_generator_states['LAUNCH'].next_treelet) \
                        or len(current_state.history) <= 1
        if launch_active:
            logger.info("LAUNCH RG active, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # Skip when one of these RGs was active on the last turn
        if self.state_manager.last_state_active_rg in {'OPINION', 'NEURAL_CHAT', 'CATEGORIES'}:
            logger.info("self.state_manager.last_state_active_rg RG active, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # If the NLP pipeline didn't already run gpt2ed, run it now and cache on current state
        if not hasattr(current_state, 'gpt2ed'):
            setattr(current_state, 'gpt2ed', GPT2ED(self.state_manager).execute())

        # Pick a random acceptable neural response, if any was generated
        neural_fallback = get_random_fallback_neural_response(current_state)
        if not neural_fallback:
            return emptyResult(state=state)
        return ResponseGeneratorResult(text=neural_fallback, priority=ResponsePriority.UNIVERSAL_FALLBACK,
                                       needs_prompt=True, state=state,
                                       cur_entity=current_state.entity_tracker.cur_entity,
                                       conditional_state={'used_neural_fallback_response': True})
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Acknowledge a user-initiated entity with a templated remark, if its group has one."""
        current_state = self.state_manager.current_state
        cur_entity = current_state.entity_tracker.cur_entity

        # Only acknowledge a non-None entity the user brought up themselves this turn
        if not current_state.entity_tracker.cur_entity_initiated_by_user_this_turn(current_state):
            logger.primary_info(f'cur_entity {cur_entity} is not a non-None entity initiated by the user on this turn, so '
                        f'Acknowledgment RG is doing nothing')
            return emptyResult(state)

        # Don't acknowledge entities that OPINION has Twitter opinions on (avoids contradiction)
        if cur_entity.name in opinionable_entity_names:
            logger.primary_info(f'Opinion RG has Twitter opinions for cur_entity {cur_entity}, so Acknowledgment RG is doing nothing (to avoid contradiction)')
            return emptyResult(state)

        # Never acknowledge the same entity twice
        if cur_entity.name in state.acknowledged:
            logger.primary_info(f'We have already acknowledged cur_entity {cur_entity}, so Acknowledgment RG is doing nothing')
            return emptyResult(state)

        # Walk the entity groups from most to least specific; use the first one that
        # matches cur_entity and for which we have acknowledgment templates.
        for ent_group_name, ent_group in ENTITY_GROUPS_FOR_CLASSIFICATION.ordered_items:
            if not (ent_group.matches(cur_entity) and ent_group_name in ACKNOWLEDGMENT_DICTIONARY):
                continue
            logger.primary_info(f'cur_entity {cur_entity} matches EntityGroup "{ent_group_name}" which we have an acknowledgment for, so giving acknowledgment')
            candidates = [a.format(entity=cur_entity.common_name) for a in ACKNOWLEDGMENT_DICTIONARY[ent_group_name]]
            acknowledgment = current_state.choose_least_repetitive(candidates)

            # Music-related groups always go at CAN_START. Otherwise use FORCE_START when
            # the last active RG was Categories/Fallback (they ask questions they don't
            # handle) or the user gave PosNav intent this turn; else CAN_START so we don't
            # interrupt the active RG's STRONG_CONTINUE.
            if ent_group_name in ['musician', 'musical_group', 'musical_work']:
                logger.info(f'The best matching group is {ent_group_name}, so Acknowledgment RG is using CAN_START priority to acknowledge cur_entity {cur_entity}')
                priority = ResponsePriority.CAN_START
            elif self.state_manager.last_state_active_rg in ['CATEGORIES', 'FALLBACK']:
                logger.info(f'Last active RG was Categories or Fallback, so Acknowledgment RG is using FORCE_START priority to acknowledge cur_entity {cur_entity}')
                priority = ResponsePriority.FORCE_START
            elif current_state.navigational_intent.pos_intent:
                logger.info(f'User has PosNav intent on this turn, so Acknowledgment RG is using FORCE_START priority to acknowledge cur_entity {cur_entity}')
                priority = ResponsePriority.FORCE_START
            else:
                logger.info(f"The last active RG is not Categories or Fallback, and the user doesn't have PosNav intent on this turn, so Acknowledgment RG is using CAN_START priority to acknowledge cur_entity {cur_entity}")
                priority = ResponsePriority.CAN_START

            return ResponseGeneratorResult(text=acknowledgment, priority=priority, needs_prompt=True, state=state,
                                           cur_entity=cur_entity, conditional_state=ConditionalState(cur_entity.name))

        # No matching group with acknowledgments: give no response
        logger.primary_info(f"cur_entity {cur_entity} didn't match any EntityGroups that we have acknolwedgments for, so Acknowledgment RG is giving no response")
        return emptyResult(state)
# Example #5
# 0
    def get_response(self, state: State,
                     state_manager) -> ResponseGeneratorResult:
        """Ask the first unasked question for state.cur_category_name"""

        category_name = state.cur_category_name
        question = state.get_first_category_response(
            category_name, state_manager)  # CategoryQuestion or None
        if question is None:
            return emptyResult(state)

        # Combine statement and question text, tolerating either being absent
        statement, question_text = question.statement, question.question
        if statement is None:
            question_str = question_text
        elif question_text is None:
            question_str = statement
        else:
            question_str = ' '.join((statement, question_text))

        response = "{} {}".format(choice(ACKNOWLEDGEMENTS), question_str)

        # History questions shouldn't force-start; everything else does
        if category_name == HistoryCategory.__name__:
            priority = ResponsePriority.CAN_START
        else:
            priority = ResponsePriority.FORCE_START

        cur_entity = get_entity_by_wiki_name(question.cur_entity_wiki_name,
                                             state_manager.current_state)
        conditional_state = ConditionalState(HandleAnswerTreelet.__name__,
                                             category_name,
                                             question.statement,
                                             question.question, False)
        return ResponseGeneratorResult(
            text=response,
            priority=priority,
            needs_prompt=False,
            state=state,
            cur_entity=cur_entity,
            expected_type=question.expected_type,
            conditional_state=conditional_state)
# Example #6
# 0
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """Continue the current neural chat treelet, or launch a new one via a starter question."""

        # Build an initialized treelet for every registered treelet class
        treelets = {name: cls(self) for name, cls in NAME2TREELET.items()}

        # Let each treelet update its portion of the state
        for treelet in treelets.values():
            treelet.update_state(state)

        # A designated next treelet takes precedence
        if state.next_treelet is not None:
            logger.primary_info(f'Continuing GPT2ED conversation in {state.next_treelet}')
            return treelets[state.next_treelet].get_response(state)

        # Otherwise, collect starter-question responses from treelets not used yet
        unused_treelet_names = [name for name in treelets if not state.treelet_has_been_used(name)]  # list of treelet names
        logger.primary_info(f'Getting starter question responses from these unused treelets: {unused_treelet_names}')
        results = {name: treelets[name].get_starter_question_response(state) for name in unused_treelet_names}
        results = {name: result for name, result in results.items() if result is not None}
        logger.primary_info("Got these starter questions from neural chat treelets:\n{}".format('\n'.join([f"{treelet_name}: {response_result}" for treelet_name, response_result in results.items()])))
        if not results:
            return emptyResult(state)

        # Keep only the results tied for the highest priority, then sample one
        top_priority = max([result.priority for result in results.values()])
        results = {name: result for name, result in results.items() if result.priority == top_priority}
        logger.primary_info(f"Restricting to just these results with top_priority={top_priority.name}: {results.keys()}")
        return choose_treelet_result(results, self.state_manager.current_state)
# Example #7
# 0
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """Run the appropriate categories treelet, honoring user-requested categories."""

        # A user-requested category routes to the Introductory treelet;
        # otherwise continue with whatever treelet the state designates.
        utterance = self.state_manager.current_state.text.lower()
        user_initiated_category, user_has_posnav = get_user_initiated_category(
            utterance, self.state_manager.current_state)
        if user_initiated_category is None:
            cur_treelet = state.cur_treelet  # str
        else:
            state.cur_category_name = user_initiated_category
            logger.primary_info(
                f'Getting response from {IntroductoryTreelet.__name__}')
            cur_treelet = IntroductoryTreelet.__name__  # str

        if not cur_treelet:
            return emptyResult(state)

        logger.primary_info(f'Running categories treelet {cur_treelet}')
        response_result = self.treeletname2treelet[
            cur_treelet].get_response(state, self.state_manager)

        # Without explicit PosNav, the Introductory treelet must not force-start
        demote = (cur_treelet == 'IntroductoryTreelet'
                  and response_result.priority == ResponsePriority.FORCE_START
                  and not user_has_posnav)
        if demote:
            logger.primary_info(
                "Setting response priority to CAN_START as the user does not have posnav"
            )
            response_result.priority = ResponsePriority.CAN_START
        return response_result
# Example #8
# 0
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Respond to an utterance that the classifier flagged as offensive."""
        utterance = self.state_manager.current_state.text

        # We asked the user last turn why they said what they said: handle their answer
        if state['handle_response']:
            bot_response = state['followup'] if state['followup'] else "Okay."
            state['handle_response'] = False
            return ResponseGeneratorResult(
                text=bot_response,
                priority=ResponsePriority.FORCE_START,
                needs_prompt=True,
                state=state,
                cur_entity=None,
                conditional_state={'handled_response': True})

        # Utterances containing a yes/no word were likely misclassified: stay silent
        tokens = utterance.split()
        for word in YES + NO:
            if word in tokens:
                logger.primary_info(
                    'User\'s utterance "{}" was classified as offensive, but it contains yes/no '  # type: ignore
                    'word "{}", so OFFENSIVE_USER RG is not responding'.format(
                        utterance, word))
                return emptyResult(state)

        bot_response, needs_prompt = self._get_experimental_bot_response(state)
        if bot_response is None:
            return emptyResult(state)

        logger.primary_info(
            'User\'s utterance "{}" was classified as offensive, so giving OFFENSIVE_USER_RESPONSE'
            .format(utterance))  # type: ignore
        return ResponseGeneratorResult(
            text=bot_response,
            priority=ResponsePriority.FORCE_START,
            needs_prompt=needs_prompt,
            state=state,
            cur_entity=None,
            conditional_state={'used_offensiveuser_response': True})
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Give one-turn hand-written responses for chatty phrases and related special cases."""
        utterance = self.state_manager.current_state.text.lower()
        nav_intent_output = self.state_manager.current_state.navigational_intent

        # Special case: George Floyd question triggers a smooth handoff to WIKI's BLM flow
        if self.talk_about_george_floyd(state, utterance):
            blm_entity = get_entity_by_wiki_name("Black Lives Matter")
            return ResponseGeneratorResult(text=RESPONSE_TO_QUESTION_ONE_GEORGE_FLOYD,
                                           priority=ResponsePriority.FORCE_START,
                                           needs_prompt=True, state=state,
                                           cur_entity=blm_entity, conditional_state={"talked_about_blm": True},
                                           smooth_handoff=SmoothHandoff.ONE_TURN_TO_WIKI_GF)

        # Run all templates against the utterance up front
        slots = ChattyTemplate().execute(utterance)
        my_name_slots = MyNameIsNonContextualTemplate().execute(utterance)
        not_my_name_slots = MyNameIsNotTemplate().execute(utterance)

        if slots is not None:
            # Chatty phrase detected: look up the canned response
            chatty_phrase = slots["chatty_phrase"]
            logger.primary_info('Detected chatty phrase intent with slots={}'.format(slots))
            response, needs_prompt = one_turn_responses[chatty_phrase]
            logger.primary_info('Chatty RG returned user_response={}'.format(response))
        elif nav_intent_output.pos_intent and nav_intent_output.pos_topic_is_hesitate and "depends on" not in utterance:
            # User hesitated while trying to navigate to a topic: ask for the topic again
            logger.primary_info('User has PositiveNavigationalIntent with topic=HESITATE, so asking them for topic again')
            response, needs_prompt = "I think I missed the last part of that sentence. Can you tell me one more time what you want to talk about?", False
        elif nav_intent_output.pos_intent and nav_intent_output.pos_topic is None and not (self.state_manager.last_state_active_rg == 'WIKI' and contains_phrase(utterance, {'tell'})):
            # General positive talking intent with no topic (excluding WIKI "tell me more")
            logger.primary_info('User has PositiveNavigationalIntent with topic=None, so ONE_TURN_HACK is responding with "What would you like to talk about?"')
            response, needs_prompt = "Ok, I'd love to talk to you! What would you like to talk about?", False
        elif (my_name_slots and self.state_manager.last_state_active_rg and not self.state_manager.last_state_active_rg == 'LAUNCH') or not_my_name_slots:
            # User is correcting their name: apologize and clear the stored name
            logger.primary_info('User is attempting to correct name.')
            response = "Oops, it sounds like I got your name wrong. I'm so sorry about that! I won't make that mistake again."
            needs_prompt = True
            setattr(self.state_manager.user_attributes, 'name', None)
        else:
            # Nothing matched: let another RG respond
            return emptyResult(state)

        # All one-turn responses force-start
        priority = ResponsePriority.FORCE_START
        is_safe = True

        return ResponseGeneratorResult(text=response, priority=priority, needs_prompt=needs_prompt, state=state,
                                       cur_entity=None, conditional_state=state)
    def get_can_start_response(self, state):
        """
        Gets a response that starts discussion on an entity without any prompt from the previous turn.

        Stub implementation: always returns an empty result.

        Args:
            state: the rg state

        Returns:
            ResponseGeneratorResult: Should either respond with CAN_START OR NONE

        Raises:
            CantRespondError if it is unable to respond
        """
        return emptyResult(state)
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Deflect questions about other voice assistants, our identity, and banned advice topics."""
        text = self.state_manager.current_state.text

        if text == '':  # e.g. on first turn
            return emptyResult(state)

        def deflect(response_text):
            # Every deflection force-starts and hands off with a prompt
            return ResponseGeneratorResult(
                text=response_text,
                priority=ResponsePriority.FORCE_START,
                needs_prompt=True,
                state=state,
                cur_entity=None)

        # Mentions of other virtual assistants get a "don't know" response
        for virtual_assistant in ['siri', 'cortana']:
            if utterance_contains_word(text, virtual_assistant):
                return deflect(DONT_KNOW_RESPONSE.format(virtual_assistant))

        # Identity questions are deflected
        identity_response = get_identity_deflection_response(text)
        if identity_response:
            return deflect(identity_response)

        # Banned advice questions are deflected
        advice_type = self.advice_type(text)
        if advice_type is not None:
            return deflect(DEFLECTION_RESPONSE.format(advice_type))

        return emptyResult(state)
 def get_response(self, state: State) -> ResponseGeneratorResult:
     """Delegate to the treelet named by state.next_treelet, or give no response."""
     treelet_name = state.next_treelet
     if treelet_name is None:
         return emptyResult(state)
     treelet_cls = NAME2TREELET[treelet_name]  # Treelet class
     return treelet_cls(self).get_response(state)
# Example #13
# 0
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """This method will return a response depending on which treelet we are in

        Tries, in order: (1) a STRONG_CONTINUE via the designated prompt handler when
        WIKI was active last turn, (2) a CAN_START from each starter treelet in a fixed
        sequence, (3) a WEAK_CONTINUE apology handoff when every treelet is exhausted,
        then optionally extends the chosen response via continue_response.

        :param state: the current state
        :type state: State
        :return: the result
        :rtype: ResponseGeneratorResult
        """

        # Expected to STRONG_CONTINUE
        base_response = emptyResult(state)
        if self.state_manager.last_state_active_rg == 'WIKI':
            neg_intent = self.state_manager.current_state.navigational_intent.neg_intent  # bool
            if neg_intent:
                # User signalled they don't want to continue: hand off out of WIKI right away
                logger.primary_info(
                    'NavigationalIntent is negative, so doing a hard switch out of WIKI'
                )
                return ResponseGeneratorResult(
                    text=get_neural_fallback_handoff(
                        self.state_manager.current_state) or "Ok, no problem.",
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=None,
                    conditional_state=ConditionalState())

        tracked_entity = self.get_recommended_entity(state)
        prompted_options = state.prompted_options  # NOTE(review): read but never used below — confirm whether this is needed
        if self.state_manager.last_state_active_rg == 'WIKI':
            # Should have some idea of how to continue
            # Based on what is in the state, figure out which treelets are applicable for STRONG_CONTINUE
            # If there's a designated continuer, try that, but have a fallback plan
            # Refactor treelets to remove fallbacks from there and have them here centrally
            try:
                logger.info(
                    f"Wiki handing over to prompt handler {state.prompt_handler}.handle_prompt"
                )
                # The handler name may carry a suffix after ':'; only the prefix keys all_treelets
                prompt_handler = state.prompt_handler.split(':')[0]
                base_response = self.all_treelets[
                    prompt_handler].handle_prompt(state)
            except CantRespondError:
                logger.info(
                    f"{state.prompt_handler}.handle_prompt return CantRespondError, will try other treelets next"
                )

        # Clear per-turn fields before computing a fresh response
        state.reset()

        if base_response.priority == ResponsePriority.NO:  # No response so far
            # CAN_START
            # Look for entities and how much they've been talked about
            # If it hasn't been talked about, have a sequence of treelets that can check for availability of information
            # try them in sequence
            try:
                base_response = IntroductoryTreelet(
                    self).get_can_start_response(state)
            except CantRespondError:
                try:
                    base_response = OpenQuestionTreelet(
                        self).get_can_start_response(state)
                except CantRespondError:
                    try:
                        # ConvPara TIL is gated behind the 'convpara' experiment flag
                        if self.state_manager.current_state.experiments.look_up_experiment_value(
                                'convpara'):
                            try:
                                base_response = ConvParaTILTreelet(
                                    self).get_can_start_response(state)
                            except CantRespondError:
                                base_response = TILTreelet(
                                    self).get_can_start_response(state)
                        else:
                            base_response = TILTreelet(
                                self).get_can_start_response(state)
                    except CantRespondError:

                        try:
                            base_response = HandleSectionTreelet(
                                self).get_can_start_response(state)
                        except CantRespondError:
                            try:
                                base_response = IntroduceEntityTreelet(
                                    self).get_can_start_response(state)
                            except:  # NOTE(review): bare except catches more than CantRespondError — confirm intent
                                if tracked_entity:
                                    # Every treelet is exhausted for this entity: apologize,
                                    # mark the entity finished, and hand over weakly
                                    logger.primary_info(
                                        f"WIKI has exhausted all treelets for this entity. Handing over with a weak continue."
                                    )
                                    apology_text = self.state_manager.current_state.choose_least_repetitive(
                                        HANDOVER_TEXTS)
                                    apology_state = deepcopy(state)
                                    apology_state.entity_state[
                                        tracked_entity.
                                        name].finished_talking = True
                                    apology_response = ResponseGeneratorResult(
                                        text=apology_text,
                                        priority=ResponsePriority.
                                        WEAK_CONTINUE,
                                        needs_prompt=True,
                                        cur_entity=None,
                                        state=apology_state,
                                        conditional_state=ConditionalState())

                                    return apology_response

        #return base_response
        # If the base response needs a prompt and we have not finished talking about an entity, then get a section prompt
        if base_response.needs_prompt and tracked_entity and not base_response.state.entity_state[
                tracked_entity.name].finished_talking:
            try:
                # This is a stub for sake of completeness, intro treelet doesn't have ability to continue response
                return IntroductoryTreelet(self).continue_response(
                    base_response)
            except CantContinueResponseError:
                try:
                    return OpenQuestionTreelet(self).continue_response(
                        base_response)
                except CantContinueResponseError:
                    try:
                        # Same experiment gating as the can-start sequence above
                        if self.state_manager.current_state.experiments.look_up_experiment_value(
                                'convpara'):
                            try:
                                return ConvParaTILTreelet(
                                    self).continue_response(base_response)
                            except CantContinueResponseError:
                                return TILTreelet(self).continue_response(
                                    base_response)
                        else:
                            return TILTreelet(self).continue_response(
                                base_response)
                    except CantContinueResponseError:
                        try:
                            return HandleSectionTreelet(
                                self).continue_response(base_response)
                        except CantContinueResponseError:
                            pass
        return base_response
# Example #14
# 0
    def get_response(self, state: State,
                     state_manager) -> ResponseGeneratorResult:
        """
        Handle the user's answer to a categories question on the previous turn.

        If the cur_entity in the entity tracker is still the same category we set at the end of last turn, give a vague
        acknowledgement and ask a followup question. Otherwise, say nothing.
        """

        entity_tracker_state = state_manager.current_state.entity_tracker  # EntityTrackerState
        prev_turn_entity = entity_tracker_state.history[-2][
            'response']  # the cur_entity at the end of the previous turn e.g. 'Food' or 'Art'
        if 'prompt' in entity_tracker_state.history[-2]:
            prev_turn_entity = entity_tracker_state.history[-2]['prompt']

        # If the cur_entity has changed from the end of the last turn (e.g. because the user indicated they don't want
        # to talk about this category, or because the user named an entity which is now the cur_entity), say nothing.
        # Additionally, if we have just used prompt from another RG (state.just_asked == True), return an empty prompt so that RG can take over.
        if entity_tracker_state.cur_entity != prev_turn_entity or state.just_asked:
            logger.primary_info(
                f'cur_entity changed from previous turn, so not asking a second question'
            )
            return emptyResult(state)

        category_name = state.cur_category_name

        # # Otherwise, the cur_entity is still the category entity we set at the end of the previous turn e.g. 'Food'
        # # In this case, get another unasked question for the category and ask it with WEAK_CONTINUE
        # priority = ResponsePriority.WEAK_CONTINUE

        # question = state.get_first_category_response(category_name, state_manager)  # CategoryQuestion or None
        # if question:
        #     logger.primary_info(f'cur_entity {entity_tracker_state.cur_entity} is still the category entity we set at '
        #                         f'the end of the last turn, so asking a followup question on {category_name}')
        #     question_str = None
        #     if question.statement is None:
        #         question_str = question.question
        #     elif question.question is None:
        #         question_str = question.statement
        #     else:
        #         question_str = ' '.join((question.statement, question.question))
        #     response = choice(ACKNOWLEDGE_AND_ASK_SECOND_QUESTION).format(
        #         CATEGORYNAME2CLASS[category_name].activation_phrases[0], question_str)  # this is a hack to get a more natural-sounding name for the category
        #     cur_entity = get_entity_by_wiki_name(question.cur_entity_wiki_name, state_manager.current_state)
        #     conditional_state = ConditionalState(HandleAnswerTreelet.__name__, category_name, question.statement, question.question)
        #     return ResponseGeneratorResult(text=response, priority=priority, needs_prompt=False,
        #                                 state=state, cur_entity=cur_entity, expected_type=question.expected_type,
        #                                 conditional_state=conditional_state)

        # else:
        #     logger.primary_info(f'No unasked questions left for category "{category_name}", so returning empty result')
        #     return emptyResult(state)

        # If the entity does not change, i.e. Entity Linker may not have been triggered,
        # and CATEGORY RG has not responded, we want to use Regex / GPT2 to generate a good response, then ask for prompt from another RG.

        text = "Thanks for answering my questions!"  # Default response that will be overwritten!
        cur_entity = prev_turn_entity
        conditional_state = ConditionalState(HandleAnswerTreelet.__name__,
                                             category_name, None, None, True)

        about_alexa = ""
        if WhatAboutYouTemplate().execute(
                state_manager.current_state.text) is not None:
            if "What TV show are you watching right now?" in state_manager.current_state.history[
                    -1]:
                about_alexa = "I watched the office again. I've re-watched it so many times!"
            elif "What did you eat for dinner last night?" in state_manager.current_state.history[
                    -1]:
                about_alexa = "I had some delicious spaghetti."
            else:
                about_alexa = state_manager.current_state.choose_least_repetitive(
                    RESPONSE_TO_WHAT_ABOUT_YOU)

        if DontKnowTemplate().execute(
                state_manager.current_state.text) is not None:
            text = " ".join(
                (state_manager.current_state.choose_least_repetitive(
                    RESPONSE_TO_DONT_KNOW), about_alexa))
            cur_entity = None
        elif BackChannelingTemplate().execute(
                state_manager.current_state.text) is not None:
            text = " ".join(
                (state_manager.current_state.choose_least_repetitive(
                    RESPONSE_TO_BACK_CHANNELING), about_alexa))
            cur_entity = prev_turn_entity
        elif EverythingTemplate().execute(
                state_manager.current_state.text) is not None:
            text = " ".join(
                (state_manager.current_state.choose_least_repetitive(
                    RESPONSE_TO_EVERYTHING_ANS), about_alexa))
            cur_entity = prev_turn_entity
        elif NotThingTemplate().execute(
                state_manager.current_state.text) is not None:
            text = " ".join(
                (state_manager.current_state.choose_least_repetitive(
                    RESPONSE_TO_NOTHING_ANS), about_alexa))
            cur_entity = None

        else:
            if not hasattr(state_manager.current_state, 'gpt2ed'):
                logger.primary_info(f"CATEGORIES RG is running gpt2ed")
                default_gpt2ed_output = GPT2ED(state_manager).execute()
                setattr(state_manager.current_state, 'gpt2ed',
                        default_gpt2ed_output)

            text = get_random_fallback_neural_response(
                state_manager.current_state)
            if text is None:
                return emptyResult(state)
            cur_entity = prev_turn_entity

        return ResponseGeneratorResult(
            text=text,
            priority=ResponsePriority.STRONG_CONTINUE,
            needs_prompt=True,
            state=state,
            cur_entity=cur_entity,
            expected_type=None,
            conditional_state=conditional_state)
# Example #15
    def _get_response(self, state: State) -> ResponseGeneratorResult:
        """Generate the Music RG's response for this turn.

        Flow: refresh ``state``, work out which treelet (if any) can handle
        the user's current entity, explicit chat request, or positive
        navigational intent, delegate to that treelet, then append an
        internal prompt or request an external one as needed.

        :param state: the Music RG's persistent state object
        :return: the chosen treelet's result (possibly augmented with a
            prompt), an explicit handoff, or an empty result when no
            treelet can respond
        """
        # Update the state.
        self.update_state(state)

        # Variables tracking whether we can respond to cur_entity or phrases.
        trigger_entity_treelet_name = None  # name of the treelet that can handle cur_entity, if any
        trigger_entity = None  # entity handed to the chosen treelet, if entity-triggered
        trigger_phrase = None  # phrase handed to the chosen treelet, if phrase-triggered

        # Check if there is a treelet that can respond to the user initiated cur_entity, if it exists.
        # We filter the discussed entities here.
        cur_entity = self.state_manager.current_state.entity_tracker.cur_entity
        current_state = self.state_manager.current_state
        user_initiated = current_state.entity_tracker.cur_entity_initiated_by_user_this_turn(
            current_state)
        if user_initiated and cur_entity and cur_entity not in state.discussed_entities:
            trigger_entity_treelet_name = self._get_treelet_name_for_entity(
                cur_entity)
            if trigger_entity_treelet_name: trigger_entity = cur_entity

        # Check if the user explicitly requests one of the treelets
        utterance = self.state_manager.current_state.text
        chat_slots = ChatTemplate().execute(utterance)

        # Set state.cur_treelet if there is a treelet that can handle the user utterance.
        top_dialog_act = current_state.dialog_act['top_1']
        neg_intent = self.state_manager.current_state.navigational_intent.neg_intent
        pos_topic = self.state_manager.current_state.navigational_intent.pos_topic
        last_rg = self.state_manager.last_state_active_rg

        if self.state_manager.current_state.question['is_question']:
            # Questions are not handled here; yield so another RG can answer.
            logger.primary_info(
                'We got a question, so Music RG is returning an empty response.'
            )
            #handoff_response = Treelet.get_handoff_response(self.state_manager, state)
            return emptyResult(state)
        elif last_rg == self.name and (neg_intent
                                       or top_dialog_act == 'abandon'):
            # User is steering away from the ongoing music conversation:
            # close out gracefully with a handoff response.
            logger.primary_info(
                'NavigationalIntent is negative, so Music RG is ending the conversation and asking for prompts.'
            )
            handoff_response = Treelet.get_handoff_response(
                self.state_manager, state)
            handoff_response.priority = ResponsePriority.STRONG_CONTINUE
            return handoff_response
        elif trigger_entity_treelet_name:
            logger.primary_info(
                '{} can handle the cur_entity {}, so Music RG will generate a response.'
                .format(trigger_entity_treelet_name, trigger_entity))
            # Switching to a different treelet: reset per-treelet state first.
            if trigger_entity_treelet_name != state.cur_treelet:
                state = self.update_state_if_not_chosen(state)
            state.cur_treelet = trigger_entity_treelet_name
        elif pos_topic or chat_slots:
            # Positive navigation ("let's talk about X") or an explicit chat
            # request; pos_topic takes precedence over the template slot.
            if pos_topic:
                phrase = pos_topic[0]
            else:
                phrase = chat_slots['trigger_word']

            treelets_for_phrase = [
                name for name, treelet in self.treelets.items()
                if phrase in treelet.trigger_phrases
            ]
            if treelets_for_phrase:
                # Use the first matching treelet.
                logger.primary_info(
                    '{} can handle the positive navigation with the topic {}, so Music RG will generate a response.'
                    .format(treelets_for_phrase[0], phrase))
                if treelets_for_phrase[0] != state.cur_treelet:
                    state = self.update_state_if_not_chosen(state)
                state.cur_treelet = treelets_for_phrase[0]
                trigger_phrase = phrase
            else:
                logger.primary_info(
                    'Music RG cannot handle the positive navigation with the topic {}, so returning an empty response.'
                    .format(phrase))
                state.cur_treelet = None

        # If no treelet can handle the user utterance, return an empty response.
        if not state.cur_treelet:
            logger.primary_info(
                'None of the Music RG treelets can handle the user utterance. Returning an empty response.'
            )
            return emptyResult(state)

        # Get the response from the specified treelet.
        cur_treelet = self.treelets[state.cur_treelet]
        logger.primary_info(
            '{} treelet in Music RG will generate a response.'.format(
                cur_treelet))
        response = cur_treelet.get_response(state,
                                            trigger_entity=trigger_entity,
                                            trigger_phrase=trigger_phrase)

        # Number of music turns so far, counting the one being generated now.
        number_of_turns = len(response.state.treelet_history) + 1

        if response.conditional_state.needs_internal_prompt:
            if number_of_turns > TURN_THRESHOLD:
                # Conversation is long enough; stop chaining treelets.
                response.conditional_state.next_treelet = None
            else:
                # Try to extend this turn with an internal (music) prompt.
                prompt = self._get_random_prompt(
                    state, conditional_state=response.conditional_state)
                if prompt:
                    #response.text = self._add_connector(self.state_manager, response.text, prompt.text, connector_probability=1)
                    response.text = "{} {}".format(response.text, prompt.text)
                    response.state = prompt.state
                    response.conditional_state = prompt.conditional_state
                    response.cur_entity = prompt.cur_entity
                    response.expected_type = prompt.expected_type
                else:
                    response.conditional_state.next_treelet = None
                    # We don't have any internal prompt remaining and the last RG was not music.
                    if last_rg != self.name:
                        return emptyResult(state)

        if not response.conditional_state.next_treelet:
            # No follow-up treelet planned: ask other RGs for an external prompt.
            response.needs_prompt = True
            response.conditional_state.needs_external_prompt = True

        return response
    def get_response(self, state : State) -> ResponseGeneratorResult:
        """This function defines the stages that we go through to generate the result. The procedure is

        1. First populate the "additional_features"
        2. Incorporate unconditional information such as user's likes and dislikes, phrases that were detected
        3. Advance the state to the next state depending on the user's utterance and additional features
        4. Define the action space for the policy
        5. Select an action using a policy
        6. Utterancify the action chosen using additional information like lists of reasons and alternatives
        7. Post process the state conditioned on the action

        :param state: the current state
        :type state: State
        :return: a result that can be used for chirpy
        :rtype: ResponseGeneratorResult
        """
        self.initialize_turn()
        neg_intent = self.state_manager.current_state.navigational_intent.neg_intent  # type: ignore
        # If the user is navigating away while OPINION was the active RG, bail out immediately.
        if neg_intent and self.state_manager.last_state_active_rg == 'OPINION': # type: ignore
            return self.respond_neg_nav(state, None)
        utterance = self.state_manager.current_state.text
        additional_features = self.populate_features(state, utterance)
        high_prec = self.state_manager.current_state.entity_linker.high_prec # type: ignore
        # should_evaluate = len(state.action_history) > 4 and not state.evaluated \
        #     and not state.first_episode and state.cur_policy != '' and state.cur_policy != repr(OneTurnAgreePolicy())
        should_evaluate = False # Turning off evaluation question.
        if self.state_manager.current_state.entity_linker.high_prec and state.cur_phrase != '': # type: ignore
            cur_entity_name = self.opinionable_phrases[state.cur_phrase].wiki_entity_name
            if (cur_entity_name is None and state.cur_phrase not in [linked_span.span for linked_span in high_prec]) \
                    or (cur_entity_name is not None and cur_entity_name not in [linked_span.top_ent.name for linked_span in high_prec]):
                # If the above condition passes, it means that the linked entity is not the currently opinionating phrase.
                if len(additional_features.detected_phrases) == 0:
                    # User no longer want to talk about an opinionable phrase
                    return self.respond_neg_nav(state, random.choice(self.state_manager.current_state.entity_linker.high_prec).top_ent) # type: ignore
        # Pick an initial priority for this turn.
        if state.last_turn_prompt or state.last_turn_select:
            priority = ResponsePriority.STRONG_CONTINUE
        elif len(high_prec) > 0 and \
                not any(linked_span.span in self.opinionable_phrases \
                        or linked_span.top_ent.name in self.opinionable_entities for linked_span in high_prec): # type: ignore
            self.logger.primary_info(f'Opinion realized that there is a high precision entity, will not CAN_START our conversation') # type: ignore
            priority = ResponsePriority.NO
        else:
            priority = ResponsePriority.CAN_START
        if len(state.action_history) > 0 and state.action_history[-1].exit:
            self.logger.primary_info(f'Opinion detected our previous action is to exit and we were not selected, will reset the state before this turn starts') # type: ignore
            state = state.reset_state()
            priority = ResponsePriority.CAN_START # Drop the priority to CAN_START because we already ended a convo before
        # First need to incorporate the unconditional information learned from this turn
        state.detected_opinionated_phrases += additional_features.detected_phrases
        if len(additional_features.detected_phrases) > 0:
            # Here we only use regex since sentiment analysis may not always do well
            if utils.is_like(utterance)[0]:
                state.user_sentiment_history += tuple((phrase, 4) for phrase in additional_features.detected_phrases)
            elif utils.is_not_like(utterance)[0]:
                state.user_sentiment_history += tuple((phrase, 0) for phrase in additional_features.detected_phrases)
        additional_features.detected_phrases = tuple([phrase for phrase in additional_features.detected_phrases if phrase not in state.phrases_done])

        # Then need to advance the state using the utterance
        state_p = state_actions.next_state(state, utterance, additional_features)
        if state_p is None or state_p.cur_phrase is None:
            return emptyResult(state.reset_state())
        reasons_used = dict(state.reasons_used)
        # Use an (empty) set in both branches so the membership checks below are uniform.
        phrase_reasons_used = set(reasons_used[state_p.cur_phrase]) if state_p.cur_phrase in reasons_used else set()
        pos_reasons, neg_reasons = utils.get_reasons(state_p.cur_phrase)
        pos_reasons = [reason for reason in pos_reasons if reason not in phrase_reasons_used]
        neg_reasons = [reason for reason in neg_reasons if reason not in phrase_reasons_used]
        # Candidate alternatives: same category, different underlying wiki entity, not yet discussed.
        related_entities = [phrase.text for phrase in self.opinionable_phrases.values() \
            if phrase.category is not None and phrase.category == self.opinionable_phrases[state_p.cur_phrase].category \
                and phrase.wiki_entity_name != self.opinionable_phrases[state_p.cur_phrase].wiki_entity_name]
        related_entities = [e for e in related_entities if e not in state_p.phrases_done]

        # Then need to define the action space
        action_space = self.get_action_space(state_p, pos_reasons, neg_reasons, related_entities)
        # Then need to select a policy if we don't have one
        ab_test_policy = self.state_manager.current_state.experiments.look_up_experiment_value('opinion_policy') # type: ignore
        if state_p.cur_policy != '':
            self.logger.primary_info(f'OPINION is using current policy {state_p.cur_policy} to respond to the user') # type: ignore
        elif ab_test_policy != 'random' and ab_test_policy != 'not_defined':
            # BUGFIX: this condition previously read "!= 'random' or == 'not_defined'",
            # which is true whenever no experiment value is defined ('not_defined' !=
            # 'random'), installing the bogus policy name 'not_defined' as cur_policy
            # and crashing on the self.policies lookup below. Only adopt the A/B value
            # when it names a concrete policy.
            state_p.cur_policy = ab_test_policy
            self.logger.primary_info(f'Opinion detected a/b test policy is {ab_test_policy}, will set current episode accordingly ') # type: ignore
        elif state_p.num_turns_since_long_policy < 20:
            # Recently finished a long conversation: prefer the short policies.
            policies, weights = zip(*self.short_policy_rates)
            state_p.cur_policy = random.choices(policies, weights, k=1)[0] # type: ignore
            self.logger.primary_info(f'Opinion had a long conversation {state_p.num_turns_since_long_policy} < 20 turns ago. Will use policy {state_p.cur_policy}') # type: ignore
        else:
            if state_p.last_policy in set([p for p, _ in self.disagree_policy_rates]):
                policies, weights = zip(*self.agree_policies_rates)
            elif state_p.last_policy in set([p for p, _ in self.agree_policies_rates]):
                # NOTE(review): this branch draws from agree_policies_rates just like the
                # branch above, making the elif redundant; it looks like
                # disagree_policy_rates was intended here — confirm before changing.
                policies, weights = zip(*self.agree_policies_rates)
            else:
                policies, weights = zip(*self.policy_rates)
            state_p.cur_policy = random.choices(policies, weights, k=1)[0] # type: ignore
            self.logger.primary_info(f'OPINION have no current policy, randomly picked {state_p.cur_policy} to respond to the user, resetting turn count') # type: ignore
        state_p.last_policy = state_p.cur_policy
        policy = self.policies[state_p.cur_policy] # type: ignore
        # Then need to get the action from a policy
        action = policy.get_action(state_p, action_space, additional_features)
        self.logger.primary_info(f'OPINION\'s strategy chose action {action}') # type: ignore
        action_space = self.get_action_space(state_p, pos_reasons, neg_reasons, related_entities) # Redefine action space for checks in case cur_phrase changed
        if action not in action_space:
            # Defensive check: a misbehaving policy must not produce an out-of-space action.
            self.logger.error(f'OPINION policy {repr(policy)} generated an action {action} that is not in the action space {action_space}. Check policy implementation.')
            new_state = state.reset_state()
            return emptyResult(new_state)
        # Then need to utterancify the action
        text, phrase, reason = fancy_utterancify(state_p, action, pos_reasons, neg_reasons,
                                                related_entities, should_evaluate, self.state_manager.current_state.choose_least_repetitive) # type: ignore
        # Then need to fill the rest of the fields of state_p (through mutation)
        state_p = state_actions.fill_state_on_action(state_p, action, text, phrase, additional_features, reason,
            self.opinionable_phrases, self.opinionable_entities)

        state_p.last_turn_select = True
        state_p.last_turn_prompt = False
        user_sentiment_history_dict = dict(state.user_sentiment_history)
        wiki_entity = None
        # Only hand over a wiki entity when the user likes the phrase (sentiment > 2)
        # and the phrase is flagged as suitable for a wiki discussion.
        if phrase != '' and phrase in user_sentiment_history_dict and user_sentiment_history_dict[phrase] > 2 \
                and self.opinionable_phrases[phrase].good_for_wiki:
            wiki_entity = get_entity_by_wiki_name(self.opinionable_phrases[phrase].wiki_entity_name)
        state.last_turn_prompt, state.last_turn_select = False, False
        needs_prompt = False
        if action.exit:
            if len(state_p.action_history) > 6:
                self.logger.primary_info(f"Opinion had a conversation of length {len(state_p.action_history)}, will reset long_policy count") # type: ignore
                state_p.num_turns_since_long_policy = 0
            if not should_evaluate:
                needs_prompt = True
            if len(state_p.action_history) < 4:
                self.logger.primary_info(f"Opinion only had 4 turns. Will WEAK_CONTINUE the conversation") # type: ignore
                priority = ResponsePriority.WEAK_CONTINUE
            state_p.first_episode = False
        return ResponseGeneratorResult(text, priority, needs_prompt, state, wiki_entity, conditional_state=state_p)