def continue_response(
        self,
        base_response_result: ResponseGeneratorResult,
        new_entity: Optional[str] = None) -> ResponseGeneratorResult:
    """Append an open prompt question to an existing response.

    Raises CantContinueResponseError when no entity is available, when the
    open question has already been given twice for this entity, or when
    getting the prompt question fails.
    """
    state = base_response_result.state
    conditional_state = base_response_result.conditional_state

    updated_state = state.update(conditional_state)
    if not (base_response_result.cur_entity or new_entity):
        raise CantContinueResponseError(
            "Neither base_response_result.cur_entity nor new_entity was given"
        )
    entity = new_entity if new_entity else base_response_result.cur_entity
    times_used = len(updated_state.entity_state[entity.name].highlights_used)
    if times_used >= 2:
        raise CantContinueResponseError(
            "Already gave open response twice, not prompting anymore")

    try:
        prompt_text, new_conditional_state = self.get_prompt_question(
            updated_state, entity)
    except CantPromptError as e:
        raise CantContinueResponseError(*e.args) from e

    # Mutate the existing conditional state in place, then hand back the
    # same result object with the prompt appended.
    conditional_state.prompt_handler = self.__repr__()
    conditional_state.cur_doc_title = new_conditional_state.cur_doc_title
    conditional_state.open_question = new_conditional_state.open_question
    base_response_result.conditional_state = conditional_state
    base_response_result.text = base_response_result.text + ' ' + prompt_text
    base_response_result.needs_prompt = False
    base_response_result.cur_entity = entity
    return base_response_result
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Detect closing intent and confirm before ending the conversation."""
        if state['has_just_asked_to_exit']:
            # We asked the confirmation question last turn; interpret the answer.
            current_text = self.state_manager.current_state.text
            dialog_act = self.state_manager.current_state.dialog_act

            # "No" (or a negative-confirmation phrase) means keep chatting.
            wants_to_continue = (
                dialog_act['is_no_answer']
                or ClosingNegativeConfirmationTemplate().execute(current_text) is not None)
            if wants_to_continue:
                return ResponseGeneratorResult(
                    text=random.choice(CLOSING_CONFIRMATION_CONTINUE),
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=None,
                    conditional_state={'has_just_asked_to_exit': False})

            # "Yes" (or a positive-confirmation phrase) means end the conversation.
            wants_to_stop = (
                dialog_act['is_yes_answer']
                or ClosingPositiveConfirmationTemplate().execute(current_text) is not None)
            if wants_to_stop:
                return ResponseGeneratorResult(
                    text=CLOSING_CONFIRMATION_STOP,
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=False,
                    state=state,
                    cur_entity=None,
                    conditional_state={'has_just_asked_to_exit': False})

            # Neither matched: let another RG handle the turn.
            return emptyResult(state)

        # Haven't asked yet: on closing intent, ask the confirmation question.
        if self.user_trying_to_stop():
            return ResponseGeneratorResult(
                text=random.choice(CLOSING_CONFIRMATION_QUESTION),
                priority=ResponsePriority.FORCE_START,
                needs_prompt=False,
                state=state,
                cur_entity=None,
                conditional_state={'has_just_asked_to_exit': True})
        return emptyResult(state)
Exemple #3
0
    def continue_response(
            self,
            base_response_result: ResponseGeneratorResult,
            new_entity: Optional[str] = None) -> ResponseGeneratorResult:
        """Extend base_response_result with a section prompt for its entity.

        :param base_response_result: the response being continued
        :param new_entity: optional entity to prefer over
            base_response_result.cur_entity
        :raises CantContinueResponseError: if no entity is available or
            prompting for sections fails
        """
        state = base_response_result.state
        conditional_state = base_response_result.conditional_state

        new_state = state.update(conditional_state)
        if not (base_response_result.cur_entity or new_entity):
            raise CantContinueResponseError(
                "Neither base_response_result.cur_entity nor new_entity was given"
            )
        # BUGFIX: new_entity was validated above but then ignored (entity was
        # always cur_entity). Prefer new_entity when given, matching the other
        # continue_response implementations.
        entity = new_entity or base_response_result.cur_entity

        try:
            text, new_conditional_state = self.prompt_sections(
                new_state,
                entity,
                # repeat if we've already discussed a section for this entity
                repeat=conditional_state.discussed_section is not None,
                have_response=base_response_result.text != '')
        except CantPromptError as e:
            raise CantContinueResponseError(*e.args) from e

        text = base_response_result.text + ' ' + text
        # Carry the prompt bookkeeping over onto the existing conditional state.
        conditional_state.suggested_sections = new_conditional_state.suggested_sections
        conditional_state.prompted_options = new_conditional_state.prompted_options
        conditional_state.cur_doc_title = new_conditional_state.cur_doc_title
        conditional_state.prompt_handler = new_conditional_state.prompt_handler

        base_response_result.conditional_state = conditional_state
        base_response_result.text = text
        base_response_result.needs_prompt = False
        return base_response_result
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """One-turn-hack responses: hand-written replies for chatty phrases,
        hesitation, vague positive navigation intent, and name corrections.
        """
        utterance = self.state_manager.current_state.text.lower()
        nav_intent_output = self.state_manager.current_state.navigational_intent

        # George Floyd question gets a fixed response and a smooth handoff to WIKI.
        if self.talk_about_george_floyd(state, utterance):
            blm_entity = get_entity_by_wiki_name("Black Lives Matter")
            return ResponseGeneratorResult(text=RESPONSE_TO_QUESTION_ONE_GEORGE_FLOYD, 
                                        priority=ResponsePriority.FORCE_START,
                                        needs_prompt=True, state=state,
                                        cur_entity=blm_entity, conditional_state={"talked_about_blm": True},
                                        smooth_handoff=SmoothHandoff.ONE_TURN_TO_WIKI_GF)

        # Check for chatty phrases in utterance
        slots = ChattyTemplate().execute(utterance)
        my_name_slots = MyNameIsNonContextualTemplate().execute(utterance)
        not_my_name_slots = MyNameIsNotTemplate().execute(utterance)
        if slots is not None:
            chatty_phrase = slots["chatty_phrase"]
            logger.primary_info('Detected chatty phrase intent with slots={}'.format(slots))

            # Step 3: Get response from dictionary of hand-written responses
            response, needs_prompt = one_turn_responses[chatty_phrase]
            logger.primary_info('Chatty RG returned user_response={}'.format(response))

        # Check for user hesitating while trying to navigate to a topic
        elif nav_intent_output.pos_intent and nav_intent_output.pos_topic_is_hesitate and "depends on" not in utterance:
            logger.primary_info('User has PositiveNavigationalIntent with topic=HESITATE, so asking them for topic again')
            response, needs_prompt = "I think I missed the last part of that sentence. Can you tell me one more time what you want to talk about?", False

        # Check for user giving general positive talking intent (e.g. "i want to chat")
        # If WIKI is supposed to handle the utterance and it contains tell, it typically means user is asking for more info (and hence doesn't really specify topic)
        elif nav_intent_output.pos_intent and nav_intent_output.pos_topic is None and not (self.state_manager.last_state_active_rg == 'WIKI' and contains_phrase(utterance, {'tell'})):
            logger.primary_info('User has PositiveNavigationalIntent with topic=None, so ONE_TURN_HACK is responding with "What would you like to talk about?"')
            response, needs_prompt = "Ok, I'd love to talk to you! What would you like to talk about?", False

        # Check for user correcting their name
        elif (my_name_slots and self.state_manager.last_state_active_rg and not self.state_manager.last_state_active_rg == 'LAUNCH') or not_my_name_slots:
            logger.primary_info('User is attempting to correct name.')
            response = "Oops, it sounds like I got your name wrong. I'm so sorry about that! I won't make that mistake again."
            needs_prompt = True
            # Forget the stored name so we stop using the wrong one.
            setattr(self.state_manager.user_attributes, 'name', None)

        # Otherwise return empty
        else:
            return emptyResult(state)

        # Step 7: set priority
        priority = ResponsePriority.FORCE_START
        is_safe = True  # NOTE(review): is_safe is unused here — confirm it can be removed

        # Step 8: return result
        # NOTE(review): conditional_state is set to the full `state` dict here,
        # while sibling RGs pass a small delta dict — confirm this is intended.
        return ResponseGeneratorResult(text=response, priority=priority, needs_prompt=needs_prompt, state=state,
                                       cur_entity=None, conditional_state=state)
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Give a neural (gpt2ed) fallback response when no other RG applies."""
        current_state = self.state_manager.current_state

        # Skip when the current entity is a concrete (non-category) entity.
        cur_entity = current_state.entity_tracker.cur_entity
        if cur_entity is not None and not cur_entity.is_category:
            logger.info("entity_tracker.cur_entity exists and is not a category, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # Skip on the first turn, or while the LAUNCH RG is still mid-flow.
        launch_active = (
            (self.state_manager.last_state_active_rg == 'LAUNCH'
             and self.state_manager.last_state.response_generator_states['LAUNCH'].next_treelet)
            or len(current_state.history) <= 1)
        if launch_active:
            logger.info("LAUNCH RG active, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # Skip when one of these RGs was active on the previous turn.
        if self.state_manager.last_state_active_rg in {'OPINION', 'NEURAL_CHAT', 'CATEGORIES'}:
            logger.info("self.state_manager.last_state_active_rg RG active, skipping NeuralFallbackResponseGenerator")
            return emptyResult(state=state)

        # Lazily run gpt2ed now if the NLP pipeline didn't already, caching
        # the output on the current state.
        if not hasattr(current_state, 'gpt2ed'):
            setattr(current_state, 'gpt2ed', GPT2ED(self.state_manager).execute())

        # Pick a fallback response, if any is available.
        neural_fallback = get_random_fallback_neural_response(current_state)
        if not neural_fallback:
            return emptyResult(state=state)
        return ResponseGeneratorResult(
            text=neural_fallback,
            priority=ResponsePriority.UNIVERSAL_FALLBACK,
            needs_prompt=True,
            state=state,
            cur_entity=current_state.entity_tracker.cur_entity,
            conditional_state={'used_neural_fallback_response': True})
Exemple #6
0
    def get_response(self, state: State,
                     state_manager) -> ResponseGeneratorResult:
        """Ask the first unasked question for state.cur_category_name."""
        category_name = state.cur_category_name
        question = state.get_first_category_response(
            category_name, state_manager)  # CategoryQuestion or None
        if not question:
            return emptyResult(state)

        # Use whichever of statement / question is present; both if available.
        if question.statement is None:
            question_str = question.question
        elif question.question is None:
            question_str = question.statement
        else:
            question_str = f'{question.statement} {question.question}'

        response = "{} {}".format(choice(ACKNOWLEDGEMENTS), question_str)
        # History category only CAN_START; all others force.
        priority = (ResponsePriority.CAN_START
                    if category_name == HistoryCategory.__name__
                    else ResponsePriority.FORCE_START)
        cur_entity = get_entity_by_wiki_name(question.cur_entity_wiki_name,
                                             state_manager.current_state)
        conditional_state = ConditionalState(HandleAnswerTreelet.__name__,
                                             category_name,
                                             question.statement,
                                             question.question, False)
        return ResponseGeneratorResult(
            text=response,
            priority=priority,
            needs_prompt=False,
            state=state,
            cur_entity=cur_entity,
            expected_type=question.expected_type,
            conditional_state=conditional_state)
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """Offer a ShowerThoughts thread about the current entity, if one exists."""
        cur_entity = self.state_manager.current_state.entity_tracker.cur_entity
        if cur_entity is None:
            return emptyResult(state)

        topic = cur_entity.common_name  # lowercase
        logger.primary_info(
            'Chose this topic for Showerthoughts: {}'.format(topic))

        thread = self.get_showerthoughts_result(state, topic)
        if not thread:
            # No matching thread found: give no response.
            return emptyResult(state)

        logger.primary_info(
            'Chose this ShowerThought thread: {}'.format(thread))
        response_text = random.choice(INFORM_SHOWER_THOUGHTS).format(thread['title'])
        return ResponseGeneratorResult(
            text=response_text,
            priority=ResponsePriority.CAN_START,
            needs_prompt=True,
            state=state,
            cur_entity=cur_entity,
            conditional_state=ConditionalState(used_thread_id=thread['threadId']))
Exemple #8
0
    def get_can_start_response(self, state: State) -> ResponseGeneratorResult:
        """Build a CAN_START response prompting sections for the recommended entity.

        :param state: the current state
        :type state: chirpy.response_generators.wiki.dtypes.State
        :return: the response
        :rtype: ResponseGeneratorResult
        :raises CantRespondError: when no entity is recommended or prompting
            for sections fails
        """
        entity = self.rg.get_recommended_entity(state)
        if not entity:
            raise CantRespondError("No recommended entity")
        try:
            text, conditional_state = self.prompt_sections(
                state, entity, have_response=False)
            conditional_state.responding_treelet = self.__repr__()
        except CantPromptError as e:
            raise CantRespondError(*e.args) from e
        return ResponseGeneratorResult(
            text=text,
            priority=ResponsePriority.CAN_START,
            needs_prompt=False,
            state=state,
            cur_entity=entity,
            conditional_state=conditional_state)
 def get_response(self, state, utterance):
     """Stub response for the MUSIC genre treelet."""
     return ResponseGeneratorResult(text="MUSIC This is GENRE treelet",
                                    priority=ResponsePriority.CAN_START,
                                    needs_prompt=True,
                                    state=state,
                                    cur_entity=None,
                                    conditional_state=state)
Exemple #10
0
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Respond when the user's utterance was flagged as offensive."""
        utterance = self.state_manager.current_state.text

        # We asked "why did you say that?" last turn — acknowledge the answer.
        if state['handle_response']:
            bot_response = state['followup'] if state['followup'] else "Okay."
            state['handle_response'] = False
            return ResponseGeneratorResult(
                text=bot_response,
                priority=ResponsePriority.FORCE_START,
                needs_prompt=True,
                state=state,
                cur_entity=None,
                conditional_state={'handled_response': True})

        # If the user is criticizing us, give criticism response

        # A yes/no word suggests the user is answering a question rather than
        # being offensive, so stand down.
        tokens = utterance.split()
        for word in YES + NO:
            if word in tokens:
                logger.primary_info(
                    'User\'s utterance "{}" was classified as offensive, but it contains yes/no '  # type: ignore
                    'word "{}", so OFFENSIVE_USER RG is not responding'.format(
                        utterance, word))
                return emptyResult(state)

        bot_response, needs_prompt = self._get_experimental_bot_response(state)
        if bot_response is None:
            return emptyResult(state)
        logger.primary_info(
            'User\'s utterance "{}" was classified as offensive, so giving OFFENSIVE_USER_RESPONSE'
            .format(utterance))  # type: ignore
        return ResponseGeneratorResult(
            text=bot_response,
            priority=ResponsePriority.FORCE_START,
            needs_prompt=needs_prompt,
            state=state,
            cur_entity=None,
            conditional_state={'used_offensiveuser_response': True})
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Respond to user complaints: misheard, clarification, repetition,
        privacy, or a generic complaint detected by the dialog-act classifier.

        Returns a FORCE_START result with a canned complaint response, or a
        result with priority NO and text None when no complaint was detected.
        """
        utterance = self.state_manager.current_state.text

        response_text = None
        priority = ResponsePriority.NO

        # FIX: removed the duplicated "matches matches" and the "clarificaton"
        # misspelling in the log messages below.
        if ComplaintMisheardTemplate().execute(utterance) is not None:
            logger.primary_info(
                f'User\'s utterance "{utterance}" matches misheard template. Responding with MISHEARD_COMPLAINT_RESPONSE'
            )
            priority = ResponsePriority.FORCE_START
            response_text = self.state_manager.current_state.choose_least_repetitive(
                MISHEARD_COMPLAINT_RESPONSE)

        elif ComplaintClarificationTemplate().execute(
                utterance) is not None and not (
                    self.state_manager.last_state_active_rg == 'WIKI'):
            logger.primary_info(
                f'User\'s utterance "{utterance}" matches clarification template. Responding with CLARIFICATION_COMPLAINT_RESPONSE'
            )
            priority = ResponsePriority.FORCE_START
            response_text = self.state_manager.current_state.choose_least_repetitive(
                CLARIFICATION_COMPLAINT_RESPONSE)

        # Sometimes when doing convpara in wiki, they ask a "surprised/doubtful" what which is handled there
        elif ComplaintRepetitionTemplate().execute(utterance) is not None:
            logger.primary_info(
                f'User\'s utterance "{utterance}" matches repetition template. Responding with REPETITION_COMPLAINT_RESPONSE'
            )
            priority = ResponsePriority.FORCE_START
            response_text = self.state_manager.current_state.choose_least_repetitive(
                REPETITION_COMPLAINT_RESPONSE)

        elif ComplaintPrivacyTemplate().execute(utterance) is not None:
            logger.primary_info(
                f'User\'s utterance "{utterance}" matches privacy template. Responding with PRIVACY_COMPLAINT_RESPONSE'
            )
            priority = ResponsePriority.FORCE_START
            response_text = self.state_manager.current_state.choose_least_repetitive(
                PRIVACY_COMPLAINT_RESPONSE)

        elif self.state_manager.current_state.dialog_act['probdist'][
                'complaint'] > COMPLAINT_THRESHOLD:
            logger.primary_info(
                f'User\'s utterance "{utterance}" was classified as a complaint. Responding with GENERIC_COMPLAINT_RESPONSE'
            )
            priority = ResponsePriority.FORCE_START
            response_text = self.state_manager.current_state.choose_least_repetitive(
                GENERIC_COMPLAINT_RESPONSE)

        return ResponseGeneratorResult(text=response_text,
                                       priority=priority,
                                       needs_prompt=True,
                                       state=state,
                                       cur_entity=None)
 def get_can_start_response(self, state : State) -> ResponseGeneratorResult:
     """Introduce a previously user-mentioned entity we haven't talked about.

     :raises CantRespondError: if a recommended entity is already set
     """
     entity = self.rg.get_recommended_entity(state)
     if entity:
         raise CantRespondError(f"cur_entity {entity} has been set. Will not try to introduce a previously user mentioned untalked entity")
     latest_untalked_entity, text, conditional_state = self.introduce_entity(state)
     return ResponseGeneratorResult(
         text=text,
         priority=ResponsePriority.CAN_START,
         needs_prompt=False,
         state=state,
         cur_entity=latest_untalked_entity,
         conditional_state=conditional_state)
Exemple #13
0
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Respond to common vague answers given after a FALLBACK turn."""
        state_manager = self.state_manager
        user_text = state_manager.current_state.text

        text = None
        if state_manager.last_state_active_rg == 'FALLBACK':
            # Try each (template class, canned responses) pair in priority order.
            for template_cls, responses in (
                    (DontKnowTemplate, RESPONSE_TO_DONT_KNOW),
                    (BackChannelingTemplate, RESPONSE_TO_BACK_CHANNELING),
                    (EverythingTemplate, RESPONSE_TO_EVERYTHING_ANS),
                    (NotThingTemplate, RESPONSE_TO_NOTHING_ANS)):
                if template_cls().execute(user_text) is not None:
                    text = state_manager.current_state.choose_least_repetitive(responses)
                    break

        if text:
            return ResponseGeneratorResult(text=text, priority=ResponsePriority.WEAK_CONTINUE,
                                           needs_prompt=True, state=state, cur_entity=None,
                                           conditional_state={'used_fallback_response': True})
        # Nothing matched: give the generic fallback at the lowest priority.
        return ResponseGeneratorResult(text=FALLBACK_RESPONSE, priority=ResponsePriority.UNIVERSAL_FALLBACK,
                                       needs_prompt=True, state=state, cur_entity=None,
                                       conditional_state={'used_fallback_response': True})
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Acknowledge an entity the user just brought up, choosing a canned
        acknowledgment from the most specific matching EntityGroup.
        """
        current_state = self.state_manager.current_state
        cur_entity = current_state.entity_tracker.cur_entity

        # If the cur_entity isn't a non-None entity initiated by the user on this turn, do nothing
        if not current_state.entity_tracker.cur_entity_initiated_by_user_this_turn(current_state):
            logger.primary_info(f'cur_entity {cur_entity} is not a non-None entity initiated by the user on this turn, so '
                        f'Acknowledgment RG is doing nothing')
            return emptyResult(state)

        # Don't acknowledge entities that OPINION has Twitter opinions on (to avoid contradiction)
        if cur_entity.name in opinionable_entity_names:
            logger.primary_info(f'Opinion RG has Twitter opinions for cur_entity {cur_entity}, so Acknowledgment RG is doing nothing (to avoid contradiction)')
            return emptyResult(state)

        # If we've already acknowledged cur_entity, do nothing
        if cur_entity.name in state.acknowledged:
            logger.primary_info(f'We have already acknowledged cur_entity {cur_entity}, so Acknowledgment RG is doing nothing')
            return emptyResult(state)

        # Go through all possible EntityGroups, from most specific to least specific.
        # For the first one matching cur_entity, that we have acknowledgments for, give the acknowledgment
        for ent_group_name, ent_group in ENTITY_GROUPS_FOR_CLASSIFICATION.ordered_items:
            if ent_group.matches(cur_entity) and ent_group_name in ACKNOWLEDGMENT_DICTIONARY:
                logger.primary_info(f'cur_entity {cur_entity} matches EntityGroup "{ent_group_name}" which we have an acknowledgment for, so giving acknowledgment')
                acknowledgments = [a.format(entity=cur_entity.common_name) for a in ACKNOWLEDGMENT_DICTIONARY[ent_group_name]]
                acknowledgment = self.state_manager.current_state.choose_least_repetitive(acknowledgments)

                # Set priority to FORCE_START if the last active RG was Categories or Fallback (which ask questions that they don't handle), or if the user gave PosNav intent on this turn
                # Otherwise, set priority to CAN_START (so we don't interrupt the active RG's STRONG_CONTINUE)
                # NOTE: music-related groups are always CAN_START, checked before the FORCE_START cases below.
                if ent_group_name in ['musician', 'musical_group', 'musical_work']:
                    logger.info(f'The best matching group is {ent_group_name}, so Acknowledgment RG is using CAN_START priority to acknowledge cur_entity {cur_entity}')
                    priority = ResponsePriority.CAN_START
                elif self.state_manager.last_state_active_rg in ['CATEGORIES', 'FALLBACK']:
                    logger.info(f'Last active RG was Categories or Fallback, so Acknowledgment RG is using FORCE_START priority to acknowledge cur_entity {cur_entity}')
                    priority = ResponsePriority.FORCE_START
                elif self.state_manager.current_state.navigational_intent.pos_intent:
                    logger.info(f'User has PosNav intent on this turn, so Acknowledgment RG is using FORCE_START priority to acknowledge cur_entity {cur_entity}')
                    priority = ResponsePriority.FORCE_START
                else:
                    logger.info(f"The last active RG is not Categories or Fallback, and the user doesn't have PosNav intent on this turn, so Acknowledgment RG is using CAN_START priority to acknowledge cur_entity {cur_entity}")
                    priority = ResponsePriority.CAN_START

                response = ResponseGeneratorResult(text=acknowledgment, priority=priority, needs_prompt=True, state=state,
                                                   cur_entity=cur_entity, conditional_state=ConditionalState(cur_entity.name))
                return response

        # Return an empty response if all else fails.
        logger.primary_info(f"cur_entity {cur_entity} didn't match any EntityGroups that we have acknolwedgments for, so Acknowledgment RG is giving no response")
        return emptyResult(state)
Exemple #15
0
    def continue_response(
        self, base_response_result: ResponseGeneratorResult
    ) -> ResponseGeneratorResult:
        """Append a "wanna know more?" TIL prompt to an existing response.

        Raises CantContinueResponseError when the result has no cur_entity
        or there is no further TIL to offer for it.
        """
        state = base_response_result.state
        conditional_state = base_response_result.conditional_state
        new_state = state.update(conditional_state)

        entity = base_response_result.cur_entity
        if not entity:
            raise CantContinueResponseError(
                "base_response_result.cur_entity was not set")
        if not self.get_til(entity.name, new_state):
            raise CantContinueResponseError("Not prompting for more TILs.")

        suffix = self.rg.state_manager.current_state.choose_least_repetitive(
            WANNA_KNOW_MORE)
        conditional_state.prompted_options = [entity.name]
        conditional_state.prompt_handler = self.__repr__()
        base_response_result.conditional_state = conditional_state
        base_response_result.text = base_response_result.text + ' ' + suffix
        base_response_result.needs_prompt = False

        return base_response_result
    def get_response(self, state: dict) -> ResponseGeneratorResult:
        """Deflect questions about other assistants, our identity, or banned advice."""
        text = self.state_manager.current_state.text

        if text == '':  # e.g. on first turn
            return emptyResult(state)

        def _forced(response_text):
            # All deflections share the same priority/prompt settings.
            return ResponseGeneratorResult(
                text=response_text,
                priority=ResponsePriority.FORCE_START,
                needs_prompt=True,
                state=state,
                cur_entity=None)

        # Mentions of other virtual assistants get a "don't know them" reply.
        for virtual_assistant in ('siri', 'cortana'):
            if utterance_contains_word(text, virtual_assistant):
                return _forced(DONT_KNOW_RESPONSE.format(virtual_assistant))

        # Identity questions get deflected.
        identity_response = get_identity_deflection_response(text)
        if identity_response:
            return _forced(identity_response)

        # Banned advice questions (medical, legal, ...) get deflected too.
        advice_type = self.advice_type(text)
        if advice_type is not None:
            return _forced(DEFLECTION_RESPONSE.format(advice_type))

        return emptyResult(state)
    def handle_prompt(self, state: State) -> ResponseGeneratorResult:
        """Handle the user's answer to a "want to hear more?" style prompt.

        Yes -> keep talking about the entity; no or an ambiguous answer ->
        mark the entity as finished and hand over to another RG.

        :raises CantRespondError: if the recommended entity changed since
            the prompt was given last turn
        """
        utterance = self.rg.state_manager.current_state.text.lower()
        last_entity = self.rg.state_manager.current_state.entity_tracker.last_turn_end_entity
        entity = self.rg.get_recommended_entity(state)
        # The prompt only makes sense if we're still on the same entity as last turn.
        if entity != last_entity:
            raise CantRespondError("Recommended entity changed from last turn")
        if self.is_no(utterance):
            # User declined: stop talking about this entity and hand over.
            state.entity_state[entity.name].finished_talking = True
            conditional_state = ConditionalState(responding_treelet=self.__repr__())
            return ResponseGeneratorResult(text=self.rg.state_manager.current_state.choose_least_repetitive(HANDOVER_TEXTS),
                                           priority=ResponsePriority.STRONG_CONTINUE,
                                           needs_prompt=True,
                                           state=state,
                                           cur_entity=None,
                                           conditional_state=conditional_state)
        elif self.is_yes(utterance):
            # User agreed: keep the entity and continue with its document.
            conditional_state = ConditionalState(
                cur_doc_title=entity.name,
                responding_treelet=self.__repr__())

            return ResponseGeneratorResult(text=self.rg.state_manager.current_state.choose_least_repetitive(AGREED),
                                           priority=ResponsePriority.STRONG_CONTINUE,
                                           needs_prompt=True,
                                           state=state,
                                           cur_entity=entity,
                                           conditional_state=conditional_state)

        else:
            # Ambiguous case: treat like a decline and hand over.
            state.entity_state[entity.name].finished_talking = True
            conditional_state = ConditionalState(responding_treelet=self.__repr__())
            # NOTE(review): ABMIGUOUS looks like a misspelling of AMBIGUOUS in the
            # constant defined elsewhere — confirm and rename there if so.
            return ResponseGeneratorResult(text=self.rg.state_manager.current_state.choose_least_repetitive(ABMIGUOUS),
                                           priority=ResponsePriority.STRONG_CONTINUE,
                                           needs_prompt=True,
                                           state=state,
                                           cur_entity=None,
                                           conditional_state=conditional_state)
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """Give the launch phrase, first wiping any remembered user name.

        Wiping avoids carrying one user's name into the next conversation
        (e.g. calling Bob "Alice" if Bob declines to give a name). In the
        future we may want to support greeting users by remembered name.
        """
        setattr(self.state_manager.user_attributes, 'name', None)

        return ResponseGeneratorResult(
            text=LAUNCH_PHRASE,
            priority=ResponsePriority.FORCE_START,
            needs_prompt=False,
            state=state,
            cur_entity=None,
            conditional_state=ConditionalState(HandleNameTreelet.__name__))
 def get_can_start_response(self, state: State) -> ResponseGeneratorResult:
     """Offer an open-ended prompt question about the recommended entity.

     :param state: the current WIKI state
     :return: a CAN_START result asking the prompt question
     :raises CantRespondError: when there is no recommended entity, the entity
         has already been given two open-ended highlights, or no prompt
         question is available
     """
     entity = self.rg.get_recommended_entity(state)
     if not entity:
         raise CantRespondError("No recommended entity")

     # Cap open-ended prompting at two highlights per entity.
     highlights = state.entity_state[entity.name].highlights_used
     if len(highlights) >= 2:
         raise CantRespondError(
             "Already gave open response twice, not prompting anymore")

     try:
         text, conditional_state = self.get_prompt_question(state)
     except CantPromptError as e:
         raise CantRespondError(*e.args) from e

     return ResponseGeneratorResult(
         text=text,
         priority=ResponsePriority.CAN_START,
         state=state,
         cur_entity=entity,
         needs_prompt=False,
         conditional_state=conditional_state)
Exemple #20
0
    def get_can_start_response(self, state: State) -> ResponseGeneratorResult:
        """Respond with the Wikipedia overview section for the recommended entity.

        :type state: chirpy.response_generators.wiki.dtypes.State
        :return: the result of the current turn
        :rtype: ResponseGeneratorResult
        :raises CantRespondError: if there is no recommended entity, an overview
            is judged inappropriate for this turn, or no unused overview exists
            for the entity
        """

        entity = self.rg.get_recommended_entity(state)
        utterance = self.rg.state_manager.current_state.text
        history = self.rg.state_manager.current_state.history
        # Last bot utterance, if any turn has happened yet.
        previous_bot_response = history[-1] if len(history) > 0 else None
        # No good high precision spans to talk about
        if not entity:
            raise CantRespondError("No recommended entity")

        # Heuristic gate: is reading an overview appropriate given the user's
        # utterance and our previous response? `reason` explains the verdict.
        appropriate, reason = self.is_appropriate(
            utterance=utterance,
            entity=entity,
            previous_bot_response=previous_bot_response)
        if not appropriate:
            raise CantRespondError(reason)

        logger.info(f"{entity.name} appropriate for overview because {reason}")
        # get_overview returns a falsy value when no (unused) overview exists.
        overview = self.get_overview(entity.name)
        if not overview:
            raise CantRespondError(
                f"No unused overview found for entity {entity.name}")

        logger.primary_info(f'Wiki has found an overview section for {entity}')
        # If we have an overview, read it
        text = overview
        cur_doc_title = entity.name
        conditional_state = ConditionalState(
            cur_doc_title=cur_doc_title, responding_treelet=self.__repr__())
        base_response_result = ResponseGeneratorResult(
            text=text,
            priority=ResponsePriority.CAN_START,
            needs_prompt=True,
            state=state,
            cur_entity=entity,
            conditional_state=conditional_state)
        return base_response_result
 def prepare_rg_result(text,
                       state,
                       priority=None,
                       needs_prompt=False,
                       cur_entity=None,
                       conditional_state=None):
     """Assemble a ResponseGeneratorResult, filling in sensible defaults.

     When no priority is supplied, STRONG_CONTINUE is used if a treelet history
     exists, otherwise FORCE_START. When no conditional state is supplied, a
     fresh one that requests an internal prompt is created.

     :param text: the utterance text for this turn
     :param state: the current RG state
     :param priority: explicit ResponsePriority, or None to pick a default
     :param needs_prompt: whether another RG should supply a prompt
     :param cur_entity: the entity this response is about, if any
     :param conditional_state: explicit conditional state, or None for a default
     :return: the assembled ResponseGeneratorResult
     """
     if priority is None:
         if state.treelet_history:
             priority = ResponsePriority.STRONG_CONTINUE
         else:
             priority = ResponsePriority.FORCE_START
     if conditional_state is None:
         conditional_state = ConditionalState()
         conditional_state.needs_internal_prompt = True
     return ResponseGeneratorResult(text=text,
                                    priority=priority,
                                    needs_prompt=needs_prompt,
                                    state=state,
                                    cur_entity=cur_entity,
                                    conditional_state=conditional_state)
 def respond_neg_nav(self, state : State, wiki_entity : Optional[WikiEntity]) -> ResponseGeneratorResult:
     """Hard-switch out of OPINION when the user asks to change the subject.

     :param state: the current state
     :type state: State
     :param wiki_entity: the current WIKI entity that we are using, if any
     :type wiki_entity: Optional[WikiEntity]
     :return: a result that can be directly returned from the get_response function
     :rtype: ResponseGeneratorResult
     """
     self.logger.primary_info('NavigationalIntent is negative, so doing a hard switch out of OPINION') # type: ignore
     # Reset our state for the next episode, but mark that we selected this turn.
     conditional_state = state.reset_state()
     conditional_state.last_turn_select = True
     # Prefer a neural handoff phrase; fall back to a canned acknowledgement.
     handoff_text = get_neural_fallback_handoff(self.state_manager.current_state) or "Ok, cool."
     return ResponseGeneratorResult(
         text=handoff_text,
         priority=ResponsePriority.WEAK_CONTINUE,
         needs_prompt=True,
         state=state,
         cur_entity=None,
         conditional_state=conditional_state)
Exemple #23
0
    def handle_prompt(self, state: State):
        """Handle the user's reply to a TIL prompt: continue on yes, acknowledge on no.

        :param state: the current WIKI state
        :raises CantRespondError: when the reply is neither a clear yes nor no
        """
        utterance = self.rg.state_manager.current_state.text.lower()
        entity = self.rg.get_recommended_entity(state)

        if self.is_yes(utterance) and entity:
            # User wants to hear more: reuse the TIL response, upgraded to
            # STRONG_CONTINUE since we asked the question.
            response = self.respond_til(state, entity)
            response.priority = ResponsePriority.STRONG_CONTINUE
            return response

        if self.is_no(utterance):
            ack_text = self.rg.state_manager.current_state.choose_least_repetitive(ACKNOWLEDGE_NO)
            return ResponseGeneratorResult(
                text=ack_text,
                priority=ResponsePriority.STRONG_CONTINUE,
                needs_prompt=True,
                state=state,
                cur_entity=None,
                conditional_state=ConditionalState(
                    responding_treelet=self.__repr__(), ))

        raise CantRespondError(
            "couldn't classify user response into YES or NO")
Exemple #24
0
    def respond_til(self,
                    state: State,
                    entity: WikiEntity,
                    til_text: Optional[str] = None) -> ResponseGeneratorResult:
        """This method definitely responds with a TIL if exists and suggest either another
        TIL or sections depending on some heuristics

        :param state: The current state
        :type state: chirpy.response_generators.wiki.dtypes.State
        :param entity: The resolved entity that we are trying to get a TIL for
        :type entity: WikiEntity
        :param til_text: pre-formatted TIL text to read out; when None, a fresh
            unused TIL is fetched for the entity
        :type til_text: Optional[str]
        :return: The response generator result
        :rtype: ResponseGeneratorResult
        :raises CantRespondError: when no unused TIL is available for the entity

        """

        if not til_text:
            til_response = self.get_til(entity.name, state)
            if not til_response:
                raise CantRespondError("Not responding with more TILs")
            # assumes get_til returns (til text, doc titles, section titles) — TODO confirm
            til_texts, doc_titles, section_titles = til_response
            # Wrap the TIL in an "I learned ..." template, rotating templates to
            # avoid repeating ourselves across turns.
            til_text = self.rg.state_manager.current_state.choose_least_repetitive(
                I_LEARNED).format(til_texts)

        # Ensure the utterance ends with sentence-final punctuation.
        if til_text[-1] not in ['.', '!', '?']:
            til_text += '.'
        logger.primary_info(
            f'WIKI is responding with a TIL to entity {entity.name}')
        conditional_state = ConditionalState(
            cur_doc_title=entity.name,
            responding_treelet=self.__repr__(),
            til_used=til_text)
        base_response_result = ResponseGeneratorResult(
            text=til_text,
            priority=ResponsePriority.CAN_START,
            needs_prompt=True,
            state=state,
            cur_entity=entity,
            conditional_state=conditional_state)
        return base_response_result
Exemple #25
0
    def handle_prompt(self, state: State) -> ResponseGeneratorResult:
        """Continue a conversation started by one of this treelet's prompts.

        Dispatches on the sub-handler recorded in ``state.prompt_handler``:

        * ``wanna_know_more`` -- the prompt had a yes/no answer; respond with a
          new TIL on yes, acknowledge on no, otherwise give up.
        * ``paraphrase_handler`` -- the previous turn was a paraphrased TIL;
          gauge the user's reaction (confused / disinterested / surprised /
          interested) and respond accordingly.

        :param state: the current WIKI state
        :type state: chirpy.response_generators.wiki.dtypes.State
        :return: the response for this turn
        :rtype: ResponseGeneratorResult
        :raises CantRespondError: when the recommended entity changed since last
            turn, the reply cannot be classified, or the user is disinterested
        """
        utterance = self.rg.state_manager.current_state.text.lower()
        last_entity = self.rg.state_manager.current_state.entity_tracker.last_turn_end_entity
        entity = self.rg.get_recommended_entity(state)
        if entity != last_entity:
            raise CantRespondError("Recommended entity changed from last turn")
        is_question = self.rg.state_manager.current_state.question[
            'is_question']

        # Prompt came from this (i.e. TIL treelet's) get_prompt function, which has a yes/no answer.
        # NOTE(review): assumes prompt_handler always contains a ':'-separated
        # sub-handler suffix — confirm against where prompt_handler is written.
        prompt_subhandler = state.prompt_handler.split(':')[1]
        if prompt_subhandler == 'wanna_know_more':
            # We should continue only for yes
            if self.is_yes(utterance) and entity:
                first_til_response = self.get_can_start_response(state)
                first_til_response.priority = ResponsePriority.STRONG_CONTINUE
                return first_til_response

            elif self.is_no(utterance):
                return ResponseGeneratorResult(
                    text=random.choice(ACKNOWLEDGE_NO),
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=None,
                    conditional_state=ConditionalState(
                        responding_treelet=self.__repr__(), ))

            raise CantRespondError(
                "couldn't classify user response into YES or NO")

        # Else : need to gauge engagement based on user response
        elif prompt_subhandler == 'paraphrase_handler':
            last_til = state.entity_state[entity.name].tils_used[-1]
            generations_for_last_til = state.entity_state[
                entity.name].conv_paraphrases[last_til]
            last_utterance_was_did_you_know_question = did_you_know.execute(
                generations_for_last_til[-1]
            ) if len(generations_for_last_til) > 0 else False

            # BUGFIX: unigram_overlap was previously computed with n=2, making
            # it identical to bigram_overlap and defeating both the logging and
            # the `unigram_overlap < 0.35` coverage check below; use n=1.
            unigram_overlap = self.ngram_recall(generations_for_last_til,
                                                last_til, 1)
            bigram_overlap = self.ngram_recall(generations_for_last_til,
                                               last_til, 2)
            bigram_unigram_overlap = (unigram_overlap + bigram_overlap) / 2
            # NOTE(review): content_not_covered is computed but never read below
            # — confirm whether it should gate one of the branches or be removed.
            content_not_covered = bigram_unigram_overlap < bigram_overlap_for_repeating_threshold

            logger.primary_info(
                f"Last utterance has {unigram_overlap} unigram_overlap and {bigram_unigram_overlap} average unigram-bigram overlap with the TIL it paraphrased"
            )
            # Cases:
            # User asks for clarification, is confused, or didn't understand what we said
            # These regexes are high precision and if the user is confused or unclear it probably means we paraphrased
            # incorrectly
            if (clarify.execute(utterance) or doubtful.execute(utterance)):
                # Apologize and read out the original til
                text = self.rg.state_manager.current_state.choose_least_repetitive(
                    original_til_templates(apologize=True,
                                           original_til=last_til))
                conditional_state = ConditionalState(
                    cur_doc_title=entity.name,
                    responding_treelet=self.__repr__(),
                    til_used=last_til)
                state.convpara_measurement[
                    'codepath'] = 'apologize_with_original_phrasing_for_unclear_paraphrase'
                response_result = ResponseGeneratorResult(
                    text=text,
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=entity,
                    conditional_state=conditional_state)

                logger.primary_info(
                    f'WIKI is responding with an apology and a non-paraphrased version of the previous TIL entity {entity}'
                )
                return response_result

            # If user sounds disinterested
            elif disinterested.execute(utterance):
                # Do not generate another TIL response
                logger.primary_info(
                    "ConvPara TIL detected explicitly disinterested user. Handing over with WEAK_CONTINUE"
                )
                apology_text = random.choice(HANDOVER_TEXTS)
                apology_response = ResponseGeneratorResult(
                    text=apology_text,
                    priority=ResponsePriority.WEAK_CONTINUE,
                    needs_prompt=True,
                    cur_entity=None,
                    state=state,
                    conditional_state=ConditionalState(
                        responding_treelet=self.__repr__(), ))
                apology_response.state.convpara_measurement[
                    'codepath'] = 'apology_handover_for_explicitly_disinterested'

                return apology_response

            elif (not last_utterance_was_did_you_know_question
                  ) and utterance == 'no':
                # Read out the original til without apologizing
                # Rationale is that users often say no because they are surprised, and we can read out the til verbatim
                # There's a chance we said it wrong the first time, but typically in that case clarify or doubtful catch it
                text = self.rg.state_manager.current_state.choose_least_repetitive(
                    original_til_templates(apologize=False,
                                           original_til=last_til))
                conditional_state = ConditionalState(
                    cur_doc_title=entity.name,
                    responding_treelet=self.__repr__(),
                    til_used=last_til)
                state.convpara_measurement[
                    'codepath'] = 'original_phrasing_for_no_to_nonquestion_paraphrase'
                response_result = ResponseGeneratorResult(
                    text=text,
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=entity,
                    conditional_state=conditional_state)

                logger.primary_info(
                    f'User said no to a non-question, WIKI is responding with a non-paraphrased version of the previous TIL entity {entity}'
                )
                return response_result

            elif last_utterance_was_did_you_know_question and unigram_overlap < 0.35 and len(
                    generations_for_last_til) <= 1:
                # If we asked a did you know style question in the last paraphrase and didn't talk about much of the til
                # measured using unigram overlap, try paraphrasing but only once

                logger.primary_info(
                    f'WIKI trying the previous TIL with paraphrasing for one more turn '
                    f'because we asked a did you know kind of question and we didn\'t cover enough content'
                )
                paraphrased_repeat_result = self.respond_til(
                    state,
                    entity,
                    preferences=ConvParaPreferences(
                        higher_unigram_recall=True,
                        statement_or_question='statement'),
                    til_text=last_til)
                paraphrased_repeat_result.priority = ResponsePriority.STRONG_CONTINUE
                paraphrased_repeat_result.state.convpara_measurement[
                    'codepath'] = 'paraphrase_for_answer_to_did_you_know'
                return paraphrased_repeat_result

            # User asks a question and all the content is not covered
            # note that just single word questions are covered as part of doubtful or clarification already
            #elif (is_question or contains_phrase(utterance, set(HIGH_PREC_QUESTION_WORD))) and len(generations_for_last_til)<=1:
            #    # We should cite the original TIL and say that's all I know about it
            #    text = self.rg.state_manager.current_state.choose_least_repetitive(deflect_questions_with_original_til_templates(original_til=last_til))
            #    conditional_state = ConditionalState(
            #        cur_doc_title=entity.name,
            #        responding_treelet=self.__repr__(),
            #        til_used=last_til)
            #    state.convpara_measurement['codepath'] = 'deflect_question_with_original_phrasing'
            #    response_result = ResponseGeneratorResult(text=text, priority=ResponsePriority.STRONG_CONTINUE,
            #                                              needs_prompt=True, state=state, cur_entity=entity,
            #                                              conditional_state=conditional_state)
            #    logger.primary_info(
            #        "ConvPara TIL deflected question with original phrasing. Handing over with WEAK_CONTINUE")
            #    return response_result

            # If user sounds interested
            elif interested.execute(utterance):
                # Generate response using paraphrasing
                base_response = self.respond_til(
                    state,
                    entity,
                    preferences=ConvParaPreferences(
                        higher_unigram_recall=True))
                base_response.cur_entity = entity
                base_response.priority = ResponsePriority.STRONG_CONTINUE
                base_response.state.convpara_measurement[
                    'codepath'] = 'new_convpara_TIL_for_interested_user'
                return base_response

        # Fixme: store user satisfaction for convpara til
        state.convpara_measurement['codepath'] = 'disinterested'
        raise CantRespondError(
            "User wasn't very interested, not using convpara til anymore")
Exemple #26
0
    def respond_til(self,
                    state: State,
                    entity: WikiEntity,
                    preferences: Optional[ConvParaPreferences] = None,
                    til_text: Optional[str] = None) -> ResponseGeneratorResult:
        """Respond with a conversationally-paraphrased TIL about the entity.

        Fetches a TIL (unless one is supplied), generates conversational
        paraphrases for it, filters out unfinished/offensive ones, optionally
        ranks them according to ``preferences``, and returns the chosen text.

        :param state: the current WIKI state
        :type state: chirpy.response_generators.wiki.dtypes.State
        :param entity: the entity to give a TIL about
        :type entity: WikiEntity
        :param preferences: optional ranking preferences (statement vs question,
            higher unigram recall)
        :type preferences: Optional[ConvParaPreferences]
        :param til_text: a specific TIL to paraphrase; when None, a fresh unused
            TIL is fetched.  (Default changed from the unidiomatic ``False`` to
            ``None`` for consistency with the sibling respond_til; only the
            truthiness of the value is used, so behavior is unchanged.)
        :type til_text: Optional[str]
        :return: the response generator result
        :rtype: ResponseGeneratorResult
        :raises CantRespondError: when the entity is blacklisted, no TIL exists,
            or no acceptable paraphrase is produced
        """
        if entity.name in CONVPARA_BLACKLISTED_ENTITIES:
            raise CantRespondError(f"{entity} blacklisted for convpara")
        if not til_text:
            til_response = self.get_til(entity.name, state)
            if not til_response:
                raise CantRespondError("Not responding with more TILs")
            til_text, _, _ = til_response

        paraphrases = ConvPara(self.rg.state_manager).get_paraphrases(
            background=til_text, entity=entity.name)
        # Drop generations that didn't terminate cleanly or contain offensive text.
        paraphrases = filter_and_log(lambda p: p.finished, paraphrases,
                                     "Paraphrases for TIL",
                                     "they were unfinished")
        paraphrases = filter_and_log(
            lambda p: not contains_offensive(p.readable_text()), paraphrases,
            "Paraphrases for TIL", "contained offensive phrase")
        if not paraphrases:
            raise CantRespondError(
                f"No good conv paraphrases for TIL \n {til_text}")
        if preferences:
            if preferences.statement_or_question:
                # Stable-sort so the preferred form (question vs statement)
                # comes first; later sorts refine within this ordering.
                if preferences.statement_or_question == 'question':
                    paraphrases = sorted(
                        paraphrases,
                        key=lambda p: did_you_know.execute(p.readable_text()),
                        reverse=True)
                else:
                    paraphrases = sorted(paraphrases,
                                         key=lambda p: not did_you_know.
                                         execute(p.readable_text()),
                                         reverse=True)
            if preferences.higher_unigram_recall:
                # Prefer the paraphrase that, together with paraphrases already
                # spoken for this TIL, covers the most of the TIL's words.
                generations_for_other_tils = state.entity_state[
                    entity.name].conv_paraphrases[
                        til_text] if til_text in state.entity_state[
                            entity.name].conv_paraphrases else []
                paraphrases = sorted(
                    paraphrases,
                    key=lambda p: self.ngram_recall([p.readable_text(
                    )] + generations_for_other_tils, til_text, 1),
                    reverse=True)
            text = paraphrases[0].readable_text()
        else:
            text = random.choice([p.readable_text() for p in paraphrases])
        # Ensure sentence-final punctuation.
        if text[-1] not in ['.', '!', '?']:
            text += '.'

        logger.primary_info(
            f'WIKI is responding with a *paraphrased* TIL to entity {entity.name}'
        )
        logger.primary_info(f"TIL text: {til_text} \n ConvPara output: {text}")
        conditional_state = ConditionalState(
            cur_doc_title=entity.name,
            til_used=til_text,
            responding_treelet=self.__repr__(),
            prompt_handler=f"{self.__repr__()}:paraphrase_handler",
            paraphrase=(til_text, text))
        base_response_result = ResponseGeneratorResult(
            text=text,
            priority=ResponsePriority.CAN_START,
            cur_entity=entity,
            needs_prompt=False,
            state=state,
            conditional_state=conditional_state)
        return base_response_result
    def get_response(self, state : State) -> ResponseGeneratorResult:
        """This function defines the stages that we go through to generate the result. The procedure is

        1. First populate the "additional_features"
        2. Incorporate unconditional information such as user's likes and dislikes, phrases that were detected
        3. Advance the state to the next state depending on the user's utterance and additional features
        4. Define the action space for the policy
        5. Select an action using a policy
        6. Utterancify the action chosen using additional information like lists of reasons and alternatives
        7. Post process the state conditioned on the action
        
        :param state: the current state
        :type state: State
        :return: a result that can be used for chirpy
        :rtype: ResponseGeneratorResult
        """
        self.initialize_turn()
        neg_intent = self.state_manager.current_state.navigational_intent.neg_intent  # type: ignore
        if neg_intent and self.state_manager.last_state_active_rg == 'OPINION': # type: ignore
            return self.respond_neg_nav(state, None)
        utterance = self.state_manager.current_state.text
        additional_features = self.populate_features(state, utterance)
        high_prec = self.state_manager.current_state.entity_linker.high_prec # type: ignore
        # should_evaluate = len(state.action_history) > 4 and not state.evaluated \
        #     and not state.first_episode and state.cur_policy != '' and state.cur_policy != repr(OneTurnAgreePolicy())
        should_evaluate = False # Turning off evaluation question. 
        if self.state_manager.current_state.entity_linker.high_prec and state.cur_phrase != '': # type: ignore
            cur_entity_name = self.opinionable_phrases[state.cur_phrase].wiki_entity_name
            if (cur_entity_name is None and state.cur_phrase not in [linked_span.span for linked_span in high_prec]) \
                    or (cur_entity_name is not None and cur_entity_name not in [linked_span.top_ent.name for linked_span in high_prec]):
                # If the above condition passes, it means that the linked entity is not the currently opinionating phrase.
                if len(additional_features.detected_phrases) == 0:
                    # User no longer want to talk about an opinionable phrase
                    return self.respond_neg_nav(state, random.choice(self.state_manager.current_state.entity_linker.high_prec).top_ent) # type: ignore
        if state.last_turn_prompt or state.last_turn_select:
            priority = ResponsePriority.STRONG_CONTINUE
        elif len(high_prec) > 0 and \
                not any(linked_span.span in self.opinionable_phrases \
                        or linked_span.top_ent.name in self.opinionable_entities for linked_span in high_prec): # type: ignore
            self.logger.primary_info(f'Opinion realized that there is a high precision entity, will not CAN_START our conversation') # type: ignore
            priority = ResponsePriority.NO
        # if WhatsYourOpinion().execute(utterance) is not None:
        #     self.logger.primary_info(f"Opinion detected user is asking for our opinion, raising priority to FORCE_START") # type: ignore
        #     priority = ResponsePriority.FORCE_START
        else:
            priority = ResponsePriority.CAN_START
        if len(state.action_history) > 0 and state.action_history[-1].exit:
            self.logger.primary_info(f'Opinion detected our previous action is to exit and we were not selected, will reset the state before this turn starts') # type: ignore
            state = state.reset_state()
            priority = ResponsePriority.CAN_START # Drop the priority to CAN_START because we already ended a convo before
        # First need to incorporate the unconditional information learned from this turn
        state.detected_opinionated_phrases += additional_features.detected_phrases
        if len(additional_features.detected_phrases) > 0:
            # Here we only use regex since sentiment analysis may not always do well
            if utils.is_like(utterance)[0]:
                state.user_sentiment_history += tuple((phrase, 4) for phrase in additional_features.detected_phrases)
            elif utils.is_not_like(utterance)[0]:
                state.user_sentiment_history += tuple((phrase, 0) for phrase in additional_features.detected_phrases)
        additional_features.detected_phrases = tuple([phrase for phrase in additional_features.detected_phrases if phrase not in state.phrases_done])

        # Then need to advance the state using the utterance
        state_p = state_actions.next_state(state, utterance, additional_features)
        if state_p is None or state_p.cur_phrase is None:
            return emptyResult(state.reset_state())
        # NOTE(review): the else-branch yields a list while the if-branch yields a
        # set; both only feed `in` checks below, so behavior is unaffected.
        reasons_used = dict(state.reasons_used)
        phrase_reasons_used = set(reasons_used[state_p.cur_phrase]) if state_p.cur_phrase in reasons_used else []
        pos_reasons, neg_reasons = utils.get_reasons(state_p.cur_phrase)
        pos_reasons = [reason for reason in pos_reasons if reason not in phrase_reasons_used]
        neg_reasons = [reason for reason in neg_reasons if reason not in phrase_reasons_used]
        related_entities = [phrase.text for phrase in self.opinionable_phrases.values() \
            if phrase.category is not None and  phrase.category == self.opinionable_phrases[state_p.cur_phrase].category \
                and phrase.wiki_entity_name != self.opinionable_phrases[state_p.cur_phrase].wiki_entity_name]
        related_entities = [e for e in related_entities if e not in state_p.phrases_done]
    
        # Then need to define the action space
        action_space = self.get_action_space(state_p, pos_reasons, neg_reasons, related_entities)
        # Then need to select a policy if we don't have one
        ab_test_policy = self.state_manager.current_state.experiments.look_up_experiment_value('opinion_policy') # type: ignore
        if state_p.cur_policy != '':
            self.logger.primary_info(f'OPINION is using current policy {state_p.cur_policy} to respond to the user') # type: ignore
        # NOTE(review): `!= 'random' or == 'not_defined'` is already True whenever
        # the value is 'not_defined' (it is != 'random'), so the or-clause is
        # redundant; this looks like it was intended to be
        # "ab_test_policy != 'random' and ab_test_policy != 'not_defined'" — confirm.
        elif ab_test_policy != 'random' or ab_test_policy == 'not_defined': 
            state_p.cur_policy = ab_test_policy 
            self.logger.primary_info(f'Opinion detected a/b test policy is {ab_test_policy}, will set current episode accordingly ') # type: ignore
        elif state_p.num_turns_since_long_policy < 20:
            policies, weights = zip(*self.short_policy_rates)
            state_p.cur_policy = random.choices(policies, weights, k=1)[0] # type: ignore
            self.logger.primary_info(f'Opinion had a long conversation {state_p.num_turns_since_long_policy} < 20 turns ago. Will use policy {state_p.cur_policy}') # type: ignore
        else:
            # NOTE(review): both of the first two branches select
            # agree_policies_rates; the elif (last_policy was an agree policy)
            # likely intended disagree_policy_rates — confirm before changing.
            if state_p.last_policy in set([p for p, _ in self.disagree_policy_rates]):
                policies, weights = zip(*self.agree_policies_rates)
            elif state_p.last_policy in set([p for p, _ in self.agree_policies_rates]):
                policies, weights = zip(*self.agree_policies_rates)
            else:
                policies, weights = zip(*self.policy_rates)
            state_p.cur_policy = random.choices(policies, weights, k=1)[0] # type: ignore
            self.logger.primary_info(f'OPINION have no current policy, randomly picked {state_p.cur_policy} to respond to the user, resetting turn count') # type: ignore
        state_p.last_policy = state_p.cur_policy
        policy = self.policies[state_p.cur_policy] # type: ignore
        # Then need to get the action from a policy
        action = policy.get_action(state_p, action_space, additional_features)
        self.logger.primary_info(f'OPINION\'s strategy chose action {action}') # type: ignore
        action_space = self.get_action_space(state_p, pos_reasons, neg_reasons, related_entities) # Redefine action space for checks in case cur_phrase changed
        if action not in action_space:
            self.logger.error(f'OPINION policy {repr(policy)} generated an action {action} that is not in the action space {action_space}. Check policy implementation.')
            new_state = state.reset_state()
            return emptyResult(new_state)
        # Then need to utterancify the action
        text, phrase, reason = fancy_utterancify(state_p, action, pos_reasons, neg_reasons, 
                                                related_entities, should_evaluate, self.state_manager.current_state.choose_least_repetitive) # type: ignore
        # Then need to fill the rest of the fields of state_p (through mutation)
        state_p = state_actions.fill_state_on_action(state_p, action, text, phrase, additional_features, reason, 
            self.opinionable_phrases, self.opinionable_entities)

        state_p.last_turn_select = True
        state_p.last_turn_prompt = False
        user_sentiment_history_dict = dict(state.user_sentiment_history)
        # Only hand WIKI an entity if the user liked the phrase (sentiment > 2)
        # and the phrase is marked as good material for WIKI.
        wiki_entity = None
        if phrase != '' and phrase in user_sentiment_history_dict and user_sentiment_history_dict[phrase] > 2 \
                and self.opinionable_phrases[phrase].good_for_wiki:
            wiki_entity = get_entity_by_wiki_name(self.opinionable_phrases[phrase].wiki_entity_name)
        state.last_turn_prompt, state.last_turn_select = False, False
        needs_prompt = False
        if action.exit:
            if len(state_p.action_history) > 6:
                self.logger.primary_info(f"Opinion had a conversation of length {len(state_p.action_history)}, will reset long_policy count") # type: ignore
                state_p.num_turns_since_long_policy = 0
            if not should_evaluate:
                needs_prompt = True
            if len(state_p.action_history) < 4:
                self.logger.primary_info(f"Opinion only had 4 turns. Will WEAK_CONTINUE the conversation") # type: ignore
                priority = ResponsePriority.WEAK_CONTINUE
            state_p.first_episode = False
        return ResponseGeneratorResult(text, priority, needs_prompt, state, wiki_entity, conditional_state=state_p)
Exemple #28
0
    def get_response(self, state: State) -> ResponseGeneratorResult:
        """Return the WIKI response generator's response for this turn.

        Flow:
          1. If WIKI was the active RG last turn and the user shows negative
             navigational intent, hard-switch out with a handoff response.
          2. If WIKI was active last turn, let the treelet that issued the
             last prompt attempt a STRONG_CONTINUE via ``handle_prompt``.
          3. Otherwise try each treelet in a fixed priority order for a
             CAN_START response; each treelet raises ``CantRespondError``
             when it has nothing to say.
          4. If every treelet is exhausted for the tracked entity, hand over
             with a WEAK_CONTINUE apology.
          5. If the chosen response needs a prompt and the tracked entity is
             not finished, ask the treelets (same priority order) to extend
             the response with a follow-up question instead.

        :param state: the current state
        :type state: State
        :return: the result
        :rtype: ResponseGeneratorResult
        """

        # Expected to STRONG_CONTINUE
        base_response = emptyResult(state)
        if self.state_manager.last_state_active_rg == 'WIKI':
            neg_intent = self.state_manager.current_state.navigational_intent.neg_intent  # bool
            if neg_intent:
                logger.primary_info(
                    'NavigationalIntent is negative, so doing a hard switch out of WIKI'
                )
                return ResponseGeneratorResult(
                    text=get_neural_fallback_handoff(
                        self.state_manager.current_state) or "Ok, no problem.",
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=None,
                    conditional_state=ConditionalState())

        tracked_entity = self.get_recommended_entity(state)
        if self.state_manager.last_state_active_rg == 'WIKI':
            # Should have some idea of how to continue.
            # Based on what is in the state, figure out which treelet issued
            # the last prompt and let it attempt a STRONG_CONTINUE first.
            try:
                logger.info(
                    f"Wiki handing over to prompt handler {state.prompt_handler}.handle_prompt"
                )
                # prompt_handler may carry extra info after a ':'; the treelet
                # key is the part before it.
                prompt_handler = state.prompt_handler.split(':')[0]
                base_response = self.all_treelets[
                    prompt_handler].handle_prompt(state)
            except CantRespondError:
                logger.info(
                    f"{state.prompt_handler}.handle_prompt raised CantRespondError, will try other treelets next"
                )

        state.reset()

        if base_response.priority == ResponsePriority.NO:  # No response so far
            # CAN_START: try each treelet in priority order. Each treelet
            # raises CantRespondError when it cannot produce a response.
            try:
                base_response = IntroductoryTreelet(
                    self).get_can_start_response(state)
            except CantRespondError:
                try:
                    base_response = OpenQuestionTreelet(
                        self).get_can_start_response(state)
                except CantRespondError:
                    try:
                        if self.state_manager.current_state.experiments.look_up_experiment_value(
                                'convpara'):
                            try:
                                base_response = ConvParaTILTreelet(
                                    self).get_can_start_response(state)
                            except CantRespondError:
                                base_response = TILTreelet(
                                    self).get_can_start_response(state)
                        else:
                            base_response = TILTreelet(
                                self).get_can_start_response(state)
                    except CantRespondError:
                        try:
                            base_response = HandleSectionTreelet(
                                self).get_can_start_response(state)
                        except CantRespondError:
                            try:
                                base_response = IntroduceEntityTreelet(
                                    self).get_can_start_response(state)
                            except CantRespondError:
                                # BUGFIX: this was a bare `except:`, which also
                                # swallowed unrelated programming errors (and
                                # even KeyboardInterrupt). Only CantRespondError
                                # signals "nothing to say" here.
                                if tracked_entity:
                                    logger.primary_info(
                                        f"WIKI has exhausted all treelets for this entity. Handing over with a weak continue."
                                    )
                                    apology_text = self.state_manager.current_state.choose_least_repetitive(
                                        HANDOVER_TEXTS)
                                    apology_state = deepcopy(state)
                                    apology_state.entity_state[
                                        tracked_entity.name].finished_talking = True
                                    apology_response = ResponseGeneratorResult(
                                        text=apology_text,
                                        priority=ResponsePriority.WEAK_CONTINUE,
                                        needs_prompt=True,
                                        cur_entity=None,
                                        state=apology_state,
                                        conditional_state=ConditionalState())
                                    return apology_response

        # If the base response needs a prompt and we have not finished talking
        # about the tracked entity, try to extend the response with a section
        # prompt, using the same treelet priority order as above.
        if base_response.needs_prompt and tracked_entity and not base_response.state.entity_state[
                tracked_entity.name].finished_talking:
            try:
                # Stub for sake of completeness; the intro treelet doesn't have
                # the ability to continue a response.
                return IntroductoryTreelet(self).continue_response(
                    base_response)
            except CantContinueResponseError:
                try:
                    return OpenQuestionTreelet(self).continue_response(
                        base_response)
                except CantContinueResponseError:
                    try:
                        if self.state_manager.current_state.experiments.look_up_experiment_value(
                                'convpara'):
                            try:
                                return ConvParaTILTreelet(
                                    self).continue_response(base_response)
                            except CantContinueResponseError:
                                return TILTreelet(self).continue_response(
                                    base_response)
                        else:
                            return TILTreelet(self).continue_response(
                                base_response)
                    except CantContinueResponseError:
                        try:
                            return HandleSectionTreelet(
                                self).continue_response(base_response)
                        except CantContinueResponseError:
                            pass
        return base_response
Example #29
0
    def handle_prompt(self, state: State) -> ResponseGeneratorResult:
        """Select a Wikipedia section to talk about given the user's utterance,
        which we assume is neither a plain yes nor a plain no.

        Selection strategy, in order:
          1. fuzzy-match the utterance against the options prompted last turn
             (per-token edit distance < 2);
          2. fuzzy-match the utterance against every section title of the entity;
          3. if the user said yes without naming a section, take the first
             prompted option;
          4. if the user said no, hand over with an apology;
          5. otherwise delegate to the Open Question treelet.

        :param state: the current state
        :raises CantRespondError: when the recommended entity changed, the
            summary is empty/offensive/repetitive, or no section can be chosen
        """
        utterance = self.rg.state_manager.current_state.text.lower()
        entity = self.rg.get_recommended_entity(state)
        if not entity or entity.name not in state.entity_state:
            raise CantRespondError("Recommended entity has changed")
        sections = wiki_utils.get_wiki_sections(entity.name)

        # Prepared apology response.
        # NOTE(review): this prepared result is only used via its
        # `apology_state`; the is_no branch below builds a fresh
        # ResponseGeneratorResult and shadows this one — confirm intended.
        apology_text = self.rg.state_manager.current_state.choose_least_repetitive(
            APOLOGY_TEXTS)
        apology_state = deepcopy(state)
        apology_state.entity_state[entity.name].finished_talking = True
        apology_response = ResponseGeneratorResult(
            text=apology_text,
            priority=ResponsePriority.WEAK_CONTINUE,
            needs_prompt=True,
            state=apology_state,
            cur_entity=None,
            conditional_state=ConditionalState(
                responding_treelet=self.__repr__(), ))
        selected_section = None
        # Check if there is high fuzzy overlap between prompted options and the
        # user utterance; if so, pick that section.
        for option in state.prompted_options:
            if all(
                    any(
                        editdistance.eval(u_token, eu_token) < 2
                        for u_token in utterance.split(' '))
                    for eu_token in option.lower().split(' ')):
                logger.primary_info(
                    f'WIKI prompted {option} and successfully found it in user utterance'
                )
                # In case the prompted option was for a 1st level section, but
                # actually the second level section was suggested, run the
                # following code to get the right option.

                # While we expect the option selected from prompted_options to
                # have been chosen, in the case of an entity switch
                # (entity_a -> entity_b -> entity_a) the prompted options are
                # from entity_b but the suggested sections are from entity_a.
                options = [
                    sec for sec in state.entity_state[
                        entity.name].suggested_sections if option in str(sec)
                ]
                if options:
                    option = options[0].title
                    break
                # NOTE(review): when the fuzzy match succeeds but `options` is
                # empty, we keep scanning and may fall through to the else
                # clause below, discarding the match — confirm this is intended.
        else:
            # Check if any section title directly matches
            # (TODO: remove replicated code in any_section_title_matches)
            for section in sections:
                if all(
                        any(
                            editdistance.eval(u_token, eu_token) < 2
                            for u_token in utterance.split(' '))
                        for eu_token in section.title.lower().split(' ')):
                    option = section.title
                    logger.primary_info(
                        f'WIKI found successfully section title {option} in user utterance'
                    )
                    break
            else:
                # If we see many users talking about random things, we should
                # use search_sections here.

                # If the user said yes but no section was specifically
                # mentioned, pick the first prompted section.
                if self.is_yes(utterance):
                    if state.prompted_options:
                        option = state.prompted_options[0]
                        # In case the prompted option was for a 1st level
                        # section, but actually the second level section was
                        # suggested, run the following code to get the right
                        # option.
                        new_options = \
                            [sec for sec in state.entity_state[entity.name].suggested_sections if option in str(sec)]
                        if new_options:
                            new_option = new_options[0].title
                        else:
                            new_option = None
                        if new_option and new_option != option:
                            logger.primary_info(
                                f'WIKI detected user saying yes to section {option}, but the prompted section was actually {new_option}. Responding using that.'
                            )
                            option = new_option
                        else:
                            logger.primary_info(
                                f'WIKI detected user saying yes to section {option}, responding to that section'
                            )
                    else:
                        raise CantRespondError(
                            "User didn't reply to open prompt with anything specific to talk about"
                        )
                else:
                    option = None

        if option:
            selected_sections = [
                sec for sec in sections if sec.title == option
            ]
            if len(selected_sections) == 0:
                # BUGFIX: this exception was constructed but never raised, so
                # execution fell through to an IndexError on the next line.
                raise CantRespondError(
                    f"Selected option {option} doesn't correspond to a section title, but it should!\n"
                    f"Sections are {sections}")
            selected_section = selected_sections[0]
            # This should not throw an error because there should at least be
            # one suggested section that matches the option.
            section_summary = selected_section.summarize(self.rg.state_manager)
            if not section_summary:
                raise CantRespondError(
                    f"Received empty section summary for {selected_section}")
            if chirpy.core.offensive_classifier.offensive_classifier.contains_offensive(
                    section_summary):
                raise CantRespondError(
                    f"The section summary {section_summary} contains some offensive phrase"
                )
            if self.rg.has_overlap_with_history(section_summary,
                                                threshold=0.8):
                raise CantRespondError(
                    f"Section chosen using title overlap : {selected_section.title} has high overlap with a past utterance. "
                    f"Discarding it. ")
            else:
                conditional_state = ConditionalState(
                    cur_doc_title=entity.name,
                    discussed_section=selected_section,
                    responding_treelet=self.__repr__())
                return ResponseGeneratorResult(
                    text=section_summary,
                    priority=ResponsePriority.STRONG_CONTINUE,
                    needs_prompt=True,
                    state=state,
                    cur_entity=entity,
                    conditional_state=conditional_state)
        elif self.is_no(utterance):
            apology_response = ResponseGeneratorResult(
                text=self.rg.state_manager.current_state.
                choose_least_repetitive(HANDOVER_TEXTS),
                priority=ResponsePriority.WEAK_CONTINUE,
                needs_prompt=True,
                state=apology_state,
                cur_entity=None,
                conditional_state=ConditionalState(
                    responding_treelet=self.__repr__(), ))
            return apology_response
        else:
            logger.primary_info(
                f"Found no sections matching user utterance. Can't respond with sections"
            )
            return self.rg.all_treelets[
                'Open Question Treelet (WIKI)'].handle_prompt(state)
Example #30
0
    def get_response(self, state: State,
                     state_manager) -> ResponseGeneratorResult:
        """
        Handle the user's answer to a categories question on the previous turn.

        If the cur_entity in the entity tracker is still the same category we
        set at the end of last turn, give a vague acknowledgement (picked via
        regex templates, or a neural fallback) and hand off with a prompt
        request. Otherwise, say nothing.
        """
        tracker = state_manager.current_state.entity_tracker  # EntityTrackerState
        prev_turn = tracker.history[-2]
        # The cur_entity at the end of the previous turn, e.g. 'Food' or 'Art';
        # prefer the prompt entity when one was recorded for that turn.
        prev_turn_entity = prev_turn['response']
        if 'prompt' in prev_turn:
            prev_turn_entity = prev_turn['prompt']

        # If the cur_entity has changed since the end of the last turn (the user
        # rejected the category or named a new entity), say nothing. Likewise,
        # if we just used another RG's prompt (state.just_asked), return an
        # empty result so that RG can take over.
        if tracker.cur_entity != prev_turn_entity or state.just_asked:
            logger.primary_info(
                f'cur_entity changed from previous turn, so not asking a second question'
            )
            return emptyResult(state)

        category_name = state.cur_category_name

        # Entity unchanged (Entity Linker may not have triggered) and CATEGORY
        # RG has not responded: use regex templates / neural generation for an
        # acknowledgement, then ask for a prompt from another RG.
        cur_state = state_manager.current_state
        user_text = cur_state.text
        conditional_state = ConditionalState(HandleAnswerTreelet.__name__,
                                             category_name, None, None, True)
        text = "Thanks for answering my questions!"  # Default response that will be overwritten!
        cur_entity = prev_turn_entity

        about_alexa = ""
        if WhatAboutYouTemplate().execute(user_text) is not None:
            last_bot_utterance = cur_state.history[-1]
            if "What TV show are you watching right now?" in last_bot_utterance:
                about_alexa = "I watched the office again. I've re-watched it so many times!"
            elif "What did you eat for dinner last night?" in last_bot_utterance:
                about_alexa = "I had some delicious spaghetti."
            else:
                about_alexa = cur_state.choose_least_repetitive(
                    RESPONSE_TO_WHAT_ABOUT_YOU)

        if DontKnowTemplate().execute(user_text) is not None:
            text = " ".join((cur_state.choose_least_repetitive(
                RESPONSE_TO_DONT_KNOW), about_alexa))
            cur_entity = None
        elif BackChannelingTemplate().execute(user_text) is not None:
            text = " ".join((cur_state.choose_least_repetitive(
                RESPONSE_TO_BACK_CHANNELING), about_alexa))
            cur_entity = prev_turn_entity
        elif EverythingTemplate().execute(user_text) is not None:
            text = " ".join((cur_state.choose_least_repetitive(
                RESPONSE_TO_EVERYTHING_ANS), about_alexa))
            cur_entity = prev_turn_entity
        elif NotThingTemplate().execute(user_text) is not None:
            text = " ".join((cur_state.choose_least_repetitive(
                RESPONSE_TO_NOTHING_ANS), about_alexa))
            cur_entity = None
        else:
            # Make sure a gpt2ed output is cached on the current state before
            # falling back to a neural response.
            if not hasattr(cur_state, 'gpt2ed'):
                logger.primary_info(f"CATEGORIES RG is running gpt2ed")
                setattr(cur_state, 'gpt2ed', GPT2ED(state_manager).execute())

            text = get_random_fallback_neural_response(cur_state)
            if text is None:
                return emptyResult(state)
            cur_entity = prev_turn_entity

        return ResponseGeneratorResult(text=text,
                                       priority=ResponsePriority.STRONG_CONTINUE,
                                       needs_prompt=True,
                                       state=state,
                                       cur_entity=cur_entity,
                                       expected_type=None,
                                       conditional_state=conditional_state)