Code example #1
def if_choose_topic(annotated_uttr, prev_annotated_uttr=None):
    """Dialog context implies that the next utterances can pick up a topic:
    - annotated_uttr asks to switch topic
    - annotated_uttr asks "what do you want to talk about?"
    - annotated_uttr asks "let's talk about something (else)"
    - prev_annotated_uttr asks "what do you want to talk about?", and annotated_uttr says something/anything.
    """
    prev_annotated_uttr = {} if prev_annotated_uttr is None else prev_annotated_uttr
    uttr_ = annotated_uttr.get("text", "").lower()
    prev_uttr_ = prev_annotated_uttr.get("text", "--").lower()
    chat_about_intent = "lets_chat_about" in get_intents(
        annotated_uttr, probs=False, which="intent_catcher")
    user_asks_what_to_talk_about = re.search(COMPILE_WHAT_TO_TALK_ABOUT, uttr_)
    # user asks to "talk about something"
    smth1 = re.search(COMPILE_LETS_TALK_ABOUT_SOMETHING,
                      uttr_) or (chat_about_intent
                                 and re.search(COMPILE_SOMETHING, uttr_))
    # bot asked what the user wants to talk about, and the user answers "something"
    prev_chat_about_intent = "lets_chat_about" in get_intents(
        prev_annotated_uttr, probs=False, which="intent_catcher")
    prev_uttr_asks_what_topic = prev_chat_about_intent or re.search(
        COMPILE_WHAT_TO_TALK_ABOUT, prev_uttr_)
    smth2 = prev_uttr_asks_what_topic and re.search(COMPILE_SOMETHING, uttr_)

    switch_topic = is_switch_topic(annotated_uttr)
    if switch_topic or user_asks_what_to_talk_about or (smth1 or smth2):
        return True
    return False
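A minimal call sketch for if_choose_topic. The utterance dicts and the intent_catcher annotation layout below are assumptions inferred from the other listings here (see Code example #25), not part of the source, and the COMPILE_* patterns are project regexes that are not shown in this listing.

# Hypothetical annotated utterances; the annotation schema is an assumption.
user_uttr = {
    "text": "let's talk about something else",
    "annotations": {"intent_catcher": {"lets_chat_about": {"detected": 1, "confidence": 0.95}}},
}
prev_bot_uttr = {"text": "what do you want to talk about?", "annotations": {}}
# Expected to be True: the utterance carries the lets_chat_about intent and mentions "something".
print(if_choose_topic(user_uttr, prev_bot_uttr))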
Code example #2
def if_not_want_to_chat_about_particular_topic(annotated_uttr,
                                               prev_annotated_uttr=None):
    prev_annotated_uttr = {} if prev_annotated_uttr is None else prev_annotated_uttr
    uttr_ = annotated_uttr.get("text", "")
    if re.search(COMPILE_NOT_WANT_TO_TALK_ABOUT_IT, uttr_):
        return True

    # prev uttr is what do you want to talk about?
    prev_chat_about_intent = "lets_chat_about" in get_intents(
        prev_annotated_uttr, probs=False, which="intent_catcher")
    prev_what_to_chat_about = prev_chat_about_intent or if_utterance_requests_topic(
        prev_annotated_uttr)
    if prev_what_to_chat_about and is_no(annotated_uttr):
        # previously offered to chat about topic, user declines
        return True
    elif prev_what_to_chat_about and is_switch_topic(annotated_uttr):
        # previously offered to chat about topic, user asks to switch topic
        return True
    elif prev_what_to_chat_about and SOMETHING_ELSE.search(uttr_):
        # previously offered to chat about topic, user asks for something else
        return True

    # current uttr is lets talk about something else / other than
    chat_about_intent = "lets_chat_about" in get_intents(
        annotated_uttr, probs=False, which="intent_catcher")
    chat_about = chat_about_intent or if_lets_chat_about_topic(uttr_)
    if chat_about and SOMETHING_ELSE.search(uttr_):
        return True
    return False
Code example #3
File: weather.py Project: deepmipt/deepy
def if_special_weather_turn_on(user_utt, prev_bot_utt):
    if ("weather_forecast_intent" in get_intents(
            user_utt, probs=False, which="all")
            or is_weather_for_homeland_requested(prev_bot_utt, user_utt)
            or is_weather_without_city_requested(prev_bot_utt, user_utt)):
        return True
    return False
Code example #4
File: condition.py Project: deepmipt/deepy
def is_no_human_abandon(vars):
    """Is dialog breakdown in human utterance or no. Uses MIDAS hold/abandon classes."""
    midas_classes = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="midas")
    if "abandon" not in midas_classes:
        return True
    return False
Code example #5
File: condition.py Project: deepmipt/deepy
def no_requests(vars):
    """Function to determine if
    - user didn't asked to switch topic,
    - user didn't ask to talk about something particular,
    - user didn't requested high priority intents (like what_is_your_name)
    - user didn't requested any special intents
    - user didn't ask questions
    """
    contain_no_special_requests = no_special_switch_off_requests(vars)

    request_intents = [
        "opinion_request",
        "topic_switching",
        "lets_chat_about",
        "what_are_you_talking_about",
        "Information_RequestIntent",
        "Topic_SwitchIntent",
        "Opinion_RequestIntent",
    ]
    intents = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="all")
    is_not_request_intent = all(
        [intent not in request_intents for intent in intents])
    is_no_question = "?" not in state_utils.get_last_human_utterance(
        vars)["text"]

    if contain_no_special_requests and is_not_request_intent and is_no_question:
        return True
    return False
Code example #6
def get_current_intents(last_human_utterances):
    curr_intents = get_intents(last_human_utterances, probs=False, which="all")
    return list(
        set([
            get_midas_analogue_intent_for_any_intent(intent)
            for intent in curr_intents
            if get_midas_analogue_intent_for_any_intent(intent) is not None
        ]))
Code example #7
def is_midas_negative_answer(vars):
    midas_classes = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="midas")

    intent_detected = "neg_answer" in midas_classes

    return intent_detected
Code example #8
File: condition.py Project: deepmipt/deepy
def is_side_or_stop(ctx: Context, actor: Actor) -> bool:
    """
    Check for side intents (including exit)
    """
    intents = set(get_intents(int_ctx.get_last_human_utterance(ctx, actor), which="intent_catcher", probs=False))
    side_intent_present = len(intents.intersection(SIDE_INTENTS)) > 0
    if side_intent_present:
        logger.debug("Side intent detected, exiting")
    return side_intent_present
Code example #9
def is_no_human_abandon(ctx: Context, actor: Actor) -> bool:
    """Is dialog breakdown in human utterance or no. Uses MIDAS hold/abandon classes."""
    midas_classes = common_utils.get_intents(int_ctx.get_last_human_utterance(
        ctx, actor),
                                             which="midas")
    if "abandon" not in midas_classes:
        return True
    return False
Code example #10
File: tag_based_selection.py Project: deepmipt/deepy
def get_main_info_annotations(annotated_utterance):
    intents = get_intents(annotated_utterance, which="all")
    topics = get_topics(annotated_utterance, which="all")
    named_entities = get_entities(annotated_utterance,
                                  only_named=True,
                                  with_labels=False)
    nounphrases = get_entities(annotated_utterance,
                               only_named=False,
                               with_labels=False)
    return intents, topics, named_entities, nounphrases
Code example #11
def is_side_or_stop(ctx: Context, actor: Actor) -> bool:
    """
    Check for side intents (including exit)
    """
    last_request = ctx.misc.get("agent",
                                {}).get("dialog",
                                        {}).get("human_utterances", [{}])[-1]
    intents = set(
        get_intents(last_request, which="intent_catcher", probs=False))
    side_intent_present = len(intents.intersection(SIDE_INTENTS)) > 0
    if side_intent_present:
        logger.debug("Side intent detected, exiting")
    return side_intent_present
Code example #12
def intent_catcher_exists_condition(ctx: Context, actor: Actor, *args,
                                    **kwargs) -> bool:
    if ctx.validation:
        return False

    intents_by_catcher = common_utils.get_intents(
        int_ctx.get_last_human_utterance(ctx, actor),
        probs=False,
        which="intent_catcher",
    )

    response_funcs = get_respond_funcs()
    return bool(
        any([intent in response_funcs for intent in intents_by_catcher]))
Code example #13
File: sensitive.py Project: deepmipt/deepy
def is_sensitive_topic_and_request(annotated_uttr):
    cobot_dialogact_topics = set(
        get_topics(annotated_uttr, which="cobot_dialogact_topics"))
    cobot_topics = set(get_topics(annotated_uttr, which="cobot_topics"))
    sensitive_topics_detected = any(
        [t in sensitive_topics for t in cobot_topics]) or any(
            [t in sensitive_dialogact_topics for t in cobot_dialogact_topics])

    all_intents = get_intents(annotated_uttr, probs=False, which="all")
    sensitive_dialogacts_detected = any(
        [t in sensitive_all_intents for t in all_intents])

    if sensitive_topics_detected and sensitive_dialogacts_detected:
        return True
    return False
Code example #14
def collect_topics_entities_intents(prev_human_utterance):
    if len(prev_human_utterance) > 1:
        intent_list = get_intents(prev_human_utterance,
                                  which="cobot_dialogact_intents")
        da_topic_list = get_topics(prev_human_utterance,
                                   which="cobot_dialogact_topics")
        cobot_topic_list = get_topics(prev_human_utterance,
                                      which="cobot_topics")

        intent_list = list(set(intent_list))
        da_topic_list = list(set(da_topic_list))
        cobot_topic_list = list(set(cobot_topic_list))
    else:
        intent_list, da_topic_list, cobot_topic_list = [], [], []

    return intent_list, da_topic_list, cobot_topic_list
Code example #15
def get_intents_flags(utt):
    special_intents = [
        "cant_do",
        "repeat",
        "weather_forecast_intent",
        "what_are_you_talking_about",
        "what_can_you_do",
        "what_is_your_job",
        "what_is_your_name",
        "what_time",
        "where_are_you_from",
        "who_made_you",
    ]
    detected_intents = get_intents(utt, which="intent_catcher")
    lets_chat_about_flag = if_chat_about_particular_topic(utt)
    special_intents_flag = any(
        [si in detected_intents for si in special_intents])
    return lets_chat_about_flag, special_intents_flag
Code example #16
File: condition.py Project: deepmipt/deepy
def no_special_switch_off_requests(vars):
    """Function to determine if
    - user didn't asked to switch topic,
    - user didn't ask to talk about something particular,
    - user didn't requested high priority intents (like what_is_your_name)
    """
    intents_by_catcher = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars),
        probs=False,
        which="intent_catcher")
    is_high_priority_intent = any([
        intent not in common_utils.service_intents
        for intent in intents_by_catcher
    ])
    is_switch = is_switch_topic(vars)
    is_lets_chat = is_lets_chat_about_topic_human_initiative(vars)

    if not (is_high_priority_intent or is_switch or is_lets_chat):
        return True
    return False
Code example #17
File: tag_based_selection.py Project: deepmipt/deepy
def does_not_require_prompt(candidates, best_cand_id):
    _is_already_prompt = "prompt" in candidates[best_cand_id].get(
        "response_parts", [])
    _is_question = "?" in candidates[best_cand_id]["text"]
    _is_very_long = len(candidates[best_cand_id]["text"]) > 200

    _best_cand_intents = get_intents(candidates[best_cand_id], which="all")
    _is_request = any([
        intent in _best_cand_intents
        for intent in REQUIRE_ACTION_INTENTS.keys()
    ])
    _is_not_add_prompt_skill = candidates[best_cand_id][
        "skill_name"] in NOT_ADD_PROMPT_SKILLS

    _is_any_question = is_any_question_sentence_in_utterance(
        candidates[best_cand_id])
    _can_continue = candidates[best_cand_id].get(
        "can_continue", CAN_NOT_CONTINUE) != CAN_NOT_CONTINUE
    if (_is_already_prompt or _is_question or _is_very_long or _is_request
            or _is_not_add_prompt_skill or _is_any_question or _can_continue):
        return True
    return False
Code example #18
def last_n_human_utt_dialog_formatter(
        dialog: Dict,
        last_n_utts: int,
        only_last_sentence: bool = False) -> List:
    """
    Args:
        dialog (Dict): full dialog state
        last_n_utts (int): how many last user utterances to take
        only_last_sentence (bool, optional): take only last sentence in each utterance. Defaults to False.
    """
    dialog = deepcopy(dialog)
    if len(dialog["human_utterances"]
           ) <= last_n_utts and not if_chat_about_particular_topic(
               dialog["human_utterances"][0]):
        # in all cases when not particular topic, convert first phrase in the dialog to `hello!`
        if "sentseg" in dialog["human_utterances"][0].get("annotations", {}):
            dialog["human_utterances"][0]["annotations"]["sentseg"][
                "punct_sent"] = "hello!"
            dialog["human_utterances"][0]["annotations"]["sentseg"][
                "segments"] = ["hello"]
        else:
            dialog["human_utterances"][0]["text"] = "hello"

    human_utts = []
    detected_intents = []
    for utt in dialog["human_utterances"][-last_n_utts:]:
        if "sentseg" in utt.get("annotations", {}):
            sentseg_ann = utt["annotations"]["sentseg"]
            if only_last_sentence:
                text = sentseg_ann["segments"][-1] if len(
                    sentseg_ann["segments"]) > 0 else ""
            else:
                text = sentseg_ann["punct_sent"]
        else:
            text = utt["text"]
        human_utts += [text]
        detected_intents += [get_intents(utt, which="all")]
    return [{"sentences_batch": [human_utts], "intents": [detected_intents]}]
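A hedged sketch of the return shape, assuming a toy dialog without sentseg annotations and assuming get_intents and if_chat_about_particular_topic tolerate utterances that carry no annotations at all:

# Hypothetical minimal dialog state; without "sentseg" annotations the raw "text" fields are used.
toy_dialog = {"human_utterances": [{"text": "hello"}, {"text": "i like cats"}]}
batch = last_n_human_utt_dialog_formatter(toy_dialog, last_n_utts=2)
# batch == [{"sentences_batch": [["hello", "i like cats"]],
#            "intents": [[<intents of "hello">, <intents of "i like cats">]]}]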
Code example #19
def is_cobot_opinion_expressed(vars):
    intents = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="all")
    opinion_expression_detected = "Opinion_ExpressionIntent" in intents
    return bool(opinion_expression_detected)
Code example #20
def tell_me_more(annotated_uttr):
    intents = get_intents(annotated_uttr, which="intent_catcher", probs=False)
    cond1 = "tell_me_more" in intents
    cond2 = re.search(more_details_pattern, annotated_uttr["text"])
    return cond1 or cond2
Code example #21
def if_chat_about_particular_topic(annotated_uttr,
                                   prev_annotated_uttr=None,
                                   key_words=None,
                                   compiled_pattern=r""):
    """Dialog context implies that the last utterances chooses particular conversational topic:
    - annotated_uttr asks "let's talk about PARTICULAR-TOPIC"
    - prev_annotated_uttr asks "what do you want to talk about?", and annotated_uttr says PARTICULAR-TOPIC.
    - prev_annotated_uttr asks "what are your interests?", and annotated_uttr says PARTICULAR-TOPIC.
    """
    prev_annotated_uttr = {} if prev_annotated_uttr is None else prev_annotated_uttr
    key_words = [] if key_words is None else key_words
    uttr_ = annotated_uttr.get("text", "").lower()
    prev_uttr_ = prev_annotated_uttr.get("text", "").lower()

    # current uttr is lets talk about blabla
    chat_about_intent = "lets_chat_about" in get_intents(
        annotated_uttr, probs=False, which="intent_catcher")
    chat_about = chat_about_intent or if_lets_chat_about_topic(uttr_)

    # prev uttr is what do you want to talk about?
    prev_chat_about_intent = "lets_chat_about" in get_intents(
        prev_annotated_uttr, probs=False, which="intent_catcher")
    prev_what_to_chat_about = prev_chat_about_intent or if_utterance_requests_topic(
        prev_annotated_uttr)

    not_want = if_not_want_to_chat_about_particular_topic(
        annotated_uttr, prev_annotated_uttr)
    if not_want:
        return False
    elif prev_what_to_chat_about or chat_about:
        if key_words:
            trigger_pattern = re.compile(
                rf"{join_word_beginnings_in_or_pattern(key_words)}[a-zA-Z0-9,\-\' ]+\?",
                re.IGNORECASE)
            offered_this_topic = trigger_pattern.search(prev_uttr_)
            user_agrees_or_any = ANY_TOPIC_AMONG_OFFERED.search(
                uttr_) or is_yes(annotated_uttr)
            if any([word in uttr_ for word in key_words
                    ]) or (offered_this_topic and user_agrees_or_any):
                return True
            else:
                return False
        elif compiled_pattern:
            if isinstance(compiled_pattern, str):
                offered_this_topic = re.search(
                    rf"{compiled_pattern}[a-zA-Z0-9,\-\' ]+\?", prev_uttr_,
                    re.IGNORECASE)
            else:
                offered_this_topic = re.search(
                    rf"{compiled_pattern.pattern}[a-zA-Z0-9,\-\' ]+\?",
                    prev_uttr_, re.IGNORECASE)
            user_agrees_or_any = ANY_TOPIC_AMONG_OFFERED.search(
                uttr_) or is_yes(annotated_uttr)
            if re.search(compiled_pattern, uttr_) or (offered_this_topic
                                                      and user_agrees_or_any):
                return True
            else:
                return False
        else:
            return True
    return False
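A hedged usage sketch for the key_words branch: with no previous utterance, a "let's talk about ..." request that contains one of the key words should be accepted. The annotation layout is the same assumption as in the earlier sketch, and if_lets_chat_about_topic plus the compiled patterns are project helpers not shown here.

# Hypothetical annotated utterance; the intent_catcher annotation layout is an assumption.
user_uttr = {
    "text": "let's talk about movies",
    "annotations": {"intent_catcher": {"lets_chat_about": {"detected": 1, "confidence": 0.9}}},
}
# Expected to be True: chat_about holds and the key word "movie" occurs in the utterance text.
print(if_chat_about_particular_topic(user_uttr, key_words=["movie"]))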
Code example #22
File: connector.py Project: deepmipt/deepy
    async def send(self, payload: Dict, callback: Callable):
        try:
            st_time = time.time()
            dialog = deepcopy(payload["payload"]["dialogs"][0])
            is_sensitive_case = is_sensitive_situation(dialog["human_utterances"][-1])
            all_prev_active_skills = payload["payload"]["all_prev_active_skills"][0]

            curr_topics = get_topics(dialog["human_utterances"][-1], which="cobot_topics")
            curr_nounphrases = get_entities(dialog["human_utterances"][-1], only_named=False, with_labels=False)

            if len(curr_topics) == 0:
                curr_topics = ["Phatic"]
            logger.info(f"Found topics: {curr_topics}")
            for i in range(len(curr_nounphrases)):
                np = re.sub(np_remove_expr, "", curr_nounphrases[i])
                np = re.sub(rm_spaces_expr, " ", np)
                if re.search(np_ignore_expr, np):
                    curr_nounphrases[i] = ""
                else:
                    curr_nounphrases[i] = np.strip()

            curr_nounphrases = [np for np in curr_nounphrases if len(np) > 0]

            logger.info(f"Found nounphrases: {curr_nounphrases}")

            cands = []
            confs = []
            human_attrs = []
            bot_attrs = []
            attrs = []

            cands += [choice(donotknow_answers)]
            confs += [0.5]
            attrs += [{"type": "dummy"}]
            human_attrs += [{}]
            bot_attrs += [{}]

            if len(dialog["utterances"]) > 14 and not is_sensitive_case:
                questions_same_nps = []
                for i, nphrase in enumerate(curr_nounphrases):
                    for q_id in NP_QUESTIONS.get(nphrase, []):
                        questions_same_nps += [QUESTIONS_MAP[str(q_id)]]

                if len(questions_same_nps) > 0:
                    logger.info("Found special nounphrases for questions. Return question with the same nounphrase.")
                    cands += [choice(questions_same_nps)]
                    confs += [0.5]
                    attrs += [{"type": "nounphrase_question"}]
                    human_attrs += [{}]
                    bot_attrs += [{}]

            link_to_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
            if link_to_question:
                _prev_bot_uttr = dialog["bot_utterances"][-2]["text"] if len(dialog["bot_utterances"]) > 1 else ""
                _bot_uttr = dialog["bot_utterances"][-1]["text"] if len(dialog["bot_utterances"]) > 0 else ""
                _prev_active_skill = (
                    dialog["bot_utterances"][-1]["active_skill"] if len(dialog["bot_utterances"]) > 0 else ""
                )

                _no_to_first_linkto = any([phrase in _bot_uttr for phrase in LINK_TO_PHRASES])
                _no_to_first_linkto = _no_to_first_linkto and all(
                    [phrase not in _prev_bot_uttr for phrase in LINK_TO_PHRASES]
                )
                _no_to_first_linkto = _no_to_first_linkto and is_no(dialog["human_utterances"][-1])
                _no_to_first_linkto = _no_to_first_linkto and _prev_active_skill != "dff_friendship_skill"

                _if_switch_topic = is_switch_topic(dialog["human_utterances"][-1])
                bot_uttr_dict = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
                _if_choose_topic = if_choose_topic(dialog["human_utterances"][-1], bot_uttr_dict)
                _is_ask_me_something = ASK_ME_QUESTION_PATTERN.search(dialog["human_utterances"][-1]["text"])

                if len(dialog["human_utterances"]) > 1:
                    _was_cant_do = "cant_do" in get_intents(dialog["human_utterances"][-2]) and (
                        len(curr_nounphrases) == 0 or is_yes(dialog["human_utterances"][-1])
                    )
                    _was_cant_do_stop_it = "cant_do" in get_intents(dialog["human_utterances"][-2]) and is_no(
                        dialog["human_utterances"][-1]
                    )
                else:
                    _was_cant_do = False
                    _was_cant_do_stop_it = False

                if _was_cant_do_stop_it:
                    link_to_question = "Sorry, bye! #+#exit"
                    confs += [1.0]  # finish dialog request
                elif _no_to_first_linkto:
                    confs += [0.99]
                elif _is_ask_me_something or _if_switch_topic or _was_cant_do or _if_choose_topic:
                    confs += [1.0]  # Use it only as response selector retrieve skill output modifier
                else:
                    confs += [0.05]  # Use it only as response selector retrieve skill output modifier
                cands += [link_to_question]
                attrs += [{"type": "link_to_for_response_selector"}]
                human_attrs += [human_attr]
                bot_attrs += [{}]

            facts_same_nps = []
            for i, nphrase in enumerate(curr_nounphrases):
                for fact_id in NP_FACTS.get(nphrase, []):
                    facts_same_nps += [
                        f"Well, now that you've mentioned {nphrase}, I've remembered this. {FACTS_MAP[str(fact_id)]}. "
                        f"{(opinion_request_question() if random.random() < ASK_QUESTION_PROB else '')}"
                    ]

            if len(facts_same_nps) > 0 and not is_sensitive_case:
                logger.info("Found special nounphrases for facts. Return fact with the same nounphrase.")
                cands += [choice(facts_same_nps)]
                confs += [0.5]
                attrs += [{"type": "nounphrase_fact"}]
                human_attrs += [{}]
                bot_attrs += [{}]

            total_time = time.time() - st_time
            logger.info(f"dummy_skill exec time: {total_time:.3f}s")
            asyncio.create_task(
                callback(task_id=payload["task_id"], response=[cands, confs, human_attrs, bot_attrs, attrs])
            )
        except Exception as e:
            logger.exception(e)
            sentry_sdk.capture_exception(e)
            asyncio.create_task(callback(task_id=payload["task_id"], response=e))
Code example #23
def is_midas_opinion_expression(vars):
    midas_classes = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="midas")
    intent_detected = "opinion" in midas_classes

    return intent_detected
Code example #24
File: condition.py Project: deepmipt/deepy
def forecast_intent_condition(ctx: Context, actor: Actor, *args,
                              **kwargs) -> bool:
    human_utter = get_last_human_utterance(ctx, actor)
    is_forecast_intent = "weather_forecast_intent" in get_intents(
        human_utter, probs=False, which="intent_catcher")
    return bool(is_forecast_intent)
Code example #25
File: connector.py Project: deepmipt/deepy
    async def send(self, payload: Dict, callback: Callable):
        st_time = time.time()
        try:
            dialog = payload["payload"]["states_batch"][0]

            skills_for_uttr = []
            user_uttr = dialog["human_utterances"][-1]
            user_uttr_text = user_uttr["text"].lower()
            user_uttr_annotations = user_uttr["annotations"]
            bot_uttr = dialog["bot_utterances"][-1] if len(
                dialog["bot_utterances"]) else {}
            bot_uttr_text_lower = bot_uttr.get("text", "").lower()
            prev_active_skill = bot_uttr.get("active_skill", "")

            intent_catcher_intents = get_intents(user_uttr,
                                                 probs=False,
                                                 which="intent_catcher")
            high_priority_intent_detected = any([
                k for k in intent_catcher_intents
                if k in high_priority_intents["dff_intent_responder_skill"]
            ])
            low_priority_intent_detected = any([
                k for k in intent_catcher_intents if k in low_priority_intents
            ])

            detected_topics = set(get_topics(user_uttr, which="all"))

            is_factoid = get_factoid(user_uttr).get("is_factoid", 0.0) > 0.96
            is_celebrity_mentioned = check_is_celebrity_mentioned(user_uttr)

            if_choose_topic_detected = if_choose_topic(user_uttr, bot_uttr)
            if_lets_chat_about_particular_topic_detected = if_chat_about_particular_topic(
                user_uttr, bot_uttr)

            dialog_len = len(dialog["human_utterances"])

            exit_cond = "exit" in intent_catcher_intents and (
                dialog_len == 1 or
                (dialog_len == 2 and len(user_uttr_text.split()) > 3))
            repeat_cond = ("repeat" in intent_catcher_intents
                           and prev_active_skill in UNPREDICTABLE_SKILLS
                           and re.match(r"^what.?$", user_uttr_text))
            cant_do_cond = ("cant_do" in intent_catcher_intents
                            and "play" in user_uttr_text and any([
                                phrase in bot_uttr_text_lower
                                for phrase in GREETING_QUESTIONS_TEXTS
                            ]))
            for intent_name, condition in zip(
                ["exit", "repeat", "cant_do"],
                [exit_cond, repeat_cond, cant_do_cond]):
                if condition:
                    high_priority_intent_detected = False
                    not_detected = {"detected": 0, "confidence": 0.0}
                    user_uttr["annotations"]["intent_catcher"][
                        intent_name] = not_detected
                    dialog["utterances"][-1]["annotations"]["intent_catcher"][
                        intent_name] = not_detected

            if "/new_persona" in user_uttr_text:
                # process /new_persona command
                skills_for_uttr.append(
                    "personality_catcher"
                )  # TODO: rm crutch of personality_catcher
            elif user_uttr_text == "/get_dialog_id":
                skills_for_uttr.append("dummy_skill")
            elif high_priority_intent_detected:
                # process intent with corresponding IntentResponder
                skills_for_uttr.append("dff_intent_responder_skill")
            elif is_sensitive_topic_and_request(user_uttr):
                # process user utterance with sensitive content, "safe mode"

                # adding open-domain skills without opinion expression
                skills_for_uttr.append("dff_program_y_dangerous_skill")
                skills_for_uttr.append("meta_script_skill")
                skills_for_uttr.append("personal_info_skill")
                skills_for_uttr.append("factoid_qa")
                skills_for_uttr.append("dff_grounding_skill")
                skills_for_uttr.append("dummy_skill")
                skills_for_uttr.append("small_talk_skill")

                if if_lets_chat_about_particular_topic_detected:
                    skills_for_uttr.append("news_api_skill")
                if if_special_weather_turn_on(user_uttr, bot_uttr):
                    skills_for_uttr.append("dff_weather_skill")
                if is_celebrity_mentioned:
                    skills_for_uttr.append("dff_gossip_skill")

                # adding closed-domain skills
                skills_for_uttr += turn_on_skills(
                    detected_topics,
                    intent_catcher_intents,
                    user_uttr_text,
                    bot_uttr.get("text", ""),
                    available_skills=[
                        "news_api_skill",
                        "dff_coronavirus_skill",
                        "dff_funfact_skill",
                        "dff_weather_skill",
                        "dff_short_story_skill",
                    ],
                )
                # adding linked-to skills
                skills_for_uttr.extend(get_linked_to_skills(dialog))
                skills_for_uttr.extend(get_previously_active_skill(dialog))
            else:
                # general case
                if low_priority_intent_detected:
                    skills_for_uttr.append("dff_intent_responder_skill")
                # adding open-domain skills
                skills_for_uttr.append("dff_grounding_skill")
                skills_for_uttr.append("dff_program_y_skill")
                skills_for_uttr.append("personal_info_skill")
                skills_for_uttr.append("meta_script_skill")
                skills_for_uttr.append("dummy_skill")
                skills_for_uttr.append("dialogpt")  # generative skill
                skills_for_uttr.append("small_talk_skill")
                skills_for_uttr.append("knowledge_grounding_skill")
                skills_for_uttr.append("convert_reddit")
                skills_for_uttr.append("comet_dialog_skill")
                skills_for_uttr.append("dff_program_y_wide_skill")

                # adding friendship only in the beginning of the dialog
                if len(dialog["utterances"]) < 20:
                    skills_for_uttr.append("dff_friendship_skill")

                if if_choose_topic_detected or if_lets_chat_about_particular_topic_detected:
                    skills_for_uttr.append("knowledge_grounding_skill")
                    skills_for_uttr.append("news_api_skill")

                switch_wiki_skill, _ = if_switch_wiki_skill(
                    user_uttr, bot_uttr)
                if switch_wiki_skill or switch_wiki_skill_on_news(
                        user_uttr, bot_uttr):
                    skills_for_uttr.append("dff_wiki_skill")
                if if_switch_test_skill(user_uttr, bot_uttr):
                    skills_for_uttr.append("dff_art_skill")

                # adding factoidQA Skill if user utterance is factoid question
                if is_factoid:
                    skills_for_uttr.append("factoid_qa")

                if "dummy_skill" in prev_active_skill and len(
                        dialog["utterances"]) > 4:
                    skills_for_uttr.append("dummy_skill_dialog")

                # if user mentions a celebrity
                if is_celebrity_mentioned:
                    skills_for_uttr.append("dff_gossip_skill")
                # some special cases
                if if_special_weather_turn_on(user_uttr, bot_uttr):
                    skills_for_uttr.append("dff_weather_skill")
                if if_turn_on_emotion(user_uttr, bot_uttr):
                    skills_for_uttr.append("emotion_skill")
                if get_named_locations(user_uttr):
                    skills_for_uttr.append("dff_travel_skill")
                if extract_movies_names_from_annotations(user_uttr):
                    skills_for_uttr.append("dff_movie_skill")

                # adding closed-domain skills
                skills_for_uttr += turn_on_skills(
                    detected_topics,
                    intent_catcher_intents,
                    user_uttr_text,
                    bot_uttr.get("text", ""),
                    available_skills=[
                        "dff_art_skill",
                        "dff_movie_skill",
                        "dff_book_skill",
                        "news_api_skill",
                        "dff_food_skill",
                        "dff_animals_skill",
                        "dff_sport_skill",
                        "dff_music_skill",
                        "dff_science_skill",
                        "dff_gossip_skill",
                        "game_cooperative_skill",
                        "dff_weather_skill",
                        "dff_funfact_skill",
                        "dff_travel_skill",
                        "dff_coronavirus_skill",
                        "dff_bot_persona_skill",
                        "dff_gaming_skill",
                        "dff_short_story_skill",
                    ],
                )
                # adding linked-to skills
                skills_for_uttr.extend(get_linked_to_skills(dialog))
                skills_for_uttr.extend(get_previously_active_skill(dialog))

            # the following applies not only to usual conversation but also to sensitive / high priority intents, etc.

            if "dff_coronavirus_skill" in skills_for_uttr:
                #  no convert & comet when about coronavirus
                if "convert_reddit" in skills_for_uttr:
                    skills_for_uttr.remove("convert_reddit")
                if "comet_dialog_skill" in skills_for_uttr:
                    skills_for_uttr.remove("comet_dialog_skill")

            if len(dialog["utterances"]) > 1:
                # Use only misheard asr skill if asr is not confident and skip it for greeting
                if user_uttr_annotations.get("asr",
                                             {}).get("asr_confidence",
                                                     "high") == "very_low":
                    skills_for_uttr = ["misheard_asr"]

            if "/alexa_" in user_uttr_text:
                # adding alexa handler for Amazon Alexa specific commands
                skills_for_uttr = ["alexa_handler"]

            logger.info(f"Selected skills: {skills_for_uttr}")

            total_time = time.time() - st_time
            logger.info(f"rule_based_selector exec time = {total_time:.3f}s")
            asyncio.create_task(
                callback(task_id=payload["task_id"],
                         response=list(set(skills_for_uttr))))
        except Exception as e:
            total_time = time.time() - st_time
            logger.info(f"rule_based_selector exec time = {total_time:.3f}s")
            logger.exception(e)
            sentry_sdk.capture_exception(e)
            asyncio.create_task(
                callback(task_id=payload["task_id"],
                         response=["dff_program_y_skill", "dummy_skill"]))
Code example #26
def is_cobot_opinion_demanded(vars):
    intents = common_utils.get_intents(
        state_utils.get_last_human_utterance(vars), which="all")
    opinion_request_detected = "Opinion_RequestIntent" in intents
    return bool(opinion_request_detected)