コード例 #1
0
    def unused_handler(ctx: Context, actor: Actor) -> str:
        """Return ``initial`` plus the least-repeated phrase from the closed-over
        ``phrases`` list, tracking usage (by phrase object id) in ctx.misc.

        A phrase never used before is returned immediately. When every phrase
        has been used and ``exit_on_exhaust`` is set, the current node is
        rewired to the global fallback and only ``initial`` is returned.
        Otherwise the phrase repeated the fewest times (highest decayed
        confidence, 0.4 per repeat) is chosen again.
        """
        used = ctx.misc.get("used_phrases", [])
        confidences = [1] * len(phrases)

        for idx, phrase in enumerate(phrases):
            repeats: int = used.count(id(phrase))
            if not repeats:
                # fresh phrase — use it right away and record it
                used.append(id(phrase))
                ctx.misc["used_phrases"] = used
                return initial + phrase
            # decay confidence exponentially with each repetition
            confidences[idx] *= 0.4**repeats

        if exit_on_exhaust:
            # all phrases used: route this node straight to the global fallback
            label = ctx.last_label
            actor.plot[label[0]][label[1]].transitions = {
                ("global_flow", "fallback", 2): cnd.true()
            }
            return initial

        best_idx = confidences.index(max(confidences))
        best_phrase = phrases[best_idx]
        used.append(id(best_phrase))
        ctx.misc["used_phrases"] = used
        return initial + best_phrase
コード例 #2
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def offer_more(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Append a follow-up question to the current node response.

    While covid facts remain, offers to tell more; once they are exhausted,
    asks for the user's age (to estimate recovery likelihood). When both
    facts are exhausted and age was already asked, the context is returned
    untouched.
    """
    facts_exhausted = ctx.misc.get("covid_facts_exhausted", False)
    asked_about_age = ctx.misc.get("asked_about_age", False)

    # Because node.response can be empty string
    # (for example, when all covid facts are exhausted)
    def add_space(string: str):
        if string:
            return f"{string} "
        else:
            return string

    if facts_exhausted and asked_about_age:
        # do nothing
        return ctx
    processed_node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
    if callable(processed_node.response):
        try:
            response = processed_node.response(
                ctx,
                actor,
                *args,
                **kwargs,
            )
        except Exception as exc:
            # best-effort: a failing response callable degrades to an empty reply
            logger.exception(exc)
            response = ""
    else:
        response = processed_node.response
    if not facts_exhausted:
        # BUG FIX: add_space was defined (see its comment) but never used,
        # which left a leading space whenever response was empty.
        processed_node.response = f"{add_space(response)}Would you want to learn more?"
        ctx.a_s["processed_node"] = processed_node
        return ctx
    if not asked_about_age:
        processed_node.response = (
            f"{add_space(response)}Anyway, I can approximately tell you how likely you are to "
            f"recover from coronavirus if you get it. What is your age?")
        ctx.a_s["processed_node"] = processed_node
        return ctx
    logger.critical(
        "add_catch_question processor has reached an unreachable end in coronavirus_skill"
    )
    return ctx
コード例 #3
0
ファイル: actor.py プロジェクト: deepmipt/deepy
def get_ctx(
    human_utter_index,
    dialog,
    state,
    dff_shared_state,
    entities,
    used_links,
    age_group,
    disliked_skills,
    clarification_request_flag,
):
    """Rebuild a dff ``Context`` from the serialized skill ``state`` and attach
    the per-turn agent metadata under ``ctx.misc["agent"]``.

    Last turn's suspension flag is carried over as
    ``previous_turn_dff_suspended``; the current turn always starts
    unsuspended. The latest human utterance text is pushed as the new request.
    """
    prev_index = state.get("previous_human_utter_index", -1)
    was_suspended = state.get("current_turn_dff_suspended", False)
    agent = dict(
        previous_human_utter_index=prev_index,
        human_utter_index=human_utter_index,
        dialog=dialog,
        entities=entities,
        shared_memory=state.get("shared_memory", {}),
        previous_turn_dff_suspended=was_suspended,
        current_turn_dff_suspended=False,
        response={},
        dff_shared_state=dff_shared_state,
        cache={},
        history=state.get("history", {}),
        used_links=used_links,
        age_group=age_group,
        disliked_skills=disliked_skills,
        clarification_request_flag=clarification_request_flag,
    )
    ctx = Context.cast(state.get("context", {}))
    ctx.misc["agent"] = agent
    ctx.add_request(dialog["human_utterances"][-1]["text"])
    return ctx
コード例 #4
0
def save_cross_state(ctx: Context,
                     actor: Actor,
                     service_name=SERVICE_NAME.replace("-", "_"),
                     new_state=None):
    """Store ``new_state`` as this service's cross-service shared state.

    No-op while the context is being validated.

    BUG FIX: the default used to be a mutable ``{}`` shared across all calls,
    so mutations of the stored default leaked between invocations; use None
    as the sentinel and create a fresh dict per call instead.
    """
    if not ctx.validation:
        ctx.misc["agent"]["dff_shared_state"]["cross_states"][
            service_name] = {} if new_state is None else new_state
コード例 #5
0
ファイル: processing.py プロジェクト: deepmipt/deepy
 def set_flag_handler(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
     """Store the closed-over ``value`` under the closed-over ``flag`` key in
     ``ctx.misc`` and hand the context back."""
     ctx.misc.update({flag: value})
     return ctx
コード例 #6
0
def choose_story(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Select the next untold story and build the opening line.

    Records the chosen story and its type in ctx.misc and returns
    "<sure?> <setup> ... <what-happened-next prompt>".
    """
    previous = get_previous_node(ctx)
    story = get_story_left(ctx, actor)
    story_type = get_story_type(ctx, actor)
    setup = stories.get(story_type, {}).get(story, {}).get("setup", "")
    next_prompt = random.choice(
        sorted(phrases.get("what_happend_next", [])))

    # include sure if user defined a type of story at the beginnig, otherwise include nothing
    if previous == "start_node":
        sure_phrase = random.choice(sorted(phrases.get("sure", [])))
    else:
        sure_phrase = ""

    ctx.misc["stories_told"] = ctx.misc.get("stories_told", []) + [story]
    ctx.misc["story"] = story
    ctx.misc["story_type"] = story_type

    return " ".join([sure_phrase, setup, "...", next_prompt])
コード例 #7
0
ファイル: processing.py プロジェクト: deepmipt/deepy
 def save_slots_to_ctx_processing(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
     """Merge the closed-over ``slots`` mapping into ``ctx.misc["slots"]``;
     new values win over previously saved ones."""
     merged = ctx.misc.get("slots", {}) | slots
     ctx.misc["slots"] = merged
     return ctx
コード例 #8
0
ファイル: actor.py プロジェクト: deepmipt/deepy
def get_response(ctx: Context, actor: Actor, *args, **kwargs):
    """Serialize the finished dff turn into the dp-agent response format.

    Packs the skill state (including the serialized context) into human
    attributes, derives confidence / can_continue, and returns either a
    single (reply, confidence, human_attr, bot_attr, hype_attr) tuple or —
    when the last response is a list of hypotheses — the same five fields
    transposed into parallel lists.
    """
    agent = ctx.misc["agent"]
    human_utter_index = agent["human_utter_index"]
    dff_shared_state = agent["dff_shared_state"]
    history = agent["history"]
    used_links = agent["used_links"]
    age_group = agent["age_group"]
    disliked_skills = agent["disliked_skills"]
    current_turn_dff_suspended = agent["current_turn_dff_suspended"]
    response_parts = agent.get("response_parts", [])
    # remember which dff label produced the answer for this utterance
    history[str(human_utter_index)] = list(ctx.labels.values())[-1]
    state = {
        "shared_memory": agent["shared_memory"],
        "previous_human_utter_index": human_utter_index,
        "history": history,
        "current_turn_dff_suspended": current_turn_dff_suspended,
    }
    # zero confidence means the skill cannot continue the scenario
    confidence = ctx.misc["agent"]["response"].get("confidence", 0.85)
    can_continue = CAN_CONTINUE_SCENARIO if confidence else CAN_NOT_CONTINUE
    can_continue = ctx.misc["agent"]["response"].get("can_continue",
                                                     can_continue)
    # trim old turns (presumably keeping the 2 most recent — see dff
    # Context.clear) and drop the agent payload before serializing the context
    ctx.clear(2, ["requests", "responses", "labels"])
    del ctx.misc["agent"]
    state["context"] = json.loads(ctx.json())

    human_attr = {
        f"{SERVICE_NAME}_state": state,
        "dff_shared_state": dff_shared_state,
        "used_links": used_links,
        "age_group": age_group,
        "disliked_skills": disliked_skills,
    }
    hype_attr = {"can_continue": can_continue}
    if response_parts:
        hype_attr["response_parts"] = response_parts
    response = ctx.last_response
    if isinstance(response, list):
        # multiple hypotheses: merge the shared attrs into each one (the
        # hypothesis's own values win) and transpose into parallel lists
        responses = []
        for reply, conf, h_a, b_a, attr in response:
            conf = conf if conf else confidence
            h_a = human_attr | h_a
            attr = hype_attr | attr
            responses += [(reply, conf, h_a, b_a, attr)]
        return list(zip(*responses))
    else:
        return (response, confidence, human_attr, {}, hype_attr)
コード例 #9
0
ファイル: processing.py プロジェクト: deepmipt/deepy
 def add_from_options_handler(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
     """Append one randomly chosen entry from the closed-over ``options`` list
     to the current node response."""
     node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
     extra = random.choice(options)
     node.response = f"{node.response} {extra}"
     ctx.a_s["processed_node"] = node
     return ctx
コード例 #10
0
ファイル: responses.py プロジェクト: deepmipt/deepy
def generate_universal_response(ctx: Context) -> REPLY_TYPE:
    """
    Returns:
      string from universal_intent_responses file filtered with intent,
      confidence (can be UNIVERSAL_RESPONSE_CONF, UNIVERSAL_RESPONSE_LOW_CONF, ALMOST_SUPER_CONF),
      human attributes (used universal intent responses), # for now not used
      bot attributes (empty),
      attributes (response parts)
    """
    dialog = ctx.misc["agent"]["dialog"]
    curr_intents = get_current_intents(dialog["human_utterances"][-1])
    # currently unused this part because it's specific formatter need to be implemented
    human_attr = {}
    human_attr["dff_grounding_skill"] = ctx.misc.get("dff_grounding_skill", {})
    # make sure the used-responses list exists so universal_response can append
    human_attr["dff_grounding_skill"][
        "used_universal_intent_responses"] = human_attr[
            "dff_grounding_skill"].get("used_universal_intent_responses", [])
    attr = {}
    reply = ""
    confidence = 0.0
    # acknowledgement prefix derived from the same utterance; the remaining
    # returned fields are not used here
    ackn, _, _, _, _ = generate_acknowledgement_response(ctx)
    is_question = is_any_question_sentence_in_utterance(
        dialog["human_utterances"][-1])

    def universal_response(intent):
        # Pick an unused canned reply for `intent`, record it as used, and
        # set the response attributes — mutates reply/human_attr/attr above.
        nonlocal reply, human_attr, attr
        # for now return random reply UNIVERSAL_INTENT_RESPONSES
        reply = get_unused_reply(
            intent, human_attr["dff_grounding_skill"]
            ["used_universal_intent_responses"])
        human_attr["dff_grounding_skill"][
            "used_universal_intent_responses"] += [reply]
        attr = {"response_parts": ["body"], "type": "universal_response"}

    # store the grounding-skill state back into the context (same dict object,
    # so later appends inside universal_response are reflected here too)
    ctx.misc["dff_grounding_skill"] = human_attr["dff_grounding_skill"]

    for intent in curr_intents:
        if intent in UNIVERSAL_INTENT_RESPONSES:
            universal_response(intent)
            confidence = UNIVERSAL_RESPONSE_CONF
            # we prefer the first found intent, as it should be semantic request
            break
    # no intent matched: fall back to an opinion-question reply if the user
    # asked a question
    if reply == "":
        if is_question:
            universal_response("open_question_opinion")
            confidence = UNIVERSAL_RESPONSE_LOW_CONF
    if is_question and is_sensitive_topic_and_request(
            dialog["human_utterances"][-1]):
        # if question in sensitive situation - answer with confidence 0.99
        confidence = ALMOST_SUPER_CONF
    if ackn and not is_toxic_or_badlisted_utterance(
            dialog["human_utterances"][-1]):
        reply = f"{ackn} {reply}"
        attr["response_parts"] = ["acknowledgement", "body"]
    return reply, confidence, human_attr, {}, attr
コード例 #11
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def insert_global_confirmed(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Fill the node response template with the global confirmed-cases count."""
    node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
    confirmed = cds.overall().confirmed
    node.response = node.response.format(confirmed)
    ctx.a_s["processed_node"] = node
    return ctx
コード例 #12
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def execute_response(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Resolve a callable node response into its concrete value and store the
    node back as the processed node."""
    node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
    response = node.response
    if callable(response):
        node.response = response(ctx, actor)
    ctx.a_s["processed_node"] = node
    return ctx
コード例 #13
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def detect_age(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Cache the user's age in ctx.misc whenever the extractor finds one."""
    detected = get_age(ctx)
    if detected:
        ctx.misc["age"] = detected
    return ctx
コード例 #14
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def detect_subject(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Cache the detected geo subject in ctx.misc, skipping "undetected" results."""
    detected = get_subject(ctx)
    if detected and detected["type"] != "undetected":
        ctx.misc["subject"] = detected
    return ctx
コード例 #15
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def execute_response(
    ctx: Context,
    actor: Actor,
) -> Context:
    """Resolve a callable node response ahead of time so that later
    processors (e.g. slot filling) see the final string."""
    node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
    response = node.response
    if callable(response):
        node.response = response(ctx, actor)
    ctx.a_s["processed_node"] = node
    return ctx
コード例 #16
0
ファイル: response.py プロジェクト: deepmipt/deepy
def get_covid_fact(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return the first COVID fact not used yet in this dialog.

    Usage is tracked by index in ctx.misc["used_covid_facts"]; when every
    fact has been served, ctx.misc["covid_facts_exhausted"] is set and an
    empty string is returned. During validation, returns "" immediately.
    """
    if ctx.validation:
        return ""
    used_facts = ctx.misc.get("used_covid_facts", [])

    chosen = next(
        ((idx, fact) for idx, fact in enumerate(COVID_FACTS) if idx not in used_facts),
        None,
    )
    if chosen is None:
        result = ""
    else:
        chosen_idx, result = chosen
        used_facts.append(chosen_idx)
        ctx.misc["used_covid_facts"] = used_facts

    if len(used_facts) == len(COVID_FACTS):
        ctx.misc["covid_facts_exhausted"] = True

    return result
コード例 #17
0
ファイル: processing.py プロジェクト: deepmipt/deepy
 def fill_responses_by_slots_processing(
     ctx: Context,
     actor: Actor,
     *args,
     **kwargs,
 ) -> Context:
     """Substitute every "{slot_name}" placeholder in the node response with
     the value saved in ctx.misc["slots"]."""
     node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
     for name, value in ctx.misc.get("slots", {}).items():
         placeholder = "{" + name + "}"
         node.response = node.response.replace(placeholder, value)
     ctx.a_s["processed_node"] = node
     return ctx
コード例 #18
0
def set_cross_link(
        ctx: Context,
        actor: Actor,
        to_service_name,
        cross_link_additional_data=None,
        from_service_name=SERVICE_NAME.replace("-", "_"),
):
    """Record a cross-service link to ``to_service_name`` for the current
    human utterance index, tagged with the originating service name plus any
    extra payload.

    No-op while the context is being validated.

    BUG FIX: ``cross_link_additional_data`` used to default to a mutable
    ``{}`` shared across calls; use None as the sentinel instead.
    """
    cur_human_index = get_human_utter_index(ctx, actor)
    if not ctx.validation:
        ctx.misc["agent"]["dff_shared_state"]["cross_links"][
            to_service_name] = {
                cur_human_index: {
                    "from_service": from_service_name,
                    **(cross_link_additional_data or {}),
                }
            }
コード例 #19
0
def get_book_by_genre(ctx: Context, actor: Actor) -> Optional[Tuple[str, str, str]]:
    """
    Extract book, author, and description by genre from BOOKREADS.

    Returns the first book of the requested genre not yet mentioned in this
    dialog (tracked by object id in ctx.misc["used_phrases"]), or None when
    the genre is unknown/unset or every book was already used.
    """
    genre = get_slot("cur_genre")(ctx, actor)
    if not genre:
        return None
    # BUG FIX: was ctx.misc.get(["used_phrases"], []) — a list is unhashable
    # and cannot be a dict key; use the string key like the sibling handlers.
    used = ctx.misc.get("used_phrases", [])
    for book_info in BOOKREADS_DATA[genre]:
        book = book_info["book"]
        author = book_info["author"]
        description = book_info["description"]
        if id(book) not in used:
            used.append(id(book))
            ctx.misc["used_phrases"] = used
            logger.info(f"fetched {book}")
            # NOTE: return annotation fixed to a 3-tuple to match this value
            return book, author, description
    return None
コード例 #20
0
ファイル: processing.py プロジェクト: deepmipt/deepy
def insert_subject(
    ctx: Context,
    actor: Actor,
    *args,
    **kwargs,
) -> Context:
    """Format the node response template with the detected subject's name.

    Falls back to an all-"undetected" country subject when detection never
    ran. See condition.subject_detected for more details.
    """
    fallback = {
        "type": "country",
        "city": "undetected",
        "state": "undetected",
        "county": "undetected",
        "country": "undetected",
    }
    subject = ctx.misc.get("subject", fallback)
    node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
    node.response = node.response.format(subject[subject["type"]])
    ctx.a_s["processed_node"] = node
    return ctx
コード例 #21
0
def add_parts_to_response_parts(ctx: Context, actor: Actor, parts=()):
    """Merge *parts* into the agent's deduplicated, sorted response_parts list.

    No-op while the context is being validated.

    BUG FIX: the default was a shared mutable ``[]``; replaced with an
    immutable tuple — parts is only iterated, so callers see no difference.
    """
    existing = [] if ctx.validation else ctx.misc["agent"].get("response_parts", [])
    response_parts = set(existing)
    response_parts.update(parts)
    if not ctx.validation:
        # sorted() accepts the set directly — no intermediate list needed
        ctx.misc["agent"]["response_parts"] = sorted(response_parts)
コード例 #22
0
def reset_dff_suspension(ctx: Context, actor: Actor):
    """Clear the current turn's dff suspension flag (no-op during validation)."""
    if ctx.validation:
        return
    ctx.misc["agent"]["current_turn_dff_suspended"] = False
コード例 #23
0
def set_age_group(ctx: Context, actor: Actor, set_age_group):
    """Persist the given age group on the agent state (no-op during validation)."""
    if ctx.validation:
        return
    ctx.misc["agent"]["age_group"] = set_age_group
コード例 #24
0
ファイル: processing.py プロジェクト: deepmipt/deepy
 def set_flag_handler(ctx: Context, actor: Actor) -> Context:
     """Record the closed-over ``label``/``value`` pair in ctx.misc["flags"],
     creating the flags dict on first use."""
     flags = ctx.misc.get("flags", {})
     flags[label] = value
     ctx.misc["flags"] = flags
     return ctx