Exemplo n.º 1
0
def set_conf_and_can_cont_by_universal_policy(ctx: Context, actor: Actor):
    """Set confidence and can-continue flag according to the dialog-stage policy.

    Early-dialog turns get high confidence; an interrupted or late dialog
    gets reduced confidence, and a dialog past the opening window is
    suppressed entirely (zero confidence, cannot continue).
    """
    START_CONF = 0.98
    CONTINUE_CONF = 0.9
    SHORT_ANSWER_CONF = 0.98
    MIDDLE_START_CONF = 0.7

    if not is_begin_of_dialog(ctx, actor, begin_dialog_n=10):
        # Too deep into the dialog: this skill should not fire at all.
        confidence, can_continue_flag = 0.0, CAN_NOT_CONTINUE
    elif is_first_our_response(ctx, actor):
        confidence, can_continue_flag = START_CONF, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, actor) and common_greeting.dont_tell_you_answer(
        int_ctx.get_last_human_utterance(ctx, actor)
    ):
        # Uninterrupted short "don't tell you" answer: keep confidence high.
        confidence, can_continue_flag = SHORT_ANSWER_CONF, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, actor):
        confidence, can_continue_flag = CONTINUE_CONF, CAN_CONTINUE_SCENARIO
    else:
        confidence, can_continue_flag = MIDDLE_START_CONF, CAN_CONTINUE_SCENARIO

    int_ctx.set_can_continue(ctx, actor, can_continue_flag)
    int_ctx.set_confidence(ctx, actor, confidence)
Exemplo n.º 2
0
def forecast_intent_processing(ctx, actor) -> None:
    """Set can-continue and confidence depending on how the forecast was triggered.

    An intent-only trigger (no explicit request) gets scenario-level
    confidence; an explicit request gets the must-continue/high pair.
    """
    intent_without_request = not forecast_requested_condition(ctx, actor) and forecast_intent_condition(ctx, actor)
    if intent_without_request:
        set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
        set_confidence(ctx, actor, QUESTION_CONF)
    else:
        set_can_continue(ctx, actor, MUST_CONTINUE)
        set_confidence(ctx, actor, HIGH_CONF)
Exemplo n.º 3
0
def set_confidence_from_input(ctx: Context, actor: Actor, *args, **kwargs) -> Context:
    """Set confidence from the detected intent; force 1.0 for high-priority intents."""
    intent, detected_conf = get_detected_intents(int_ctx.get_last_human_utterance(ctx, actor))
    is_priority = intent in high_priority_intents["dff_intent_responder_skill"]
    int_ctx.set_confidence(ctx, actor, 1.0 if is_priority else detected_conf)
    return ctx
Exemplo n.º 4
0
def set_start_confidence(ctx: Context, actor: Actor) -> Context:
    """Choose the skill's starting confidence from how the art topic came up."""
    human = int_ctx.get_last_human_utterance(ctx, actor)
    bot = int_ctx.get_last_bot_utterance(ctx, actor)
    if if_chat_about_particular_topic(human, bot, compiled_pattern=ART_PATTERN):
        # The user explicitly asked to chat about art.
        int_ctx.set_confidence(ctx, actor, SUPER_CONFIDENCE)
    elif re.search(ART_PATTERN, human["text"]):
        # Art was merely mentioned in passing.
        int_ctx.set_confidence(ctx, actor, HIGH_CONFIDENCE)
    return ctx
Exemplo n.º 5
0
def tell_punchline(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return the punchline for the story stored in ``ctx.misc``.

    Marks the skill as continuable; confidence is raised to 0.8 only when
    the user signalled they do not know the answer.
    """
    int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
    # Fixed idiom: the original used a conditional expression as a bare
    # statement purely for its side effect; a plain `if` says the same thing.
    if int_cnd.is_do_not_know_vars(ctx, actor):
        int_ctx.set_confidence(ctx, actor, 0.8)

    story = ctx.misc.get("story", "")
    story_type = ctx.misc.get("story_type", "")

    return stories.get(story_type, {}).get(story, {}).get("punchline", "")
Exemplo n.º 6
0
def activity_question_response(ctx: Context, actor: Actor, *args,
                               **kwargs) -> str:
    """Ask the follow-up question for the weather preference detected in the request.

    Returns an empty string (with zero confidence) when no preference is found.
    """
    preferred = get_preferred_weather(ctx.last_request, WEATHER_DICT)
    if not preferred:
        # No recognizable weather preference — bow out with zero confidence.
        set_confidence(ctx, actor, ZERO_CONF)
        return ""
    save_to_shared_memory(ctx, actor, preferred_weather=preferred)
    set_confidence(ctx, actor, SMALLTALK_CONF)
    return WEATHER_DICT[preferred]["question"]
Exemplo n.º 7
0
def sys_need_more_time_response(ctx: Context, actor: Actor) -> str:
    """Acknowledge the user's sentiment, then ask a random wish-more-time question."""
    acknowledgement = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor)
    question = random.choice(common_weekend.WISH_MORE_TIME)

    # Mid-level confidence: this is a dialog-beginning continuation turn.
    int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
    int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
    int_ctx.add_acknowledgement_to_response_parts(ctx, actor)

    return f"{acknowledgement} {question}"
Exemplo n.º 8
0
def activity_answer_response(ctx: Context, actor: Actor, *args,
                             **kwargs) -> str:
    """Deliver the stored weather-activity answer if the user agreed.

    The skill terminates after this turn in either case (CAN_NOT_CONTINUE).
    """
    if is_yes_vars(ctx, actor):
        set_can_continue(ctx, actor, CAN_NOT_CONTINUE)
        set_confidence(ctx, actor, SMALLTALK_CONF)
        preferred = get_shared_memory(ctx, actor).get("preferred_weather", "")
        # Clear the stored preference once consumed.
        save_to_shared_memory(ctx, actor, preferred_weather="")
        return WEATHER_DICT[preferred]["answer"] if preferred else ""
    set_can_continue(ctx, actor, CAN_NOT_CONTINUE)
    set_confidence(ctx, actor, ZERO_CONF)
    return ""
Exemplo n.º 9
0
def thematic_funfact_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Tell a fun fact about the entity the user asked about, plus a link question.

    The entity is whatever follows the word "about" in the last request.
    Zeroes out confidence when no fact could be produced.
    """
    set_confidence(ctx, actor, CONF_HIGH)
    set_can_continue(ctx, actor, MUST_CONTINUE)
    response = ""
    parts = ctx.last_request.split("about")
    if len(parts) > 1:
        entity = parts[1]
        human_utter = get_last_human_utterance(ctx, actor)
        topic = get_topics(human_utter, which="cobot_topics")[0]
        funfact = get_fact(entity, f"fact about {entity}")
        if funfact:
            response = f"{funfact} {make_question(topic)}"
    if not response:
        # Nothing to say — lower confidence so another skill takes over.
        set_confidence(ctx, actor, CONF_ZERO)
    return response
Exemplo n.º 10
0
def forecast_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Respond with a weather forecast for the user's home or a mentioned location.

    Falls back to an apology (with reduced confidence) when no location
    can be determined.
    """
    if homeland_forecast_requested_condition(ctx, actor):
        location_name = ""
        dialog = get_dialog(ctx, actor)
        # Home location lives in the user profile, if present.
        if "human" in dialog and "profile" in dialog["human"]:
            location_name = dialog["human"]["profile"].get("location", "")
    else:
        location_name = retrieve_location_entity_from_utterance(
            get_last_human_utterance(ctx, actor)
        )

    if not location_name:
        set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
        set_confidence(ctx, actor, MISSED_CITY_CONF)
        return SORRY_PHRASE

    forecast_intent_processing(ctx, actor)
    return f"{request_weather_service(location_name)}. {QUESTION_PHRASE}"
Exemplo n.º 11
0
def random_funfact_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Tell a random fun fact the user has not heard yet, plus a link question.

    Already-given facts are tracked in shared memory; confidence is zeroed
    when every fact has been used.
    """
    set_confidence(ctx, actor, CONF_HIGH)
    set_can_continue(ctx, actor, MUST_CONTINUE)
    candidates = copy.deepcopy(FUNFACT_LIST)
    random.shuffle(candidates)
    shared_memory = get_shared_memory(ctx, actor)
    given_funfacts = shared_memory.get("given_funfacts", []) if shared_memory else []
    response = ""
    for funfact, topic in candidates:
        if funfact in given_funfacts:
            continue
        # Remember this fact so it is never repeated.
        given_funfacts.append(funfact)
        save_to_shared_memory(ctx, actor, given_funfacts=given_funfacts)
        response = f"{funfact} {make_question(topic)}"
        break
    if not response:
        set_confidence(ctx, actor, CONF_ZERO)
    return response
Exemplo n.º 12
0
def fallback(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Fallback response for the story skill.

    Handles three cases: all stories of the requested type are exhausted,
    no story type could be detected, and restarting after a punchline or
    fallback node.
    """
    prev_node = get_previous_node(ctx)
    story_type = get_story_type(ctx, actor)
    story_left = get_story_left(ctx, actor)

    # Ran out of stories of the requested type.
    if prev_node == "which_story_node" and story_type and not story_left:
        int_ctx.set_can_continue(ctx, actor, "CANNOT_CONTINUE")
        return "Oh, sorry, but I've run out of stories."

    # Could not detect a story type at all.
    elif prev_node == "which_story_node" and not story_type:
        int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
        return random.choice(sorted(phrases.get("no_stories", [])))

    # prev_node is tell_punchline_node or fallback_node: offer to start over.
    else:
        int_ctx.set_can_continue(ctx, actor, "MUST_CONTINUE")
        # Fixed idiom: the original used a conditional expression as a bare
        # statement purely for its side effect; a plain `if` is the idiom.
        if int_cnd.is_do_not_know_vars(ctx, actor):
            int_ctx.set_confidence(ctx, actor, 0.5)
        return random.choice(sorted(phrases.get("start_phrases", [])))
Exemplo n.º 13
0
def provide_facts_response(ctx, actor, page_source, wiki_page):
    """Return the next unused fact from the given wikiHow/Wikipedia page.

    Page content is fetched once and cached in the module-level ``memory``
    dict; indices of already-told facts are tracked per page in
    ``ctx.misc["wiki"]``. Confidence/can-continue are set from whether a
    fact was found and whether the user asked to continue. The chosen
    response is also written onto the actor's processed node.
    """
    wiki = ctx.misc.get("wiki", {})
    user_uttr: dict = ctx.misc.get("agent", {}).get("dialog", {}).get("human_utterances", [{}])[-1]
    isyes = is_yes(user_uttr) or re.findall(CONTINUE_PATTERN, user_uttr["text"])
    response = ""
    cur_wiki_page = wiki.get("cur_wiki_page", "")
    if not cur_wiki_page:
        # First visit for this page: fetch and preprocess its content once.
        wiki["cur_wiki_page"] = wiki_page
        if page_source == "wikiHow":
            page_content = get_wikihow_content(wiki_page, wikihow_cache)
            memory["wiki_page_content"] = preprocess_wikihow_page(page_content)
        elif page_source == "wikipedia":
            page_content = get_wikipedia_content(wiki_page)
            memory["wiki_page_content"] = preprocess_wikipedia_page(wiki_page.lower(), [], page_content)
        logger.info(f"wiki_page {wiki_page}")

    used_wiki_page_nums_dict = wiki.get("used_wiki_page_nums", {})
    used_wiki_page_nums = used_wiki_page_nums_dict.get(wiki_page, [])
    wiki_page_content_list = memory.get("wiki_page_content", [])
    logger.info(f"response, used_wiki_page_nums {used_wiki_page_nums}")
    logger.info(f"response, wiki_page_content_list {wiki_page_content_list[:3]}")

    if wiki_page_content_list:
        # Pick the first fact whose index has not been used for this page.
        for num, fact in enumerate(wiki_page_content_list):
            if num not in used_wiki_page_nums:
                facts_str = fact.get("facts_str", "")
                question = fact.get("question", "")
                response = f"{facts_str} {question}".strip().replace("  ", " ")
                used_wiki_page_nums.append(num)
                used_wiki_page_nums_dict[wiki_page] = used_wiki_page_nums
                wiki["used_wiki_page_nums"] = used_wiki_page_nums_dict
                break

        # Fixed: the original nested this identical condition twice.
        if len(wiki_page_content_list) == len(used_wiki_page_nums):
            # All facts exhausted: reset the page state.
            # NOTE(review): this resets "wiki_page", but the active page is
            # stored under "cur_wiki_page" above — confirm which key the
            # consumers actually read.
            wiki["wiki_page"] = ""
            memory["wiki_page_content"] = []
    logger.info(f"response, final {response}")

    if response:
        if isyes:
            # User explicitly asked to continue: maximum confidence.
            context.set_confidence(ctx, actor, 1.0)
            context.set_can_continue(ctx, actor, common_constants.MUST_CONTINUE)
        else:
            context.set_confidence(ctx, actor, 0.99)
            context.set_can_continue(ctx, actor, common_constants.CAN_CONTINUE_SCENARIO)
    else:
        context.set_confidence(ctx, actor, 0.0)
        context.set_can_continue(ctx, actor, common_constants.CAN_NOT_CONTINUE)

    # Attach the response to the processed node; the attribute used depends
    # on the DFF version (`a_s` vs `framework_states`).
    if hasattr(ctx, "a_s"):
        processed_node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
        processed_node.response = response
        ctx.a_s["processed_node"] = processed_node
    else:
        processed_node = ctx.framework_states["actor"].get("processed_node", ctx.framework_states["actor"]["next_node"])
        processed_node.response = response
        ctx.framework_states["actor"]["processed_node"] = processed_node
    return ctx
Exemplo n.º 14
0
def programy_reponse(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return the AIML model's reply; boost confidence for psycho-help replies.

    Note: the name keeps its original spelling ("reponse") because callers
    reference it.
    """
    reply = model(ctx.requests.values())
    if psycho_help_spec in reply:
        # Psychological-help content must win the skill selection.
        set_confidence(ctx, actor, 1.0)
    return reply