def set_conf_and_can_cont_by_universal_policy(ctx: Context, actor: Actor):
    """Set response confidence and the can-continue flag according to dialog stage.

    High confidence near the beginning of the dialog, a lower one when the
    skill re-enters mid-dialog, and zero outside the first turns entirely.
    """
    DIALOG_BEGINNING_START_CONFIDENCE = 0.98
    DIALOG_BEGINNING_CONTINUE_CONFIDENCE = 0.9
    DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE = 0.98
    MIDDLE_DIALOG_START_CONFIDENCE = 0.7

    if not is_begin_of_dialog(ctx, actor, begin_dialog_n=10):
        # too deep into the dialog: do not compete for the turn at all
        confidence, can_continue_flag = 0.0, CAN_NOT_CONTINUE
    elif is_first_our_response(ctx, actor):
        confidence, can_continue_flag = DIALOG_BEGINNING_START_CONFIDENCE, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, actor) and common_greeting.dont_tell_you_answer(
        int_ctx.get_last_human_utterance(ctx, actor)
    ):
        # user dodged our question but the scenario was not interrupted
        confidence, can_continue_flag = DIALOG_BEGINNING_SHORT_ANSWER_CONFIDENCE, CAN_CONTINUE_SCENARIO
    elif not is_interrupted(ctx, actor):
        confidence, can_continue_flag = DIALOG_BEGINNING_CONTINUE_CONFIDENCE, CAN_CONTINUE_SCENARIO
    else:
        # scenario was interrupted: restart with reduced confidence
        confidence, can_continue_flag = MIDDLE_DIALOG_START_CONFIDENCE, CAN_CONTINUE_SCENARIO

    int_ctx.set_can_continue(ctx, actor, can_continue_flag)
    int_ctx.set_confidence(ctx, actor, confidence)
def forecast_intent_processing(ctx, actor) -> None:
    """Set confidence/continuation attributes for a weather-forecast turn.

    A fresh forecast *intent* (not yet an explicit request) gets question-level
    confidence; anything else is treated as a must-continue high-confidence turn.
    """
    fresh_intent = not forecast_requested_condition(ctx, actor) and forecast_intent_condition(ctx, actor)
    if fresh_intent:
        set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
        set_confidence(ctx, actor, QUESTION_CONF)
    else:
        set_can_continue(ctx, actor, MUST_CONTINUE)
        set_confidence(ctx, actor, HIGH_CONF)
def tell_punchline(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return the punchline of the story currently stored in ``ctx.misc``.

    Returns an empty string when the story/story_type pair is unknown.
    """
    int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
    # Idiom fix: the original used `set_confidence(...) if cond else None` as a
    # bare statement; a plain `if` expresses the same conditional side effect.
    if int_cnd.is_do_not_know_vars(ctx, actor):
        int_ctx.set_confidence(ctx, actor, 0.8)
    story = ctx.misc.get("story", "")
    story_type = ctx.misc.get("story_type", "")
    return stories.get(story_type, {}).get(story, {}).get("punchline", "")
def sys_need_more_time_response(ctx: Context, actor: Actor) -> str:
    """Reply with a sentiment acknowledgement plus a random wish-more-time phrase."""
    # sentiment-matched acknowledgement that has not been used yet this dialog
    acknowledgement = int_cnd.get_not_used_and_save_sentiment_acknowledgement(ctx, actor)
    wish_phrase = random.choice(common_weekend.WISH_MORE_TIME)

    int_ctx.set_confidence(ctx, actor, DIALOG_BEGINNING_CONTINUE_CONFIDENCE)
    int_ctx.set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
    int_ctx.add_acknowledgement_to_response_parts(ctx, actor)
    return f"{acknowledgement} {wish_phrase}"
def activity_answer_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Answer the weather-activity question if the user agreed, else bow out.

    Returns an empty string when the user declined or no preference was stored.
    """
    if not is_yes_vars(ctx, actor):
        # user declined: drop out of the scenario with zero confidence
        set_can_continue(ctx, actor, CAN_NOT_CONTINUE)
        set_confidence(ctx, actor, ZERO_CONF)
        return ""

    set_can_continue(ctx, actor, CAN_NOT_CONTINUE)
    set_confidence(ctx, actor, SMALLTALK_CONF)
    shared_memory = get_shared_memory(ctx, actor)
    preferred_weather = shared_memory.get("preferred_weather", "")
    # one-shot: clear the stored preference after reading it
    save_to_shared_memory(ctx, actor, preferred_weather="")
    return WEATHER_DICT[preferred_weather]["answer"] if preferred_weather else ""
def thematic_funfact_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return a fun fact about the entity mentioned after "about", plus a link question.

    Returns an empty string (and zero confidence) when no entity, fact, or topic
    is available.
    """
    response = ""
    set_confidence(ctx, actor, CONF_HIGH)
    set_can_continue(ctx, actor, MUST_CONTINUE)
    # e.g. "tell me a fun fact about cats" -> entity == "cats".
    # Bug fix: the original left `entity` as a *list* when "about" was absent
    # and passed that list straight into get_fact and the f-string.
    parts = ctx.last_request.split("about")
    entity = parts[1].strip() if len(parts) > 1 else ""
    if entity:
        human_utter = get_last_human_utterance(ctx, actor)
        topics = get_topics(human_utter, which="cobot_topics")
        funfact = get_fact(entity, f"fact about {entity}")
        # Bug fix: the original indexed topics[0] unconditionally and could
        # raise IndexError when the topic classifier returned nothing.
        if funfact and topics:
            link_question = make_question(topics[0])
            response = f"{funfact} {link_question}"
    if not response:
        set_confidence(ctx, actor, CONF_ZERO)
    return response
def which_story(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Ask the user which story they want, or refuse when none can be chosen."""
    prev_node = get_previous_node(ctx)

    if prev_node in ("start_node", "fallback_node"):
        int_ctx.set_can_continue(ctx, actor, "MUST_CONTINUE")
        # prepend an affirmation only when the user explicitly asked for a story;
        # say nothing extra when the agent itself proposed one
        if prev_node == "start_node":
            sure_phrase = random.choice(sorted(phrases.get("sure", [])))
        else:
            sure_phrase = ""
        which_phrase = random.choice(sorted(phrases.get("which_story", [])))
        return sure_phrase + " " + which_phrase

    if prev_node == "choose_story_node":
        int_ctx.set_can_continue(ctx, actor, "CANNOT_CONTINUE")
        return random.choice(sorted(phrases.get("no", [])))

    return "Ooops."
def forecast_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Build a weather-forecast reply for either the user's home or a named city."""
    location_name = ""
    if homeland_forecast_requested_condition(ctx, actor):
        # "weather at home": read the location from the stored user profile
        dialog = get_dialog(ctx, actor)
        if "human" in dialog and "profile" in dialog["human"]:
            location_name = dialog["human"]["profile"].get("location", "")
    else:
        # otherwise try to pull a location entity out of the last utterance
        human_utter = get_last_human_utterance(ctx, actor)
        location_name = retrieve_location_entity_from_utterance(human_utter)

    if not location_name:
        set_can_continue(ctx, actor, CAN_CONTINUE_SCENARIO)
        set_confidence(ctx, actor, MISSED_CITY_CONF)
        return SORRY_PHRASE

    forecast_intent_processing(ctx, actor)
    return f"{request_weather_service(location_name)}. {QUESTION_PHRASE}"
def random_funfact_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Return a not-yet-used random fun fact followed by a topic link question.

    Tracks already-given facts in shared memory; returns an empty string (and
    zero confidence) once every fact has been used.
    """
    response = ""
    set_confidence(ctx, actor, CONF_HIGH)
    set_can_continue(ctx, actor, MUST_CONTINUE)

    # shuffle a copy so the module-level FUNFACT_LIST is never reordered
    candidates = copy.deepcopy(FUNFACT_LIST)
    random.shuffle(candidates)

    shared_memory = get_shared_memory(ctx, actor)
    given_funfacts = shared_memory.get("given_funfacts", []) if shared_memory else []

    for funfact, topic in candidates:
        if funfact in given_funfacts:
            continue
        given_funfacts.append(funfact)
        save_to_shared_memory(ctx, actor, given_funfacts=given_funfacts)
        link_question = make_question(topic)
        response = f"{funfact} {link_question}"
        break

    if not response:
        set_confidence(ctx, actor, CONF_ZERO)
    return response
def provide_facts_response(ctx, actor, page_source, wiki_page):
    """Serve the next unused fact from a wikiHow/Wikipedia page and update state.

    On the first call for a page, fetches and preprocesses its content into the
    module-level ``memory`` cache; subsequent calls walk through the cached fact
    list, skipping facts already shown (tracked per page in ``ctx.misc["wiki"]``).
    Sets confidence/can-continue, writes the response onto the processed node,
    and returns ``ctx``.
    """
    wiki = ctx.misc.get("wiki", {})
    user_uttr: dict = ctx.misc.get("agent", {}).get("dialog", {}).get("human_utterances", [{}])[-1]
    # user agreed to continue either explicitly ("yes") or via a continue-phrase
    isyes = is_yes(user_uttr) or re.findall(CONTINUE_PATTERN, user_uttr["text"])
    response = ""
    cur_wiki_page = wiki.get("cur_wiki_page", "")
    if not cur_wiki_page:
        # first turn on this page: fetch and preprocess its content once
        wiki["cur_wiki_page"] = wiki_page
        if page_source == "wikiHow":
            page_content = get_wikihow_content(wiki_page, wikihow_cache)
            wiki_page_content_list = preprocess_wikihow_page(page_content)
            memory["wiki_page_content"] = wiki_page_content_list
        elif page_source == "wikipedia":
            page_content = get_wikipedia_content(wiki_page)
            wiki_page_content_list = preprocess_wikipedia_page(wiki_page.lower(), [], page_content)
            memory["wiki_page_content"] = wiki_page_content_list
    logger.info(f"wiki_page {wiki_page}")
    used_wiki_page_nums_dict = wiki.get("used_wiki_page_nums", {})
    used_wiki_page_nums = used_wiki_page_nums_dict.get(wiki_page, [])
    wiki_page_content_list = memory.get("wiki_page_content", [])
    logger.info(f"response, used_wiki_page_nums {used_wiki_page_nums}")
    logger.info(f"response, wiki_page_content_list {wiki_page_content_list[:3]}")
    if wiki_page_content_list:
        # serve the first fact that has not been shown yet and record it as used
        for num, fact in enumerate(wiki_page_content_list):
            if num not in used_wiki_page_nums:
                facts_str = fact.get("facts_str", "")
                question = fact.get("question", "")
                # Bug fix: the original called .replace(" ", " ") — a no-op;
                # the intent is to collapse the double space left when either
                # part is empty.
                response = f"{facts_str} {question}".strip().replace("  ", " ")
                used_wiki_page_nums.append(num)
                used_wiki_page_nums_dict[wiki_page] = used_wiki_page_nums
                wiki["used_wiki_page_nums"] = used_wiki_page_nums_dict
                break
        # Bug fix: the original tested this identical condition twice, nested;
        # the inner duplicate was redundant and has been removed.
        if len(wiki_page_content_list) == len(used_wiki_page_nums):
            # every fact served: reset so a new page can be started next time
            wiki["wiki_page"] = ""
            memory["wiki_page_content"] = []
    logger.info(f"response, final {response}")
    if response:
        if isyes:
            context.set_confidence(ctx, actor, 1.0)
            context.set_can_continue(ctx, actor, common_constants.MUST_CONTINUE)
        else:
            context.set_confidence(ctx, actor, 0.99)
            context.set_can_continue(ctx, actor, common_constants.CAN_CONTINUE_SCENARIO)
    else:
        context.set_confidence(ctx, actor, 0.0)
        context.set_can_continue(ctx, actor, common_constants.CAN_NOT_CONTINUE)
    # write the response onto the framework's processed node (two framework
    # layouts are supported: ctx.a_s and ctx.framework_states["actor"])
    if hasattr(ctx, "a_s"):
        processed_node = ctx.a_s.get("processed_node", ctx.a_s["next_node"])
        processed_node.response = response
        ctx.a_s["processed_node"] = processed_node
    else:
        processed_node = ctx.framework_states["actor"].get(
            "processed_node", ctx.framework_states["actor"]["next_node"]
        )
        processed_node.response = response
        ctx.framework_states["actor"]["processed_node"] = processed_node
    return ctx
def fallback(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Fallback reply for the story skill: apologize, refuse, or restart."""
    prev_node = get_previous_node(ctx)
    story_type = get_story_type(ctx, actor)
    story_left = get_story_left(ctx, actor)

    # all stories of the requested type have been told
    if prev_node == "which_story_node" and story_type and not story_left:
        int_ctx.set_can_continue(ctx, actor, "CANNOT_CONTINUE")
        return "Oh, sorry, but I've run out of stories."

    # no stories available for the request at all
    if prev_node == "which_story_node" and not story_type:
        int_ctx.set_can_continue(ctx, actor, "CAN_CONTINUE")
        return random.choice(sorted(phrases.get("no_stories", [])))

    # prev_node is tell_punchline_node or fallback_node: restart the scenario
    int_ctx.set_can_continue(ctx, actor, "MUST_CONTINUE")
    # Idiom fix: the original used `set_confidence(...) if cond else None` as a
    # bare statement; a plain `if` expresses the same conditional side effect.
    if int_cnd.is_do_not_know_vars(ctx, actor):
        int_ctx.set_confidence(ctx, actor, 0.5)
    return random.choice(sorted(phrases.get("start_phrases", [])))