def user_didnt_name_game_after_question_about_games_and_didnt_refuse_to_discuss_request(ngrams, vars):
    """Condition: the bot prompted the user about games, and the user neither named a game nor refused.

    True when the previous bot utterance linked to gaming or asked about games,
    the current user utterance contains no recognizable game title, is not a
    refusal, and no link away from gaming was made on the previous bot turn.
    """
    logger.info("user_didnt_name_game_after_question_about_games_and_didnt_refuse_to_discuss_request")
    human_uttr = state_utils.get_last_human_utterance(vars)
    bot_text = state_utils.get_last_bot_utterance(vars).get("text", "")
    user_named_game = bool(find_games_in_text(human_uttr.get("text", "")))
    bot_prompted_about_games = does_text_contain_link_to_gaming(bot_text) or common_intents.is_question_about_games(
        bot_text
    )
    flag = (
        bot_prompted_about_games
        and not user_named_game
        and not is_no(human_uttr)
        and not was_link_from_gaming_to_other_skill_made_in_previous_bot_utterance(vars)
    )
    logger.info(f"user_didnt_name_game_after_question_about_games_and_didnt_refuse_to_discuss_request={flag}")
    return flag
def suggest_user_game_description_response(vars):
    """Offer to tell a short description of the currently discussed IGDB game.

    Sets confidence/continuation flags depending on whether the user's last
    utterance was a refusal, then returns the offer phrase.
    """
    logger.info("called suggest_user_game_description_response")
    game = gaming_memory.get_current_igdb_game(vars)
    response = f"Would you like me to tell you short description of {game['name']}?"
    human_uttr = state_utils.get_last_human_utterance(vars)
    logger.info(f"(suggest_user_game_description_response)human_uttr: {human_uttr['text']}")
    if is_no(human_uttr):
        confidence = common_nlg.CONF_1
        continue_flag = common_constants.MUST_CONTINUE
    else:
        confidence = common_nlg.CONF_092_CAN_CONTINUE
        continue_flag = common_constants.CAN_CONTINUE_SCENARIO
    state_utils.set_confidence(vars, confidence=confidence)
    state_utils.set_can_continue(vars, continue_flag=continue_flag)
    return response
def animal_facts_response(vars):
    """Tell the next unused fact about the user's wild animal, plus a follow-up question.

    Picks the first fact (from shared memory) whose index is not yet in
    ``used_wild_animal_facts``, marks it used, and appends a question about
    the *next* fact unless this was the last one.

    Returns:
        Response string; empty string (with low confidence / cannot-continue)
        when no unused fact is available.
    """
    shared_memory = state_utils.get_shared_memory(vars)
    users_wild_animal = shared_memory.get("users_wild_animal", "")
    facts = shared_memory.get("wild_animal_facts", [])
    isno = is_no(state_utils.get_last_human_utterance(vars))
    used_wild_animal_facts = shared_memory.get("used_wild_animal_facts", [])
    found_fact = {}
    found_num = -1
    # Take the first fact not used yet and persist the updated bookkeeping.
    for num, fact in enumerate(facts):
        if num not in used_wild_animal_facts:
            found_num = num
            found_fact = fact
            used_wild_animal_facts.append(num)
            state_utils.save_to_shared_memory(vars, used_wild_animal_facts=used_wild_animal_facts)
            break
    logger.info(f"animal_facts_response, found_num {found_num} used_wild_animals_facts {used_wild_animal_facts}")
    response = ""
    # BUGFIX: originally found_fact["sentences"] raised KeyError when all facts
    # were already used (found_fact stayed {}); guard on found_num instead.
    # Also ".replace(' ', ' ')" was a no-op — collapse double spaces instead.
    if found_num != -1:
        facts_str = " ".join(found_fact.get("sentences", [])[:2]).strip().replace("  ", " ")
        if found_num == 0:
            facts_str = f"I know a lot about {users_wild_animal}. {facts_str}".strip().replace("  ", " ")
        if found_num != len(facts) - 1:
            # There is a next fact: ask a bridging question about it.
            next_fact = facts[found_num + 1]
            next_title = next_fact.get("title", "")
            if next_title:
                question = ANIMALS_WIKI_Q.get(next_title, "").format(users_wild_animal)
            else:
                question = random.choice(ANIMALS_COBOT_Q)
                question = question.format(users_wild_animal)
            # After a refusal (except on the very first fact) drop the fact text
            # and keep only the question.
            if isno and found_num != 0:
                facts_str = ""
            response = f"{facts_str} {question}".strip().replace("  ", " ")
        else:
            response = facts_str
    if response:
        state_utils.set_confidence(vars, confidence=CONF_1)
        state_utils.set_can_continue(vars, continue_flag=common_constants.MUST_CONTINUE)
    else:
        state_utils.set_confidence(vars, confidence=CONF_4)
        state_utils.set_can_continue(vars, continue_flag=common_constants.CAN_NOT_CONTINUE)
    return response
def tell_fact_request(ngrams, vars):
    """Condition: there is still wikiHow/Wikipedia content (or a findable page) to tell a fact from.

    False whenever the user refused or explicitly said they don't want to talk
    about the topic.
    """
    user_uttr = state_utils.get_last_human_utterance(vars)
    isno = is_no(user_uttr)
    shared_memory = state_utils.get_shared_memory(vars)
    wikipedia_page = shared_memory.get("cur_wikipedia_page", "")
    wikihow_page = shared_memory.get("cur_wikihow_page", "")
    flag = False
    if "wikihow_content" in facts_memory and facts_memory["wikihow_content"]:
        # Unused wikiHow paragraphs remain for the current page?
        used_wikihow_nums = shared_memory.get("used_wikihow_nums", {}).get(wikihow_page, [])
        flag = len(facts_memory["wikihow_content"]) > len(used_wikihow_nums)
    elif "wikipedia_content" in facts_memory and facts_memory["wikipedia_content"]:
        # Unused Wikipedia paragraphs remain for the current page?
        used_wikipedia_nums = shared_memory.get("used_wikipedia_nums", {}).get(wikipedia_page, [])
        flag = len(facts_memory["wikipedia_content"]) > len(used_wikipedia_nums)
    else:
        # Nothing cached yet: see whether a page can be found for the entity
        # mentioned in the current utterance.
        found_entity_substr, _, found_entity_types = find_entity(vars, "current")
        logger.info(f"request, found_entity_substr {found_entity_substr} found_entity_types {found_entity_types}")
        curr_page = get_page_title(vars, found_entity_substr)
        wikihow_articles = []
        if found_entity_substr in wikihowq_by_substr:
            wikihow_articles = list(wikihowq_by_substr[found_entity_substr].keys())
        if curr_page or wikihow_articles:
            flag = True
    not_want = re.findall(COMPILE_NOT_WANT_TO_TALK_ABOUT_IT, user_uttr["text"])
    if isno or not_want:
        flag = False
    logger.info(f"tell_fact_request={flag}")
    return flag
def get_updated_disliked_skills(dialog, can_not_be_disliked_skills=None):
    """Update the user's disliked-skills list based on their reaction to a skill link.

    If the previous bot utterance contained a linking phrase to some skill and
    the user reacted negatively (high negative sentiment, toxicity, or a "no"),
    that skill is added to the disliked list (unless it is protected).

    Args:
        dialog: full dialog state dict.
        can_not_be_disliked_skills: skills that must never be marked disliked.

    Returns:
        The (possibly extended) list of disliked skill names.
    """
    can_not_be_disliked_skills = [] if can_not_be_disliked_skills is None else can_not_be_disliked_skills
    disliked_skills = dialog["human"]["attributes"].get("disliked_skills", [])
    prev_bot_uttr = dialog["bot_utterances"][-1]["text"].lower() if len(dialog["bot_utterances"]) > 0 else ""
    linked_to_skill = ""
    for skill_name, link_phrases in skills_phrases_map.items():
        for phrase in link_phrases:
            if phrase.lower() in prev_bot_uttr:
                linked_to_skill = skill_name
                break
        if linked_to_skill:
            # BUGFIX: previously only the inner loop was broken, so a later
            # skill whose phrase also matched silently overwrote this match.
            break
    if linked_to_skill:
        negative_prob = get_sentiment(dialog["human_utterances"][-1], probs=True).get("negative", 0.0)
        toxicity = get_toxic(dialog["human_utterances"][-1], probs=False)
        _is_no = is_no(dialog["human_utterances"][-1])
        if negative_prob > 0.8 or toxicity or _is_no:
            # BUGFIX: also avoid appending the same skill repeatedly across turns.
            if linked_to_skill not in can_not_be_disliked_skills and linked_to_skill not in disliked_skills:
                disliked_skills.append(linked_to_skill)
    return disliked_skills
def start_or_continue_scenario(vars, topic_config):
    """Decide whether the smalltalk scenario for a special topic should start or continue.

    True when a topic is active (or a topic switch was just detected) and there
    are still unused smalltalk utterances for it in smalltalk mode. A topic
    that was not introduced on this very turn is only continued if the matching
    skill was active on the previous turn.
    """
    flag = False
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    prev_active_skill = bot_uttr.get("active_skill", "")
    shared_memory = state_utils.get_shared_memory(vars)
    isno = is_no(state_utils.get_last_human_utterance(vars))
    cur_mode = shared_memory.get("cur_mode", "smalltalk")
    found_topic = shared_memory.get("special_topic", "")
    logger.info(f"special_topic_request, found_topic {found_topic}")
    user_info = shared_memory.get("user_info", {})
    entity_triplets = shared_memory.get("entity_triplets", {})
    logger.info(f"start_or_continue_scenario, user_info {user_info}, entity_triplets {entity_triplets}")
    # A refusal while in facts mode drops us back to smalltalk.
    if cur_mode == "facts" and isno:
        cur_mode = "smalltalk"
    first_utt = False
    if not found_topic or prev_active_skill not in {"dff_wiki_skill", "dff_music_skill"}:
        found_topic, first_utt, utt_can_continue, utt_conf = check_switch(vars, topic_config)
    logger.info(f"start_or_continue_scenario, {found_topic}, {first_utt}")
    if found_topic:
        cur_topic_smalltalk = topic_config[found_topic].get("smalltalk", [])
        # BUGFIX: previously looked up the literal key "found_topic" instead of
        # the current topic, so the per-topic used-utterance list was never read
        # and the exhaustion check below always saw an empty list.
        used_utt_nums = shared_memory.get("used_utt_nums", {}).get(found_topic, [])
        logger.info(f"used_smalltalk {used_utt_nums}")
        if cur_topic_smalltalk and len(used_utt_nums) < len(cur_topic_smalltalk) and cur_mode == "smalltalk":
            flag = True
        # If the topic was not introduced this turn, only continue when the
        # corresponding skill was active on the previous turn.
        if not first_utt and ((found_topic != "music" and prev_active_skill != "dff_wiki_skill") or
                              (found_topic == "music" and prev_active_skill != "dff_music_skill")):
            flag = False
    return flag
def user_asked_have_pets_request(ngrams, vars):
    """Condition: the user asked about the bot's pets, or the pets scenario may proceed.

    True when the user asks whether the bot has/likes pets, asks to hear about
    the bot's pet, when the scenario has already started and the user did not
    pivot to their own pet or another animal, or when the bot asked
    "do you have ..." and the user said no. Always False when the utterance
    matches NOT_SWITCH_TEMPLATE (explicit refusal to switch topics).
    """
    flag = False
    shared_memory = state_utils.get_shared_memory(vars)
    user_uttr = state_utils.get_last_human_utterance(vars)
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    annotations = state_utils.get_last_human_utterance(vars)["annotations"]
    # "do/did/have you have/had/like any pets..." — question about the bot's pets
    have_pet = re.findall(
        r"(do|did|have) you (have |had |like )?(any |a )?(pets|pet|animals|animal|dog|cat|puppy|"
        r"kitty|kitten)",
        user_uttr["text"],
    )
    # "tell/talk/hear ... your dog/cat..." — request to hear about the bot's pet
    tell_about_pet = re.findall(r"(tell|talk|hear)(.*)(your )(dog|cat|puppy|kitty|kitten)s?", user_uttr["text"])
    # User mentions their own pet or states they have one — those pivot away
    # from this branch of the scenario.
    users_pet = re.findall(
        r"my (cat|dog|puppy|kitty|kitten|rat|fish|parrot|hamster)", user_uttr["text"], re.IGNORECASE)
    user_has_pet = re.findall(
        r"i (have |had )(a )?(cat|dog|puppy|kitty|kitten|rat|fish|parrot|hamster)",
        user_uttr["text"], re.IGNORECASE)
    # NOTE(review): Q55983715/Q16521 appear to be animal-related Wikidata types
    # (taxon etc.) — confirm against find_entity_by_types usage elsewhere.
    found_animal = find_entity_by_types(annotations, {"Q55983715", "Q16521"})
    pet_mentioned = re.findall(r"(cat|dog|puppy|kitty|kitten)", user_uttr["text"], re.IGNORECASE)
    started = shared_memory.get("start", False)
    bot_asked_pet = re.findall(DO_YOU_HAVE_TEMPLATE, bot_uttr["text"])
    isno = is_no(user_uttr)
    if (have_pet or tell_about_pet or
            (started and not users_pet and not user_has_pet and (not found_animal or pet_mentioned)) or
            (bot_asked_pet and isno)):
        flag = True
    # Explicit refusal to switch to this topic overrides everything above.
    if re.findall(NOT_SWITCH_TEMPLATE, user_uttr["text"]):
        flag = False
    logger.info(f"user_asked_have_pets_request={flag}")
    return flag
def is_no_vars(vars):
    """Return whether the last human utterance is a negative answer."""
    return common_utils.is_no(state_utils.get_last_human_utterance(vars))
def no_request(ngrams, vars):
    """Condition: the last human utterance is a negative answer."""
    flag = is_no(state_utils.get_last_human_utterance(vars))
    logger.info(f"no_request {flag}")
    return flag
def about_pet_request(ngrams, vars):
    """Condition: the skill should talk about the bot's own pet.

    Combines several signals: the user did not talk about their own pet, did
    not express dislike of cats/dogs, and either did not refuse or explicitly
    asked about the bot's cat/dog. If all facts about the current bot pet were
    already told, the bot may switch to the other pet (cat <-> dog) when that
    one still has unused facts. Several guards then force the flag off
    (question about the other pet already covered, skill interrupted, intro
    already given, explicit lets-chat about something else).
    """
    flag = False
    user_uttr = state_utils.get_last_human_utterance(vars)
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    isno = is_no(state_utils.get_last_human_utterance(vars))
    dontlike = re.findall(r"(do not like |don't like |hate )(cat|dog)", user_uttr["text"])
    shared_memory = state_utils.get_shared_memory(vars)
    told_about_cat = shared_memory.get("told_about_cat", False)
    have_cat = re.findall(r"(do|did) you have (a )?(cat|kitty|kitten)s?", user_uttr["text"])
    told_about_dog = shared_memory.get("told_about_dog", False)
    have_dog = re.findall(r"(do|did) you have (a )?(dog|puppy)s?", user_uttr["text"])
    about_users_pet = if_about_users_pet(ngrams, vars)
    # Base condition: not about the user's pet, no dislike, no plain refusal,
    # and at least one of the bot's pets has not been fully covered yet.
    if (
        not about_users_pet
        and not dontlike
        and (not isno or have_cat)
        and (not told_about_cat or have_cat or not told_about_dog or have_dog)
    ):
        flag = True
    my_pet = shared_memory.get("my_pet", "")
    bot_asked_pet = re.findall(DO_YOU_HAVE_TEMPLATE, bot_uttr["text"])
    all_facts_used = False
    if my_pet:
        used_facts = shared_memory.get("used_facts", {}).get(my_pet, [])
        all_facts = MY_PET_FACTS[my_pet]
        if len(all_facts) == len(used_facts):
            all_facts_used = True
    prev_state = condition_utils.get_last_state(vars)
    prev_skill = bot_uttr.get("active_skill", "")
    # If we just finished all facts about the current pet while in SYS_MY_PET,
    # try switching to the other pet (cat <-> dog) if it has unused facts.
    if (
        my_pet
        and prev_skill == "dff_animals_skill"
        and str(prev_state).split(".")[-1] == "SYS_MY_PET"
        and all_facts_used
    ):
        if my_pet == "cat":
            my_pet = "dog"
        elif my_pet == "dog":
            my_pet = "cat"
        new_all_facts_used = False
        used_facts = shared_memory.get("used_facts", {}).get(my_pet, [])
        all_facts = MY_PET_FACTS[my_pet]
        if len(all_facts) == len(used_facts):
            new_all_facts_used = True
        if not new_all_facts_used:
            flag = True
    if my_pet:
        # If the user asked about a pet we already fully covered, back off.
        ans, pet, *_ = answer_users_question(vars)
        if ans and ((pet != "cat" and told_about_dog) or (pet != "dog" and told_about_cat)):
            flag = False
        # Mid-story facts must not resume after another skill interrupted us.
        used_facts = shared_memory.get("used_facts", {}).get(my_pet, [])
        if len(used_facts) > 0 and prev_skill != "dff_animals_skill":
            flag = False
    cat_intro = shared_memory.get("cat_intro", False)
    dog_intro = shared_memory.get("dog_intro", False)
    # Don't re-introduce a pet whose intro was already given.
    if (my_pet == "cat" and cat_intro) or (my_pet == "dog" and dog_intro):
        flag = False
    # "Do you have pets?" answered with "no" still lets the bot talk about its own.
    if ("do you have pets" in bot_uttr["text"].lower() or bot_asked_pet) and isno:
        flag = True
    # Explicit lets-chat request about something non-animal wins.
    if if_lets_chat(user_uttr["text"]) and not re.findall(ANIMALS_FIND_TEMPLATE, user_uttr["text"]):
        flag = False
    logger.info(f"about_pet_request={flag}")
    return flag
def process_info(dialog, which_info="name"):
    """Extract and react to one piece of the user's personal info (name/homeland/location).

    Inspects the last bot and user utterances to decide whether the bot asked
    for — or the user volunteered — `which_info`, extracts the value via
    check_entities(), and builds a response plus profile attributes.

    Args:
        dialog: full dialog state dict.
        which_info: one of "name", "homeland", "location".

    Returns:
        (response, confidence, human_attr, bot_attr, attr): response text,
        confidence in [0, 1], updated human/bot attribute dicts, and attr with
        the "can_continue" flag.
    """
    human_attr = {}
    bot_attr = {}
    attr = {"can_continue": CAN_NOT_CONTINUE}
    response = ""
    confidence = 0.0
    curr_uttr_dict = dialog["human_utterances"][-1]
    curr_user_uttr = curr_uttr_dict["text"].lower()
    curr_user_annot = curr_uttr_dict["annotations"]
    bot_utterance_texts = [u["text"].lower() for u in dialog["bot_utterances"]]
    try:
        prev_bot_uttr = dialog["bot_utterances"][-1]["text"].lower()
    except IndexError:
        # Very first turn — no bot utterance yet.
        prev_bot_uttr = ""
    logger.info(f"Previous bot uterance: {prev_bot_uttr}")
    # Did the bot just ask about this info, or did the user volunteer it?
    is_about_templates = {
        "name": what_is_your_name_pattern.search(prev_bot_uttr) or my_name_is_pattern.search(curr_user_uttr),
        "homeland": where_are_you_from_pattern.search(prev_bot_uttr) or my_origin_is_pattern.search(curr_user_uttr),
        "location": what_is_your_location_pattern.search(prev_bot_uttr) or my_location_is_pattern.search(curr_user_uttr),
    }
    # Use the second phrase variant if the first was already said by the bot.
    response_phrases = {
        "name": RESPONSE_PHRASES["name"][0],
        "location": RESPONSE_PHRASES["location"][1]
        if RESPONSE_PHRASES["location"][0].lower() in bot_utterance_texts else RESPONSE_PHRASES["location"][0],
        "homeland": RESPONSE_PHRASES["homeland"][1]
        if RESPONSE_PHRASES["homeland"][0].lower() in bot_utterance_texts else RESPONSE_PHRASES["homeland"][0],
    }
    got_info = False
    # if user doesn't want to share his info
    if user_tells_bot_called_him_wrong(curr_uttr_dict, prev_bot_uttr, dialog["human"]["profile"]):
        logger.info(f"User says My name is not Blabla")
        response = f"My bad. What is your name again?"
        confidence = 1.0
        got_info = True
        attr["can_continue"] = MUST_CONTINUE
    elif (is_about_templates[which_info] or was_user_asked_to_clarify_info(prev_bot_uttr, which_info)) and (
            is_no(curr_uttr_dict) or is_secret(curr_user_uttr, which_info)):
        # User refused or wants to keep the info secret — accept and stop.
        response = "As you wish."
        confidence = 1.0
        attr["can_continue"] = CAN_NOT_CONTINUE
        return response, confidence, human_attr, bot_attr, attr
    elif re.search(r"is that where you live now", prev_bot_uttr) and is_yes(curr_uttr_dict):
        # User confirmed their homeland is also their current location.
        logger.info(f"Found location=homeland")
        if dialog["human"]["attributes"].get("homeland", None):
            human_attr["location"] = dialog["human"]["attributes"]["homeland"]
        else:
            # NOTE(review): below check_entities is unpacked into a pair
            # (found_info, named_entities_found); here its result is stored
            # as-is — confirm check_entities' return shape for this call.
            found_homeland = check_entities(
                "homeland",
                curr_user_uttr=dialog["utterances"][-3]["text"].lower(),
                curr_user_annot=dialog["utterances"][-3]["annotations"],
                prev_bot_uttr=dialog["utterances"][-4]["text"].lower(),
            )
            human_attr["location"] = found_homeland
        response = response_phrases["location"]
        confidence = 1.0
        got_info = True
        attr["can_continue"] = MUST_CONTINUE
    elif re.search(r"is that where you live now", prev_bot_uttr) and is_no(curr_uttr_dict):
        logger.info(f"Found location is not homeland")
        response = f"So, where do you live now?"
        confidence = 1.0
        got_info = False
        attr["can_continue"] = MUST_CONTINUE
    # Try actual extraction when the info was asked for and not yet obtained.
    if (is_about_templates[which_info] or was_user_asked_to_clarify_info(prev_bot_uttr, which_info)) and not got_info:
        logger.info(f"Asked for {which_info} in {prev_bot_uttr}")
        found_info, named_entities_found = check_entities(which_info, curr_user_uttr, curr_user_annot, prev_bot_uttr)
        logger.info(f"found_info, named_entities_found: {found_info}, {named_entities_found}")
        if which_info == "name" and found_info is not None:
            found_info = filter_unreadable_names(found_info)
        if found_info is None:
            # Extraction failed: decide between re-asking, clarifying, or giving up.
            logger.info(f"found_info is None")
            if did_user_misunderstand_bot_question_about_geography(curr_user_uttr, which_info, prev_bot_uttr):
                response = ASK_GEOGRAPHICAL_LOCATION_BECAUSE_USER_MISUNDERSTOOD_BOT[which_info]
                confidence = 0.9
                attr["can_continue"] = CAN_CONTINUE_SCENARIO
            elif which_info in ["homeland", "location"
                                ] and NON_GEOGRAPHICAL_LOCATIONS_COMPILED_PATTERN.search(curr_user_uttr):
                # User named a non-geographical "place" — bail out silently.
                response = ""
                confidence = 0.0
                attr["can_continue"] = CAN_NOT_CONTINUE
            elif was_user_asked_to_clarify_info(prev_bot_uttr, which_info):
                # Already asked to clarify once — don't loop.
                response = ""
                confidence = 0.0
                attr["can_continue"] = CAN_NOT_CONTINUE
            elif (which_info == "name" and len(curr_user_uttr.split()) == 1 and
                  len(get_entities(curr_uttr_dict, only_named=False, with_labels=False)) > 0):
                response = "I've never heard about this name."
                confidence = 1.0
                attr["can_continue"] = MUST_CONTINUE
            else:
                response = REPEAT_INFO_PHRASES[which_info]
                confidence = 1.0
                attr["can_continue"] = MUST_CONTINUE
        else:
            if which_info == "name":
                found_info = shorten_long_names(found_info)
                response = response_phrases[which_info] + found_info + "."
                confidence = 1.0
                attr["can_continue"] = MUST_CONTINUE
                human_attr[which_info] = found_info
            else:
                if NON_GEOGRAPHICAL_LOCATIONS_COMPILED_PATTERN.search(found_info):
                    # The extracted "location" is not geographical.
                    if did_user_misunderstand_bot_question_about_geography(found_info, which_info, prev_bot_uttr):
                        response = ASK_GEOGRAPHICAL_LOCATION_BECAUSE_USER_MISUNDERSTOOD_BOT[which_info]
                        confidence = 0.9
                        attr["can_continue"] = CAN_CONTINUE_SCENARIO
                    else:
                        response = ""
                        confidence = 0.0
                        attr["can_continue"] = CAN_NOT_CONTINUE
                else:
                    if which_info == "location":
                        response = response_phrases[which_info]
                    elif which_info == "homeland":
                        # If location is still unknown, the homeland phrase asks
                        # "is that where you live now"; otherwise use the
                        # location acknowledgement directly.
                        if dialog["human"]["profile"].get("location", None) is None:
                            response = response_phrases[which_info]
                        else:
                            response = response_phrases["location"]
                    human_attr[which_info] = found_info
                    # Named entities give the strongest extraction signal.
                    if named_entities_found:
                        confidence = 1.0
                        attr["can_continue"] = MUST_CONTINUE
                    else:
                        confidence = 0.9
                        attr["can_continue"] = CAN_CONTINUE_SCENARIO
    return response, confidence, human_attr, bot_attr, attr
def is_no_vars(ctx: Context, actor: Actor) -> bool:
    """Return True if the last human utterance is a negative answer."""
    last_human_uttr = int_ctx.get_last_human_utterance(ctx, actor)
    return bool(common_utils.is_no(last_human_uttr))
def get_statement_phrase(dialog, topic, attr, TOPICS):
    """
    For considered topic propose dive deeper questions
    for meta-script, assign attributes for dialog.

    Args:
        dialog: full dialog state dict
        topic: current topic `verb + adj/adv/noun`
        attr: dictionary of current attributes
        TOPICS: topic store passed through to get_comet_atomic

    Returns:
        tuple of text response, confidence and response attributes
    """
    last_uttr = dialog["utterances"][-1]

    # choose and fill template with relation from COMeT
    used_templates = get_used_attributes_by_name(
        dialog["utterances"], attribute_name="meta_script_relation_template",
        value_by_default=None, activated=True)[-2:]
    meta_script_template = get_not_used_template(
        used_templates, meta_script_skill_constants.DIVE_DEEPER_TEMPLATE_COMETS)
    attr["meta_script_relation_template"] = meta_script_template
    relation = meta_script_skill_constants.DIVE_DEEPER_TEMPLATE_COMETS[meta_script_template]["attribute"]
    prediction = get_comet_atomic(f"person {topic}", relation, TOPICS)
    # No COMeT prediction -> nothing sensible to say; give up on this turn.
    if prediction == "":
        return "", 0.0, {"can_continue": CAN_NOT_CONTINUE}

    # Half of the time, if this skill was already active on the previous turn,
    # use the generic "do that / doing that" instead of repeating the topic.
    if (random() < 0.5 and len(dialog["utterances"]) >= 2 and
            dialog["bot_utterances"][-1].get("active_skill", "") == "meta_script_skill"):
        dothat = "do that"
        doingthat = "doing that"
    else:
        dothat = re.sub(r"^be ", "become ", topic)
        doingthat = get_gerund_topic(topic)
    statement = (meta_script_template.replace("DOINGTHAT", doingthat).replace(
        "DOTHAT", dothat).replace("RELATION", prediction))

    # choose template for short comment (react to user's yes/no)
    used_templates = get_used_attributes_by_name(
        dialog["utterances"],
        attribute_name="meta_script_deeper_comment_template",
        value_by_default=None,
        activated=True,
    )[-2:]
    if is_yes(last_uttr):
        comment = get_not_used_template(
            used_templates,
            meta_script_skill_constants.DIVE_DEEPER_COMMENTS["yes"] +
            meta_script_skill_constants.DIVE_DEEPER_COMMENTS["other"],
        )
        attr["meta_script_deeper_comment_template"] = comment
    elif is_no(last_uttr):
        comment = get_not_used_template(
            used_templates,
            meta_script_skill_constants.DIVE_DEEPER_COMMENTS["no"] +
            meta_script_skill_constants.DIVE_DEEPER_COMMENTS["other"],
        )
        attr["meta_script_deeper_comment_template"] = comment
    else:
        comment = get_not_used_template(
            used_templates, meta_script_skill_constants.DIVE_DEEPER_COMMENTS["other"])
        attr["meta_script_deeper_comment_template"] = comment

    # choose and fill template of question upon relation from COMeT
    used_templates = get_used_attributes_by_name(
        dialog["utterances"], attribute_name="meta_script_question_template",
        value_by_default=None, activated=True)[-3:]
    meta_script_template_question = get_not_used_template(
        used_templates,
        meta_script_skill_constants.DIVE_DEEPER_TEMPLATE_COMETS[meta_script_template]["templates"])
    attr["meta_script_question_template"] = meta_script_template_question

    # Custom (user-proposed) topics skip the comment prefix.
    if is_custom_topic(topic):
        response = f"{meta_script_template_question.replace('STATEMENT', statement)}".strip()
        confidence = meta_script_skill_constants.CONTINUE_USER_TOPIC_CONFIDENCE
    else:
        response = f"{comment} {meta_script_template_question.replace('STATEMENT', statement)}".strip()
        confidence = meta_script_skill_constants.DEFAULT_CONFIDENCE
    attr["can_continue"] = CAN_CONTINUE_SCENARIO
    return response, confidence, attr
def get_response_for_particular_topic_and_status(topic, curr_meta_script_status, dialog, source_topic):
    """Produce the meta-script response for a given topic and script status.

    Dispatches on `curr_meta_script_status`: "starting" proposes the topic,
    later statuses ("comment", "opinion", "deeper1", default statement) walk
    the script, with several early-exit conditions (user question, topic
    switch, repeated refusals).

    Returns:
        (response, confidence, attr): response text, confidence in [0, 1],
        and attributes carrying meta-script status/topic and can_continue.
    """
    attr = {"meta_script_topic": topic, "meta_script_status": curr_meta_script_status}
    if len(dialog["human_utterances"]) > 0:
        user_uttr = dialog["human_utterances"][-1]
        text_user_uttr = dialog["human_utterances"][-1]["text"].lower()
        # Last sentence of the user's utterance according to sentseg.
        last_user_sent_text = (dialog["human_utterances"][-1].get(
            "annotations", {}).get("sentseg", {}).get("segments", [""])[-1].lower())
    else:
        user_uttr = {"text": ""}
        text_user_uttr = ""
        last_user_sent_text = ""
    if len(dialog["bot_utterances"]) > 0:
        bot_uttr = dialog["bot_utterances"][-1]
    else:
        bot_uttr = {}
    if curr_meta_script_status == "starting":
        response, confidence, attr = get_starting_phrase(dialog, topic, attr)
        attr["response_parts"] = ["prompt"]
        can_offer_topic = if_choose_topic(dialog["human_utterances"][-1], bot_uttr)
        talk_about_user_topic = is_custom_topic(topic) and if_chat_about_particular_topic(user_uttr, bot_uttr)
        # Was a "what do you want to talk about?" greeting asked in the last turns?
        prev_what_to_talk_about_outputs = [
            get_outputs_with_response_from_dialog(dialog["utterances"][-3:], response=response, activated=True)
            for response in GREETING_QUESTIONS[list(GREETING_QUESTIONS.keys())[0]]
        ]
        prev_what_to_talk_about_outputs = sum([
            list_of_outputs for list_of_outputs in prev_what_to_talk_about_outputs if len(list_of_outputs) > 0
        ], [])
        prev_what_to_talk_about_greeting = len(prev_what_to_talk_about_outputs) > 0 and bot_uttr.get(
            "active_skill", "") in ["dff_friendship_skill", "program_y"]
        if (not prev_what_to_talk_about_greeting and can_offer_topic) or talk_about_user_topic:
            # if person wants to talk about something particular and we have extracted some topic - do that!
            confidence = MATCHED_DIALOG_BEGIN_CONFIDENCE
        elif "?" in last_user_sent_text or prev_what_to_talk_about_greeting:
            # if some question was asked by user, do not start script at all!
            response, confidence = "", 0.0
        elif len(dialog["utterances"]) <= 20:
            confidence = DEFAULT_DIALOG_BEGIN_CONFIDENCE
        elif source_topic == NP_SOURCE:
            confidence = NOUN_TOPIC_STARTING_CONFIDENCE
        else:
            confidence = DEFAULT_STARTING_CONFIDENCE
    else:
        if curr_meta_script_status == "deeper1" and "?" in last_user_sent_text and "what" not in text_user_uttr:
            response, confidence, attr = "", 0.0, {}
        elif "?" in last_user_sent_text and not check_topic_lemmas_in_sentence(text_user_uttr, topic):
            logger.info(
                "Question by user was detected. Without any word from topic in it. "
                "Don't continue the script on this turn.")
            response, confidence, attr = "", 0.0, {}
        elif is_switch_topic(user_uttr) or if_chat_about_particular_topic(user_uttr):
            logger.info("Topic switching was detected. Finish script.")
            response, confidence = FINISHED_SCRIPT_RESPONSE, 0.5
            attr["meta_script_status"] = FINISHED_SCRIPT
            attr["can_continue"] = CAN_NOT_CONTINUE
        elif get_user_replies_to_particular_skill(
                dialog["utterances"], "meta_script_skill")[-2:] == ["no.", "no."]:
            logger.info("Two consequent `no` answers were detected. Finish script.")
            response, confidence = FINISHED_SCRIPT_RESPONSE, 0.5
            attr["meta_script_status"] = FINISHED_SCRIPT
            attr["can_continue"] = CAN_NOT_CONTINUE
        elif curr_meta_script_status == "comment":
            response, confidence, attr = get_comment_phrase(dialog, attr)
            attr["can_continue"] = CAN_NOT_CONTINUE
        elif curr_meta_script_status == "opinion":
            response, confidence, attr = get_opinion_phrase(dialog, topic, attr)
        elif curr_meta_script_status == "deeper1" and (is_no(user_uttr) or "never" in text_user_uttr):
            response, confidence = FINISHED_SCRIPT_RESPONSE, 0.5
            attr["meta_script_status"] = FINISHED_SCRIPT
            attr["can_continue"] = CAN_NOT_CONTINUE
        else:
            response, confidence, attr = get_statement_phrase(dialog, topic, attr, TOPICS)
            attr["can_continue"] = CAN_CONTINUE_SCENARIO
        if confidence > 0.7 and (is_yes(user_uttr) or len(text_user_uttr.split()) > 7):
            # if yes detected, confidence 1.0 - we like agreements!
            confidence = 1.0
        # Script was interrupted by another skill — lower the confidence.
        if confidence > 0.7 and bot_uttr.get("active_skill", "") != "meta_script_skill":
            confidence = BROKEN_DIALOG_CONTINUE_CONFIDENCE
    logger.info(f"User sent: `{text_user_uttr}`. Response: `{response}`. Attr: `{attr}.`")
    return response, confidence, attr
def ask_about_pet_request(ngrams, vars):
    """Condition: the bot should ask the user a question about the user's pet.

    True when the user has (or mentioned) a pet, the bot is not telling about
    its own pets, and there is still an unused question in USER_PETS_Q whose
    answer is neither already stored in shared memory nor present in the
    user's utterance. Breed questions are suppressed for pets other than
    cats/dogs.
    """
    flag = False
    user_uttr = state_utils.get_last_human_utterance(vars)
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    isno = is_no(state_utils.get_last_human_utterance(vars))
    isyes = is_yes(state_utils.get_last_human_utterance(vars))
    user_has = re.findall(r"i (have|had)", user_uttr["text"])
    # Side effects: update breed info / forget the pet if the user said so.
    extract_breed(vars)
    delete_pet(vars)
    bot_asked_pet = re.findall(
        r"do you have a (cat|dog|rat|fish|bird|parrot|hamster)", bot_uttr["text"], re.IGNORECASE)
    # Bot asked "do you have a X?" and the user denied (without naming a pet).
    user_has_not = (bot_asked_pet and isno) and not re.findall(PETS_TEMPLATE, user_uttr["text"])
    user_told_pet = re.findall("(cat|dog|rat|fish|bird|parrot|hamster)", user_uttr["text"]) and re.findall(
        r"(do you have pets|what pets do you have)", bot_uttr["text"], re.IGNORECASE)
    user_mentioned_pet = re.findall(
        r"my (cat|dog|rat|fish|bird|parrot|hamster|puppy|kitty|kitten)", user_uttr["text"], re.IGNORECASE)
    user_has_pet = re.findall(
        r"i (have |had )(a )?(cat|dog|rat|fish|bird|parrot|hamster|puppy|kitty|kitten)",
        user_uttr["text"],
        re.IGNORECASE,
    )
    shared_memory = state_utils.get_shared_memory(vars)
    found_pet = re.findall(PETS_TEMPLATE, user_uttr["text"])
    used_pets_q = shared_memory.get("used_pets_q", [])
    users_pet = shared_memory.get("users_pet", "")
    my_pets = my_pets_request(ngrams, vars)
    found_question = {}
    logger.info(
        f"ask_about_pet_request, my_pets {my_pets} user_has_not {user_has_not} users_pet {users_pet} "
        f"bot_asked_pet {bot_asked_pet} isyes {isyes} user_has {user_has} user_told_pet {user_told_pet} "
        f"user_mentioned_pet {user_mentioned_pet} user_has_pet {user_has_pet} found_pet {found_pet}")
    if (not my_pets and not user_has_not and
            (users_pet or (bot_asked_pet and (isyes or user_has)) or user_told_pet or
             user_mentioned_pet or user_has_pet)):
        # Walk the question list for the first question that is still open:
        # not asked before, its target attribute not yet known, and its
        # keywords not already answered in the current utterance.
        for elem in USER_PETS_Q:
            if elem["what"] not in used_pets_q:
                found_question = elem
            found_attr = ""
            if found_question and found_question["attr"]:
                curr_attr = found_question["attr"]
                found_attr = shared_memory.get(curr_attr, "")
            found_keywords = False
            if found_question and found_question["keywords"]:
                keywords = found_question["keywords"]
                found_keywords = any([keyword in user_uttr["text"] for keyword in keywords])
            if not found_attr and not found_keywords:
                flag = True
            # Only cats and dogs have breeds worth asking about.
            if found_question.get("what", "") == "breed" and (
                    (users_pet and users_pet not in CATS_DOGS) or
                    (found_pet and found_pet[0] not in CATS_DOGS)):
                flag = False
            logger.info(
                f"ask_about_pet, what {found_question.get('what', '')} found_attr {found_attr} "
                f"found_keywords {found_keywords}")
            if flag:
                break
    logger.info(f"ask_about_pet_request={flag}")
    return flag
def get_entities_with_attitudes(annotated_uttr: dict, prev_annotated_uttr: dict):
    """Infer which entities the user likes or dislikes from the last two utterances.

    Pairs the first entity of the current utterance (or the last entity of the
    previous one, for bot questions) with an attitude derived from question
    patterns, yes/no answers, love/hate phrasings, topic requests, or
    sentiment.

    Returns:
        dict with "like" and "dislike" keys mapping to lists of entity strings
        (empty entities filtered out).
    """
    entities_with_attitudes = {"like": [], "dislike": []}
    all_entities = get_entities(annotated_uttr, only_named=False, with_labels=False)
    all_prev_entities = get_entities(prev_annotated_uttr, only_named=False, with_labels=False)
    logger.info(f"Consider all curr entities: {all_entities}, and all previous entities: {all_prev_entities}")
    curr_entity = all_entities[0] if all_entities else ""
    prev_entity = all_prev_entities[-1] if all_prev_entities else ""
    curr_uttr_text = annotated_uttr.get("text", "")
    prev_uttr_text = prev_annotated_uttr.get("text", "")
    curr_sentiment = get_sentiment(annotated_uttr, probs=False, default_labels=["neutral"])[0]
    current_first_sentence = (annotated_uttr.get("annotations", {}).get(
        "sentseg", {}).get("segments", [curr_uttr_text])[0])
    if "?" in current_first_sentence:
        # The user answered with a question — no attitude can be inferred.
        pass
    elif WHAT_FAVORITE_PATTERN.search(prev_uttr_text):
        # what is your favorite ..? - animals -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif WHAT_LESS_FAVORITE_PATTERN.search(prev_uttr_text):
        # what is your less favorite ..? - animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif DO_YOU_LOVE_PATTERN.search(prev_uttr_text):
        if is_no(annotated_uttr):
            # do you love .. animals? - no -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
        elif is_yes(annotated_uttr):
            # do you love .. animals? - yes -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]
    elif DO_YOU_HATE_PATTERN.search(prev_uttr_text):
        if is_no(annotated_uttr):
            # do you hate .. animals? - no -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]
        elif is_yes(annotated_uttr):
            # do you hate .. animals? - yes -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
    elif I_HATE_PATTERN.search(curr_uttr_text):
        # i hate .. animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif I_LOVE_PATTERN.search(curr_uttr_text) or MY_FAVORITE_PATTERN.search(curr_uttr_text):
        # i love .. animals -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif if_chat_about_particular_topic(
            annotated_uttr, prev_annotated_uttr=prev_annotated_uttr, key_words=[curr_entity]):
        # what do you want to chat about? - ANIMALS -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif if_not_want_to_chat_about_particular_topic(annotated_uttr, prev_annotated_uttr=prev_annotated_uttr):
        # i don't wanna talk about animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif WHAT_DO_YOU_THINK_PATTERN.search(prev_uttr_text):
        if curr_sentiment == "negative":
            # what do you think .. animals? - negative -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
        elif curr_sentiment == "positive":
            # what do you think .. animals? - positive -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]
    # Drop empty-string placeholders that arise when no entity was extracted.
    entities_with_attitudes["like"] = [el for el in entities_with_attitudes["like"] if el]
    entities_with_attitudes["dislike"] = [el for el in entities_with_attitudes["dislike"] if el]
    return entities_with_attitudes
def make_utt_with_ack(vars, prev_what_to_ask, what_to_ask):
    """Build a response: acknowledgement of the previous answer + statement + next question.

    Args:
        vars: DFF vars/state.
        prev_what_to_ask: slot just answered by the user ("name", "breed",
            "play", "videos", ...) — used to pick the acknowledgement.
        what_to_ask: next slot to ask about ("name", "breed", "play", "like",
            "videos", "pandemic") — used to pick statement + question.

    Returns:
        The combined response string; used acknowledgements are tracked in
        shared memory so they are not repeated.
    """
    ack = ""
    statement = ""
    question = ""
    user_uttr = state_utils.get_last_human_utterance(vars)
    shared_memory = state_utils.get_shared_memory(vars)
    make_my_pets_info(vars)
    prev_state = condition_utils.get_last_state(vars)
    about_users_pet = str(prev_state).split(".")[-1] == "SYS_ASK_ABOUT_PET"
    isno = is_no(state_utils.get_last_human_utterance(vars))
    users_pet = shared_memory.get("users_pet", "")
    users_pet_name = shared_memory.get("users_pet_name", "")
    users_pet_breed = shared_memory.get("users_pet_breed", "")
    # NOTE(review): the logging below looks up the literal key
    # 'users_pet_breed' rather than the variable — confirm intent.
    logger.info(
        f"make_utt_with_ack users_pet {users_pet} users_pet_name {users_pet_name} "
        f"users_pet_breed {users_pet_breed} {breeds_dict.get('users_pet_breed', '')} "
        f"about_users_pet {about_users_pet}")
    my_pets_info = shared_memory.get("my_pets_info", {})
    # --- acknowledgement for the slot the user just answered ---
    if about_users_pet and prev_what_to_ask == "name" and users_pet_name:
        ack = "Very cool name! You have such an amusing mind!"
    if about_users_pet and prev_what_to_ask == "breed":
        if users_pet and users_pet_breed:
            breed_info = breeds_dict[users_pet_breed]
            facts = breed_info.get("facts", "")
            if not facts.endswith("."):
                facts = f"{facts}."
            if facts:
                ack = f"I know a lot about {users_pet} breeds. {facts}"
                # + f"Would you like to know more about {users_pet_breed}?"
    if about_users_pet and prev_what_to_ask == "play" and not isno:
        ack = "Really, playing with a pet makes a lot of fun."
    if prev_what_to_ask == "videos" and not isno:
        if users_pet:
            ack = f"I would like to see videos with your {users_pet} if I could."
    # --- statement + question for the next slot ---
    if what_to_ask == "name":
        if users_pet in CATS_DOGS:
            repl_pet = replace_pet(users_pet)
            statement = choose_pet_phrase(vars, repl_pet)
        if users_pet:
            question = f"What is your {users_pet}'s name?"
        else:
            question = "What is your pet's name?"
    if what_to_ask == "breed":
        if users_pet in CATS_DOGS:
            repl_pet = replace_pet(users_pet)
            my_pet = my_pets_info[repl_pet]
            my_pet_breed = my_pet["breed"]
            statement = f"I have a {my_pet_breed} {users_pet}."
            question = f"What is your {users_pet}'s breed?"
    if what_to_ask == "play":
        if users_pet in CATS_DOGS:
            repl_pet = replace_pet(users_pet)
            games = " and ".join(pet_games[repl_pet])
            statement = f"I like to play with my {users_pet} different games, such as {games}."
        if users_pet:
            question = f"Do you play with your {users_pet}?"
    if what_to_ask == "like":
        statement = "There's an old saying that pets repay the love you give them ten-fold."
        if users_pet_name:
            question = f"Do you like {users_pet_name}?"
        elif users_pet:
            question = f"Do you like your {users_pet}?"
        else:
            question = "Do you like your pet?"
    if what_to_ask == "videos":
        statement = "I saw a lot of funny videos with pets on Youtube."
        question = "Did you shoot any videos with your pet?"
    if what_to_ask == "pandemic":
        statement = "Nowadays during pandemic we have to stay at home."
        question = "Does your pet help you to cheer up during pandemic?"
    # Easter egg: echo barking back.
    if "bark" in user_uttr["text"]:
        ack = f"Woof-woof, bow-bow, ruff-ruff! {ack}"
    # Never repeat an acknowledgement that was already used.
    used_acks = shared_memory.get("used_acks", [])
    if ack and ack in used_acks:
        ack = ""
    else:
        used_acks.append(ack)
        state_utils.save_to_shared_memory(vars, used_acks=used_acks)
    response = f"{ack} {statement} {question}"
    # NOTE(review): replace(" ", " ") is a no-op as written — likely intended
    # to collapse double spaces; confirm against the original source.
    response = response.replace(" ", " ").strip()
    return response
def _get_reply_and_conf(self, annotated_user_phrase, bot_phrase, emotion,
                        emotion_skill_attributes, human_attr):
    """Run one step of the emotion-skill state machine.

    Args:
        annotated_user_phrase: last human utterance dict (with annotations).
        bot_phrase: last bot utterance text.
        emotion: detected emotion label (e.g. "joy", "sadness", "neutral").
        emotion_skill_attributes: skill state carried between turns
            ({"state": ..., "emotion": ..., "prev_jokes_advices": [...]}).
        human_attr: mutable human attributes dict; "emotion_skill" sub-dict
            is created/updated in place.

    Returns:
        Tuple of (reply text, confidence, link dict or "", updated
        emotion_skill_attributes).
    """
    user_phrase = annotated_user_phrase["text"]
    is_yes_detected = is_yes(annotated_user_phrase)
    is_no_detected = is_no(annotated_user_phrase)
    start_states = {
        "joy": "joy_i_feel" if self._check_i_feel(user_phrase, bot_phrase) else "joy_feeling_towards_smth",
        "sadness": "sad_and_lonely",
        "fear": "fear",
        "anger": "anger",
        "surprise": "surprise",
        "love": "love",
    }
    if "emotion_skill" not in human_attr:
        human_attr["emotion_skill"] = {}
    if emotion != "neutral":
        human_attr["emotion_skill"]["last_emotion"] = emotion
    state = emotion_skill_attributes.get("state", "")
    prev_jokes_advices = emotion_skill_attributes.get(
        "prev_jokes_advices", [])
    just_asked_about_jokes = "why hearing jokes is so important for you? are you sad?" in bot_phrase
    reply, confidence = "", 0
    link = ""
    self.logger.info(
        f"_get_reply_and_conf {user_phrase}; {bot_phrase}; {emotion}; {just_asked_about_jokes};"
        f" {emotion_skill_attributes}; {is_yes_detected}; {is_no_detected}; {human_attr}"
    )
    if state == "":
        # start_state
        state = start_states[emotion]
        step = self.steps[state]
        reply = self._random_choice(step["answers"])
        confidence = min(0.98, self.emotion_precision[emotion])
        if len(step["next_step"]):
            state = random.choice(step["next_step"])
    # BUGFIX: the original tested the *function object* `is_no` (always
    # truthy) instead of the detected flag, so this branch fired whenever the
    # joke question was asked, regardless of the user's answer.
    elif state == "sad_and_lonely" and just_asked_about_jokes and is_no_detected:
        reply = "Actually, I love jokes but not now. Dead serious about that."
        confidence = 1.0
        state = ""
    elif state == "offered_advice":
        # we offered an advice
        if is_no_detected or is_positive_regexp_based(
                {"text": user_phrase}):
            state = "no"
            step = self.steps[state]
            reply = random.choice(step["answers"])
            if len(step["next_step"]):
                state = random.choice(step["next_step"])
            else:
                state = ""
            confidence = 0.95
        else:
            # provide advices and offer another one
            reply = self._random_choice(self.advices[emotion], prev_jokes_advices)
            state = "offer_another_advice"
            if reply == "":
                # All advices for this emotion are exhausted.
                state = "sad_and_lonely_end"
                step = self.steps[state]
                reply = random.choice(step["answers"])
            else:
                prev_jokes_advices.append(reply)
                if len(prev_jokes_advices) == len(self.advices[emotion]):
                    state = "sad_and_lonely_end"
            # BUGFIX: was `1.0 if is_yes else 0.95` — `is_yes` is the function
            # object and is always truthy, so confidence was always 1.0.
            confidence = 1.0 if is_yes_detected else 0.95
    else:
        if emotion in ["sadness", "fear", "anger"] and "joy" in state:
            # Negative emotion overrides a joy-flavoured state.
            state = "sad_and_lonely"
        step = self.steps[state]
        reply = random.choice(step["answers"])
        if len(step["next_step"]):
            state = random.choice(step["next_step"])
        if state == "offer_advice" and sorted(self.advices.get(
                emotion, [])) == sorted(prev_jokes_advices):
            logger.warning(
                "Asked for advice, but we have already done them")
            reply, confidence = "", 0
        link = step["link"]
        if link:
            link = link_to([link], human_attributes=human_attr)
            link["phrase"] = reply
            # reply += link['phrase']
        confidence = 0.95
    emotion_skill_attributes = {
        "state": state,
        "emotion": emotion,
        "prev_jokes_advices": prev_jokes_advices
    }
    if "joy" in state:
        # Joy states are deliberately down-weighted.
        confidence = confidence * 0.5
    return reply, confidence, link, emotion_skill_attributes
def collect_topics_and_statuses(dialogs):
    """For each dialog, decide the news topic, dialog status, and a news sample.

    Branches first on whether the news skill was recently active (its output is
    found in the last 3 utterances), then on the previous skill status or on
    patterns in the current user utterance.

    Args:
        dialogs: batch of dialog dicts (agent format; presumably each has
            "human_utterances", "bot_utterances", "utterances", "human" —
            TODO confirm against caller).

    Returns:
        Tuple of three parallel lists, one entry per dialog:
        topics (str), statuses (one of "details" / "finished" / "headline" /
        "declined"), and curr_news_samples (news dicts, possibly empty).
    """
    topics = []
    statuses = []
    curr_news_samples = []
    for dialog in dialogs:
        curr_uttr = dialog["human_utterances"][-1]
        prev_uttr = dialog["bot_utterances"][-1] if len(
            dialog["bot_utterances"]) else {}
        human_attr = {}
        human_attr["news_api_skill"] = dialog["human"]["attributes"].get(
            "news_api_skill", {})
        # News already shown to this user — used to avoid repeats.
        discussed_news = human_attr["news_api_skill"].get("discussed_news", [])
        prev_bot_uttr = dialog["bot_utterances"][-1] if len(
            dialog["bot_utterances"]) > 0 else {}
        prev_bot_uttr_lower = prev_bot_uttr.get("text", "").lower()
        # Was the news skill the active skill within the last 3 utterances?
        prev_news_skill_output = get_skill_outputs_from_dialog(
            dialog["utterances"][-3:], skill_name="news_api_skill",
            activated=True)
        if len(prev_news_skill_output) > 0 and len(
                prev_news_skill_output[-1]) > 0:
            logger.info(f"News skill was prev active.")
            prev_news_skill_output = prev_news_skill_output[-1]
            prev_status = prev_news_skill_output.get("news_status", "")
            prev_topic = prev_news_skill_output.get("news_topic", "all")
            last_news = prev_news_skill_output.get("curr_news", {})
            if prev_status == OFFERED_NEWS_DETAILS_STATUS:
                # Bot offered details for a shown headline; yes -> details.
                topics.append(prev_topic)
                if is_yes(curr_uttr):
                    logger.info(f"Detected topic for news: {prev_topic}")
                    statuses.append("details")
                else:
                    logger.info("User refused to get news details")
                    statuses.append("finished")
                curr_news_samples.append(last_news)
            elif prev_status == OFFERED_BREAKING_NEWS_STATUS or OFFER_BREAKING_NEWS.lower(
            ) in prev_bot_uttr_lower:
                # Bot offered breaking news (by status or by literal phrase).
                topics.append("all")
                if is_yes(curr_uttr):
                    logger.info("Detected topic for news: all.")
                    statuses.append("headline")
                else:
                    logger.info("User refuse to get latest news")
                    statuses.append("declined")
                curr_news_samples.append(last_news)
            elif re.search(TELL_MORE_NEWS_TEMPLATES, curr_uttr["text"].lower()):
                # "Tell me more" — find the most recent discussed news sample
                # in a wider window (last 7 utterances).
                prev_news_skill_output = get_skill_outputs_from_dialog(
                    dialog["utterances"][-7:], skill_name="news_api_skill",
                    activated=True)
                for prev_news_out in prev_news_skill_output:
                    if prev_news_out.get("curr_news", {}) != {}:
                        last_news = prev_news_out.get("curr_news", {})
                logger.info(
                    f"User requested more news. Prev news was: {last_news}")
                topics.append(prev_topic)
                statuses.append("headline")
                curr_news_samples.append(
                    get_news_for_current_entity(prev_topic, curr_uttr,
                                                discussed_news))
            elif prev_status == OFFERED_NEWS_TOPIC_CATEGORIES_STATUS:
                # Bot offered a choice of topic categories.
                if not (news_rejection(curr_uttr["text"].lower())
                        or is_no(curr_uttr)):
                    logger.info("User chose the topic for news")
                    # prev_topic is presumably a space-separated pair of
                    # offered categories — TODO confirm upstream format.
                    if ANY_TOPIC_PATTERN.search(curr_uttr["text"]):
                        topics.append(prev_topic.split()[0])
                        curr_news_samples.append(
                            get_news_for_current_entity(
                                prev_topic.split()[0], curr_uttr,
                                discussed_news))
                    elif SECOND_TOPIC_PATTERN.search(curr_uttr["text"]):
                        topics.append(prev_topic.split()[1])
                        curr_news_samples.append(
                            get_news_for_current_entity(
                                prev_topic.split()[1], curr_uttr,
                                discussed_news))
                    else:
                        # Otherwise extract a topic from the user's own words.
                        entities = extract_topics(curr_uttr)
                        if len(entities) != 0:
                            topics.append(entities[-1])
                            curr_news_samples.append(
                                get_news_for_current_entity(
                                    entities[-1], curr_uttr, discussed_news))
                        else:
                            topics.append("all")
                            curr_news_samples.append(
                                get_news_for_current_entity(
                                    "all", curr_uttr, discussed_news))
                    logger.info(f"Chosen topic: {topics}")
                    statuses.append("headline")
                else:
                    logger.info("User doesn't want to get any news")
                    topics.append("all")
                    statuses.append("declined")
                    curr_news_samples.append({})
            elif prev_status == OFFER_TOPIC_SPECIFIC_NEWS_STATUS:
                # Bot offered news on a specific topic; yes/no decides.
                topics.append(prev_topic)
                if is_yes(curr_uttr):
                    logger.info(
                        f"User wants to listen news about {prev_topic}.")
                    statuses.append("headline")
                else:
                    logger.info(
                        f"User doesn't want to listen news about {prev_topic}."
                    )
                    statuses.append("declined")
                curr_news_samples.append(last_news)
            else:
                # Any other prior status: close the topic, allow a new offer.
                logger.info(
                    "News skill was active and now can offer more news.")
                topics.append("all")
                statuses.append("finished")
                curr_news_samples.append(
                    get_news_for_current_entity("all", curr_uttr,
                                                discussed_news))
        else:
            logger.info(f"News skill was NOT active.")
            # User mentions news (topic classifier or regexp) and it is not a
            # false-positive phrase.
            about_news = (
                ({"News"} & set(get_topics(curr_uttr, which="cobot_topics")))
                or re.search(NEWS_TEMPLATES, curr_uttr["text"].lower())
            ) and not re.search(FALSE_NEWS_TEMPLATES, curr_uttr["text"].lower())
            lets_chat_about_particular_topic = if_chat_about_particular_topic(
                curr_uttr, prev_uttr)
            lets_chat_about_news = if_chat_about_particular_topic(
                curr_uttr, prev_uttr, compiled_pattern=NEWS_TEMPLATES)
            _was_offer_news = was_offer_news_about_topic(prev_bot_uttr_lower)
            _offered_by_bot_entities = EXTRACT_OFFERED_NEWS_TOPIC_TEMPLATE.findall(
                prev_bot_uttr_lower)
            if about_news:
                # the request contains something about news
                entities = extract_topics(curr_uttr)
                logger.info(f"News request on entities: `{entities}`")
                if re.search(TELL_MORE_NEWS_TEMPLATES,
                             curr_uttr["text"].lower()):
                    # user requestd more news.
                    # look for the last 3 turns and find last discussed news sample
                    logger.info("Tell me more news request.")
                    prev_news_skill_output = get_skill_outputs_from_dialog(
                        dialog["utterances"][-7:],
                        skill_name="news_api_skill", activated=True)
                    if len(prev_news_skill_output) > 0 and len(
                            prev_news_skill_output[-1]) > 0:
                        prev_news_skill_output = prev_news_skill_output[-1]
                        prev_topic = prev_news_skill_output.get(
                            "news_topic", "all")
                    else:
                        prev_topic = "all"
                    logger.info(
                        "News skill was NOT prev active. User requested more news."
                    )
                    topics.append(prev_topic)
                    statuses.append("headline")
                    curr_news_samples.append(
                        get_news_for_current_entity(prev_topic, curr_uttr,
                                                    discussed_news))
                elif len(entities) == 0:
                    # no entities or nounphrases -> no special news request, get all news
                    logger.info("News request, no entities and nounphrases.")
                    topics.append("all")
                    statuses.append("headline")
                    curr_news_samples.append(
                        get_news_for_current_entity("all", curr_uttr,
                                                    discussed_news))
                else:
                    # found entities or nounphrases -> special news request,
                    # get the last mentioned entity
                    # if no named entities, get the last mentioned nounphrase
                    logger.info(f"Detected topic for news: {entities[-1]}")
                    topics.append(entities[-1])
                    statuses.append("headline")
                    curr_news_samples.append(
                        get_news_for_current_entity(entities[-1], curr_uttr,
                                                    discussed_news))
            elif OFFER_BREAKING_NEWS.lower() in prev_bot_uttr_lower:
                # news skill was not previously active
                topics.append("all")
                if is_yes(curr_uttr) or lets_chat_about_news:
                    logger.info("Detected topic for news: all.")
                    statuses.append("headline")
                else:
                    logger.info(
                        "Detected topic for news: all. Refused to get latest news"
                    )
                    statuses.append("declined")
                curr_news_samples.append(
                    get_news_for_current_entity("all", curr_uttr,
                                                discussed_news))
            elif _was_offer_news and _offered_by_bot_entities:
                # Bot previously offered news about specific entities.
                topics.append(_offered_by_bot_entities[-1])
                if is_yes(curr_uttr):
                    logger.info(
                        f"Bot offered news on entities: `{_offered_by_bot_entities}`"
                    )
                    statuses.append("headline")
                else:
                    logger.info(
                        f"Bot offered news on entities: `{_offered_by_bot_entities}`. User refused."
                    )
                    statuses.append("declined")
                curr_news_samples.append(
                    get_news_for_current_entity(_offered_by_bot_entities[-1],
                                                curr_uttr, discussed_news))
            elif lets_chat_about_particular_topic:
                # the request contains something about news
                entities = extract_topics(curr_uttr)
                logger.info(f"News request on entities: `{entities}`")
                if len(entities) == 0:
                    # no entities or nounphrases & lets_chat_about_particular_topic
                    logger.info(
                        "No news request, no entities and nounphrases, but lets chat."
                    )
                    topics.append("all")
                    statuses.append("declined")
                    curr_news_samples.append({})
                else:
                    # found entities or nounphrases -> special news request,
                    # get the last mentioned entity
                    # if no named entities, get the last mentioned nounphrase
                    logger.info(f"Detected topic for news: {entities[-1]}")
                    topics.append(entities[-1])
                    statuses.append("headline")
                    curr_news_samples.append(
                        get_news_for_current_entity(entities[-1], curr_uttr,
                                                    discussed_news))
            else:
                logger.info("Didn't detected news request.")
                topics.append("all")
                statuses.append("declined")
                curr_news_samples.append({})
    return topics, statuses, curr_news_samples
def has_pet_request(ngrams, vars):
    """Condition: true unless the user's last utterance is a refusal."""
    last_human_uttr = state_utils.get_last_human_utterance(vars)
    flag = not is_no(last_human_uttr)
    logger.info(f"has_pet_request={flag}")
    return flag
def start_or_continue_facts(vars, topic_config):
    """Decide whether the facts mode should start or continue for a topic.

    Returns True when either (a) a trigger / agreed fact request suggests
    starting facts for the current special topic, (b) the current wikiHow or
    Wikipedia page still has unused content and the user did not refuse, or
    (c) a topic switch is detected and the new topic has facts configured.

    Args:
        vars: dff scenario state container.
        topic_config: per-topic configuration dict (triggers, facts, ...).

    Returns:
        bool flag.
    """
    flag = False
    shared_memory = state_utils.get_shared_memory(vars)
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    prev_active_skill = bot_uttr.get("active_skill", "")
    isno = is_no(state_utils.get_last_human_utterance(vars))
    found_topic = shared_memory.get("special_topic", "")
    cur_mode = shared_memory.get("cur_mode", "smalltalk")
    cur_wikipedia_page = shared_memory.get("cur_wikipedia_page", "")
    cur_wikihow_page = shared_memory.get("cur_wikihow_page", "")
    logger.info(
        f"cur_wikihow_page {cur_wikihow_page} cur_wikipedia_page {cur_wikipedia_page}"
    )
    if found_topic:
        if cur_mode == "smalltalk" and "triggers" in topic_config[found_topic]:
            # Still in smalltalk: start facts only if a configured trigger
            # fires or the user agreed to hear facts.
            triggers = topic_config[found_topic]["triggers"]
            entity_substr, entity_types, wikipedia_page, wikihow_page = find_trigger(
                vars, triggers)
            if wikihow_page or wikipedia_page or if_facts_agree(vars):
                flag = True
        else:
            # Already in facts mode (or no triggers configured): continue if a
            # wikipage can be extracted or unused page content remains.
            checked_wikipage = extract_and_save_wikipage(vars)
            if checked_wikipage:
                flag = True
            if (cur_wikipedia_page or cur_wikihow_page) and not isno:
                # memory is presumably a module-level cache of downloaded
                # page content — TODO confirm where it is populated.
                wikihow_page_content_list = memory.get("wikihow_content", [])
                wikipedia_page_content_list = memory.get(
                    "wikipedia_content", [])
                used_wikihow_nums = shared_memory.get("used_wikihow_nums", {}).get(
                    cur_wikihow_page, [])
                used_wikipedia_nums = shared_memory.get(
                    "used_wikipedia_nums", {}).get(cur_wikipedia_page, [])
                logger.info(
                    f"request, used_wikihow_nums {used_wikihow_nums} used_wikipedia_nums {used_wikipedia_nums}"
                )
                logger.info(
                    f"request, wikipedia_page_content_list {wikipedia_page_content_list[:3]} "
                    f"wikihow_page_content_list {wikihow_page_content_list[:3]}"
                )
                # Continue while some page chunks have not been used yet.
                if len(wikihow_page_content_list) > 0 and len(
                        used_wikihow_nums) < len(wikihow_page_content_list):
                    flag = True
                if len(wikipedia_page_content_list) > 0 and len(
                        used_wikipedia_nums) < len(
                            wikipedia_page_content_list):
                    flag = True
    first_utt = False
    # Re-check for a topic switch when no special topic is set yet, or when
    # another skill interrupted this one on the previous turn.
    if not shared_memory.get("special_topic", "") or prev_active_skill not in {
            "dff_wiki_skill", "dff_music_skill"
    }:
        found_topic, first_utt, utt_can_continue, utt_conf = check_switch(
            vars, topic_config)
    logger.info(f"start_or_continue_facts, first_utt {first_utt}")
    if found_topic:
        facts = topic_config[found_topic].get("facts", {})
        if facts:
            flag = True
        # Not the first utterance and the matching skill was not active on the
        # previous turn -> do not continue facts.
        if not first_utt and ((found_topic != "music"
                               and prev_active_skill != "dff_wiki_skill") or
                              (found_topic == "music"
                               and prev_active_skill != "dff_music_skill")):
            flag = False
    return flag
def misheard_response():
    """Flask handler for the misheard-ASR skill.

    Reads a batch of dialogs from ``request.json["dialogs"]`` and, per dialog:
    - if the bot just asked "Have you said ...?" (asr_misheard flag set and
      this skill was active), a "yes" re-surfaces the previous user turn's
      skill hypotheses as candidates, a "no" asks for a new topic;
    - otherwise reacts to the ASR confidence annotation: "very_low" returns a
      canned misheard phrase, "medium" asks the confirmation question and sets
      the asr_misheard flag in bot attributes for the next turn.

    Returns:
        flask ``jsonify`` list of (responses, confidences, human_attributes,
        bot_attributes) tuples, one per dialog; "sorry" with confidence 0.0
        means the skill abstains.
    """
    st_time = time.time()
    dialogs_batch = request.json["dialogs"]
    final_confidences = []
    final_responses = []
    final_human_attributes = []
    final_bot_attributes = []
    for dialog in dialogs_batch:
        prev_user_utt = None
        if len(dialog["human_utterances"]) > 1:
            prev_user_utt = dialog["human_utterances"][-2]
        # NOTE: mutated in place below (asr_misheard flag) and echoed back.
        bot_attributes = dialog["bot"]["attributes"]
        human_attributes = dialog["human"]["attributes"]
        current_user_utt = dialog["human_utterances"][-1]
        prev_bot_utt = dialog["bot_utterances"][-1] if len(
            dialog["bot_utterances"]) > 0 else {}
        logger.debug(
            f"MISHEARD ASR INPUT: current utt text: {current_user_utt['text']};"
            f"bot attrs: {bot_attributes}; user attrs: {human_attributes}"
            f"HYPOTS: {current_user_utt['hypotheses']}"
            f"PREV BOT UTT: {prev_bot_utt}")
        if bot_attributes.get("asr_misheard") is True and prev_bot_utt[
                "active_skill"] == "misheard_asr":
            # We asked the confirmation question on the previous turn.
            bot_attributes["asr_misheard"] = False
            if is_yes(current_user_utt):
                # User confirmed — replay the previous turn's hypotheses from
                # all other skills as this skill's candidates.
                hypots = prev_user_utt["hypotheses"]
                logger.debug(f"PREV HYPOTS: {hypots}")
                candidates = []
                confs = []
                for resp in hypots:
                    if resp["skill_name"] != "misheard_asr":
                        candidates.append(resp["text"])
                        confs.append(resp["confidence"])
                final_responses.append(candidates)
                final_confidences.append(confs)
                final_human_attributes.append([human_attributes] *
                                              len(candidates))
                final_bot_attributes.append([bot_attributes] *
                                            len(candidates))
            elif is_no(current_user_utt):
                # User said we misheard — ask for a new topic.
                response = "What is it that you'd like to chat about?"
                final_responses.append([response])
                final_confidences.append([1.0])
                final_human_attributes.append([human_attributes])
                final_bot_attributes.append([bot_attributes])
            else:
                # Neither yes nor no — abstain.
                final_responses.append(["sorry"])
                final_confidences.append([0.0])
                final_human_attributes.append([human_attributes])
                final_bot_attributes.append([bot_attributes])
        else:
            bot_attributes["asr_misheard"] = False
            if current_user_utt["annotations"]["asr"][
                    "asr_confidence"] == "very_low":
                final_responses.append([np.random.choice(misheard_responses)])
                final_confidences.append([1.0])
                final_human_attributes.append([human_attributes])
                final_bot_attributes.append([bot_attributes])
            elif current_user_utt["annotations"]["asr"][
                    "asr_confidence"] == "medium":
                # Ask for confirmation and remember that we did.
                response = f"Excuse me, I misheard you. Have you said: \"{dialog['human_utterances'][-1]['text']}\"?"
                final_responses.append([response])
                final_confidences.append([1.0])
                bot_attributes["asr_misheard"] = True
                final_human_attributes.append([human_attributes])
                final_bot_attributes.append([bot_attributes])
            else:
                # Good ASR confidence — abstain.
                final_responses.append(["sorry"])
                final_confidences.append([0.0])
                final_human_attributes.append([human_attributes])
                final_bot_attributes.append([bot_attributes])
    total_time = time.time() - st_time
    logger.info(f"misheard_asr#misheard_respond exec time: {total_time:.3f}s")
    resp = list(
        zip(final_responses, final_confidences, final_human_attributes,
            final_bot_attributes))
    logger.debug(f"misheard_asr#misheard_respond OUTPUT: {resp}")
    return jsonify(resp)
def sys_feel_great_condition(ctx: Context, actor: Actor, *args, **kwargs) -> bool:
    """Condition: the user's last utterance is a negative ("no") answer."""
    last_uttr = int_ctx.get_last_human_utterance(ctx, actor)
    return common_utils.is_no(last_uttr)
def smalltalk_response(vars, topic_config):
    """Generate a smalltalk reply for the current (or newly switched) topic.

    Picks the next unused smalltalk utterance from ``topic_config``: first one
    matching the most recent pending subtopic, then any subtopic-free one.
    Prepends an answer to the user's question or an acknowledgement, and sets
    confidence / can-continue flags accordingly.

    Args:
        vars: dff scenario state container (shared memory is read and updated:
            special_topic, used_utt_nums, subtopics, expected_* fields).
        topic_config: per-topic configuration with a "smalltalk" list of
            utterance-info dicts (keys seen here: "key", "subtopic",
            "can_continue", "conf", "add_general_ackn").

    Returns:
        The response string ("" when nothing is applicable).
    """
    response = ""
    first_utt = False
    shared_memory = state_utils.get_shared_memory(vars)
    bot_uttr = state_utils.get_last_bot_utterance(vars)
    prev_active_skill = bot_uttr.get("active_skill", "")
    if prev_active_skill not in {"dff_wiki_skill", "dff_music_skill"}:
        # Another skill interrupted — drop stale topic state.
        delete_topic_info(vars)
    found_topic = shared_memory.get("special_topic", "")
    cur_mode = shared_memory.get("cur_mode", "smalltalk")
    isno = is_no(state_utils.get_last_human_utterance(vars))
    utt_can_continue = "can"
    utt_conf = 0.0
    if cur_mode == "facts" and isno:
        # User refused facts — reset current pages and cached content.
        state_utils.save_to_shared_memory(vars, cur_wikihow_page="")
        state_utils.save_to_shared_memory(vars, cur_wikipedia_page="")
        memory["wikihow_content"] = []
        memory["wikipedia_content"] = []
    if not found_topic:
        found_topic, first_utt, utt_can_continue, utt_conf = check_switch(
            vars, topic_config)
    if found_topic:
        # Remember what entities/subtopics this topic expects to extract.
        expected_entities = topic_config[found_topic].get(
            "expected_entities", {})
        if expected_entities:
            state_utils.save_to_shared_memory(
                vars, expected_entities=expected_entities)
        existing_subtopic_info = shared_memory.get("expected_subtopic_info", [])
        expected_subtopic_info = topic_config[found_topic].get(
            "expected_subtopic_info", {})
        if expected_subtopic_info and not existing_subtopic_info and first_utt:
            state_utils.save_to_shared_memory(
                vars, expected_subtopic_info=expected_subtopic_info)
    extract_and_save_entity(vars, topic_config, found_topic)
    extract_and_save_subtopic(vars, topic_config, found_topic)
    available_utterances = shared_memory.get("available_utterances", [])
    logger.info(f"subtopics {shared_memory.get('subtopics', [])}")
    subtopics_to_delete = 0
    add_general_ackn = False
    if found_topic:
        used_utt_nums_dict = shared_memory.get("used_utt_nums", {})
        used_utt_nums = used_utt_nums_dict.get(found_topic, [])
        state_utils.save_to_shared_memory(vars, special_topic=found_topic)
        subtopics = shared_memory.get("subtopics", [])
        if subtopics:
            # Walk pending subtopics from newest to oldest; pick the first
            # unused smalltalk utterance matching one of them.
            for i in range(len(subtopics) - 1, -1, -1):
                cur_subtopic = subtopics[i]
                for num, utt_info in enumerate(
                        topic_config[found_topic]["smalltalk"]):
                    utt_key = utt_info.get("key", "")
                    # Skip already-used utterances and, when a whitelist is
                    # set, ones not on it.
                    if num not in used_utt_nums and (
                            not available_utterances or
                        (available_utterances
                         and utt_key in available_utterances)):
                        if utt_info.get(
                                "subtopic",
                                "") == cur_subtopic and check_utt_cases(
                                    vars, utt_info):
                            response, used_utt_nums = make_smalltalk_response(
                                vars, topic_config, shared_memory, utt_info,
                                used_utt_nums, num)
                            if response:
                                add_general_ackn = utt_info.get(
                                    "add_general_ackn", False)
                                utt_can_continue = utt_info.get(
                                    "can_continue", "can")
                                utt_conf = utt_info.get("conf", utt_conf)
                                break
                if response:
                    used_utt_nums_dict[found_topic] = used_utt_nums
                    state_utils.save_to_shared_memory(
                        vars, used_utt_nums=used_utt_nums_dict)
                    # Subtopic exhausted -> schedule it for removal.
                    if check_used_subtopic_utt(vars, topic_config,
                                               cur_subtopic):
                        subtopics_to_delete += 1
                    break
                else:
                    # Nothing matched this subtopic — drop it and try older.
                    subtopics_to_delete += 1
        if not subtopics or not response:
            # Fall back to subtopic-free smalltalk utterances.
            for num, utt_info in enumerate(
                    topic_config[found_topic]["smalltalk"]):
                utt_key = utt_info.get("key", "")
                if (num not in used_utt_nums
                        and check_utt_cases(vars, utt_info)
                        and not utt_info.get("subtopic", "")
                        and (not available_utterances or
                             (available_utterances
                              and utt_key in available_utterances))):
                    response, used_utt_nums = make_smalltalk_response(
                        vars, topic_config, shared_memory, utt_info,
                        used_utt_nums, num)
                    if response:
                        utt_can_continue = utt_info.get("can_continue", "can")
                        utt_conf = utt_info.get("conf", utt_conf)
                        add_general_ackn = utt_info.get(
                            "add_general_ackn", False)
                        used_utt_nums_dict[found_topic] = used_utt_nums
                        state_utils.save_to_shared_memory(
                            vars, used_utt_nums=used_utt_nums_dict)
                        break
        if subtopics_to_delete:
            # Subtopics were walked from the end of the list, so pop() removes
            # exactly the ones that were consumed/skipped.
            for i in range(subtopics_to_delete):
                subtopics.pop()
            state_utils.save_to_shared_memory(vars, subtopics=subtopics)
        logger.info(
            f"used_utt_nums_dict {used_utt_nums_dict} used_utt_nums {used_utt_nums}"
        )
    acknowledgement = check_acknowledgements(vars, topic_config)
    answer = answer_users_question(vars, topic_config) or acknowledgement
    # Collapse the double space left when answer is empty.
    response = f"{answer} {response}".strip().replace("  ", " ")
    logger.info(f"response {response}")
    if response:
        state_utils.save_to_shared_memory(vars, cur_mode="smalltalk")
        if utt_conf > 0.0:
            state_utils.set_confidence(vars, confidence=utt_conf)
        else:
            state_utils.set_confidence(vars,
                                       confidence=CONF_DICT["WIKI_TOPIC"])
        if first_utt or utt_can_continue == "must":
            state_utils.set_can_continue(
                vars, continue_flag=common_constants.MUST_CONTINUE)
        elif utt_can_continue == "prompt":
            state_utils.set_can_continue(
                vars, continue_flag=common_constants.CAN_CONTINUE_PROMPT)
        else:
            state_utils.set_can_continue(
                vars, continue_flag=common_constants.CAN_CONTINUE_SCENARIO)
    else:
        state_utils.set_confidence(vars, confidence=CONF_DICT["UNDEFINED"])
        state_utils.set_can_continue(
            vars, continue_flag=common_constants.CAN_NOT_CONTINUE)
    if not add_general_ackn:
        state_utils.add_acknowledgement_to_response_parts(vars)
    return response
async def send(self, payload: Dict, callback: Callable):
    """Dummy-skill connector: build fallback candidate responses for a dialog.

    Produces up to four candidates, each with its own confidence and
    attributes: a random "don't know" answer (always), a question sharing a
    nounphrase with the user's utterance, a link-to question (confidence
    depends on detected intents), and a fact sharing a nounphrase. Results
    are delivered asynchronously via ``callback``; on error the exception
    itself is sent as the response.

    Args:
        payload: agent task payload; ``payload["payload"]["dialogs"][0]`` is
            the dialog, ``all_prev_active_skills`` the skill history.
        callback: agent coroutine accepting (task_id=..., response=...).
    """
    try:
        st_time = time.time()
        dialog = deepcopy(payload["payload"]["dialogs"][0])
        is_sensitive_case = is_sensitive_situation(
            dialog["human_utterances"][-1])
        all_prev_active_skills = payload["payload"]["all_prev_active_skills"][0]
        curr_topics = get_topics(dialog["human_utterances"][-1],
                                 which="cobot_topics")
        curr_nounphrases = get_entities(dialog["human_utterances"][-1],
                                        only_named=False, with_labels=False)
        if len(curr_topics) == 0:
            curr_topics = ["Phatic"]
        logger.info(f"Found topics: {curr_topics}")
        # Clean up nounphrases in place, then drop the emptied ones.
        # NOTE(review): local `np` shadows the conventional numpy alias here.
        for i in range(len(curr_nounphrases)):
            np = re.sub(np_remove_expr, "", curr_nounphrases[i])
            np = re.sub(rm_spaces_expr, " ", np)
            if re.search(np_ignore_expr, np):
                curr_nounphrases[i] = ""
            else:
                curr_nounphrases[i] = np.strip()
        curr_nounphrases = [np for np in curr_nounphrases if len(np) > 0]
        logger.info(f"Found nounphrases: {curr_nounphrases}")
        # Parallel candidate lists: response text, confidence, attributes.
        cands = []
        confs = []
        human_attrs = []
        bot_attrs = []
        attrs = []
        # Candidate 1: always-available "don't know" fallback.
        cands += [choice(donotknow_answers)]
        confs += [0.5]
        attrs += [{"type": "dummy"}]
        human_attrs += [{}]
        bot_attrs += [{}]
        # Candidate 2: a prepared question sharing a nounphrase with the user
        # (only in longer, non-sensitive dialogs).
        if len(dialog["utterances"]) > 14 and not is_sensitive_case:
            questions_same_nps = []
            for i, nphrase in enumerate(curr_nounphrases):
                for q_id in NP_QUESTIONS.get(nphrase, []):
                    questions_same_nps += [QUESTIONS_MAP[str(q_id)]]
            if len(questions_same_nps) > 0:
                logger.info("Found special nounphrases for questions. Return question with the same nounphrase.")
                cands += [choice(questions_same_nps)]
                confs += [0.5]
                attrs += [{"type": "nounphrase_question"}]
                human_attrs += [{}]
                bot_attrs += [{}]
        # Candidate 3: link-to question; its confidence encodes how strongly
        # the response selector should prefer it.
        link_to_question, human_attr = get_link_to_question(
            dialog, all_prev_active_skills)
        if link_to_question:
            _prev_bot_uttr = dialog["bot_utterances"][-2]["text"] if len(
                dialog["bot_utterances"]) > 1 else ""
            _bot_uttr = dialog["bot_utterances"][-1]["text"] if len(
                dialog["bot_utterances"]) > 0 else ""
            _prev_active_skill = (
                dialog["bot_utterances"][-1]["active_skill"]
                if len(dialog["bot_utterances"]) > 0 else ""
            )
            # "no" to the FIRST link-to question in a row (and not from the
            # friendship skill) — offer another link-to with high confidence.
            _no_to_first_linkto = any(
                [phrase in _bot_uttr for phrase in LINK_TO_PHRASES])
            _no_to_first_linkto = _no_to_first_linkto and all(
                [phrase not in _prev_bot_uttr for phrase in LINK_TO_PHRASES]
            )
            _no_to_first_linkto = _no_to_first_linkto and is_no(
                dialog["human_utterances"][-1])
            _no_to_first_linkto = _no_to_first_linkto and _prev_active_skill != "dff_friendship_skill"
            _if_switch_topic = is_switch_topic(dialog["human_utterances"][-1])
            bot_uttr_dict = dialog["bot_utterances"][-1] if len(
                dialog["bot_utterances"]) > 0 else {}
            _if_choose_topic = if_choose_topic(dialog["human_utterances"][-1],
                                               bot_uttr_dict)
            _is_ask_me_something = ASK_ME_QUESTION_PATTERN.search(
                dialog["human_utterances"][-1]["text"])
            if len(dialog["human_utterances"]) > 1:
                # Previous turn was a "can't do" intent: yes/no on this turn
                # decides between moving on and ending the dialog.
                _was_cant_do = "cant_do" in get_intents(
                    dialog["human_utterances"][-2]) and (
                        len(curr_nounphrases) == 0
                        or is_yes(dialog["human_utterances"][-1])
                )
                _was_cant_do_stop_it = "cant_do" in get_intents(
                    dialog["human_utterances"][-2]) and is_no(
                        dialog["human_utterances"][-1]
                )
            else:
                _was_cant_do = False
                _was_cant_do_stop_it = False
            if _was_cant_do_stop_it:
                link_to_question = "Sorry, bye! #+#exit"
                confs += [1.0]  # finish dialog request
            elif _no_to_first_linkto:
                confs += [0.99]
            elif _is_ask_me_something or _if_switch_topic or _was_cant_do or _if_choose_topic:
                confs += [1.0]  # Use it only as response selector retrieve skill output modifier
            else:
                confs += [0.05]  # Use it only as response selector retrieve skill output modifier
            cands += [link_to_question]
            attrs += [{"type": "link_to_for_response_selector"}]
            human_attrs += [human_attr]
            bot_attrs += [{}]
        # Candidate 4: a fact sharing a nounphrase with the user's utterance.
        facts_same_nps = []
        for i, nphrase in enumerate(curr_nounphrases):
            for fact_id in NP_FACTS.get(nphrase, []):
                facts_same_nps += [
                    f"Well, now that you've mentioned {nphrase}, I've remembered this. {FACTS_MAP[str(fact_id)]}. "
                    f"{(opinion_request_question() if random.random() < ASK_QUESTION_PROB else '')}"
                ]
        if len(facts_same_nps) > 0 and not is_sensitive_case:
            logger.info("Found special nounphrases for facts. Return fact with the same nounphrase.")
            cands += [choice(facts_same_nps)]
            confs += [0.5]
            attrs += [{"type": "nounphrase_fact"}]
            human_attrs += [{}]
            bot_attrs += [{}]
        total_time = time.time() - st_time
        logger.info(f"dummy_skill exec time: {total_time:.3f}s")
        asyncio.create_task(
            callback(task_id=payload["task_id"],
                     response=[cands, confs, human_attrs, bot_attrs, attrs])
        )
    except Exception as e:
        logger.exception(e)
        sentry_sdk.capture_exception(e)
        # Deliver the exception so the agent can mark the task failed.
        asyncio.create_task(callback(task_id=payload["task_id"], response=e))
def tag_based_response_selection(dialog, candidates, scores, confidences, bot_utterances, all_prev_active_skills=None):
    """Pick the best response hypothesis using intent/topic/entity tags.

    Buckets every candidate into categorized hypothesis/prompt dictionaries
    (keyed by activity, topic-entity overlap, dialog-breakdown and
    required-dialog-act suffixes), then picks the top candidate, optionally
    replaces it with a prompt, appends a link-to prompt, and prepends an
    acknowledgement.

    :param dialog: full dialog state dict with "human_utterances",
        "bot_utterances" and "utterances" lists of annotated utterances
    :param candidates: list of hypothesis dicts (each has "skill_name",
        "text", "confidence", optional "can_continue", "type",
        "response_parts", "human_attributes", ...)
    :param scores: per-candidate annotator scores (passed through to
        compute_curr_single_scores)
    :param confidences: per-candidate skill confidences, aligned with candidates
    :param bot_utterances: previous bot utterance texts (used by pickup_best_id)
    :param all_prev_active_skills: optional list of previously active skill
        names; counted to limit CAN_CONTINUE_PROMPT skills
    :return: tuple (best_candidate dict, best candidate id, list of combined
        float scores for all candidates)

    NOTE: mutates elements of ``candidates`` in place (``can_continue``,
    ``response_parts``, and the chosen candidate's attributes).
    """
    all_prev_active_skills = all_prev_active_skills if all_prev_active_skills is not None else []
    # Counter of how many turns each skill has been active (used for the
    # CAN_CONTINUE_PROMPT cap below)
    all_prev_active_skills = Counter(all_prev_active_skills)
    annotated_uttr = dialog["human_utterances"][-1]
    all_user_intents, all_user_topics, all_user_named_entities, all_user_nounphrases = get_main_info_annotations(
        annotated_uttr)
    _is_switch_topic_request = is_switch_topic(annotated_uttr)
    _is_force_intent = any(
        [_intent in all_user_intents for _intent in FORCE_INTENTS_IC.keys()])
    # if user utterance contains any question (REGEXP & punctuation check!)
    _is_require_action_intent = is_any_question_sentence_in_utterance({
        "text": annotated_uttr.get("annotations", {}).get("sentseg", {}).get("punct_sent", annotated_uttr["text"])
    })
    # if user utterance contains any question AND requires some intent by socialbot
    _is_require_action_intent = _is_require_action_intent and any([
        _intent in all_user_intents for _intent in REQUIRE_ACTION_INTENTS.keys()
    ])
    _force_intents_detected = [
        _intent for _intent in FORCE_INTENTS_IC.keys() if _intent in all_user_intents
    ]
    # list of user intents which require some action by socialbot
    _require_action_intents_detected = [
        _intent for _intent in REQUIRE_ACTION_INTENTS.keys() if _intent in all_user_intents
    ]
    # flat list of skill names that may answer the detected force intents
    _force_intents_skills = sum([
        FORCE_INTENTS_IC.get(_intent, []) for _intent in _force_intents_detected
    ], [])
    # list of intents required by the socialbot
    _required_actions = sum([
        REQUIRE_ACTION_INTENTS.get(_intent, []) for _intent in _require_action_intents_detected
    ], [])
    _contains_entities = len(
        get_entities(annotated_uttr, only_named=False, with_labels=False)) > 0
    _is_active_skill_can_not_continue = False
    _prev_bot_uttr = dialog["bot_utterances"][-1] if len(
        dialog["bot_utterances"]) > 0 else {}
    _prev_active_skill = dialog["bot_utterances"][-1]["active_skill"] if len(
        dialog["bot_utterances"]) > 0 else ""
    _prev_prev_active_skill = dialog["bot_utterances"][-2][
        "active_skill"] if len(dialog["bot_utterances"]) > 1 else ""
    # True if neither of the two previous bot turns came from a scripted skill
    _no_script_two_times_in_a_row = False
    if _prev_active_skill and _prev_prev_active_skill:
        if all([
            skill not in ACTIVE_SKILLS + ALMOST_ACTIVE_SKILLS
            for skill in [_prev_active_skill, _prev_prev_active_skill]
        ]):
            _no_script_two_times_in_a_row = True
    disliked_skills = get_updated_disliked_skills(
        dialog, can_not_be_disliked_skills=CAN_NOT_BE_DISLIKED_SKILLS)
    # dummy_skill's special "link_to" hypothesis gets priority on topic switch
    _is_dummy_linkto_available = any([
        cand_uttr["skill_name"] == "dummy_skill"
        and cand_uttr.get("type", "") == "link_to_for_response_selector"
        for cand_uttr in candidates
    ])

    # Pre-create every category bucket:
    # {active|continued|finished}_{topic-entity x db-breakdown}_{reqda|""}
    categorized_hyps = {}
    categorized_prompts = {}
    for dasuffix in ["reqda", ""]:
        for actsuffix in ["active", "continued", "finished"]:
            for suffix in [
                "same_topic_entity_no_db",
                "same_topic_entity_db",
                "othr_topic_entity_no_db",
                "othr_topic_entity_db",
            ]:
                categorized_hyps[f"{actsuffix}_{suffix}_{dasuffix}"] = []
                categorized_prompts[f"{actsuffix}_{suffix}_{dasuffix}"] = []

    CASE = ""
    acknowledgement_hypothesis = {}

    for cand_id, cand_uttr in enumerate(candidates):
        # zero confidence marks hypotheses suppressed by toxicity/badlist
        # annotators; scripted (ACTIVE_SKILLS) hypotheses are kept anyway
        if confidences[cand_id] == 0.0 and cand_uttr["skill_name"] not in ACTIVE_SKILLS:
            logger.info(f"Dropping cand_id: {cand_id} due to toxicity/badlists")
            continue
        all_cand_intents, all_cand_topics, all_cand_named_entities, all_cand_nounphrases = get_main_info_annotations(
            cand_uttr)
        skill_name = cand_uttr["skill_name"]
        _is_dialog_abandon = get_dialog_breakdown_annotations(
            cand_uttr) and PRIORITIZE_NO_DIALOG_BREAKDOWN
        # "just a prompt" = dummy question/link hypothesis or a pure-prompt response
        _is_just_prompt = (cand_uttr["skill_name"] == "dummy_skill" and any([
            question_type in cand_uttr.get("type", "")
            for question_type in ["normal_question", "link_to_for_response_selector"]
        ])) or cand_uttr.get("response_parts", []) == ["prompt"]
        if cand_uttr["confidence"] == 1.0:
            # for those hypotheses where developer forgot to set tag to MUST_CONTINUE
            cand_uttr["can_continue"] = MUST_CONTINUE
        _can_continue = cand_uttr.get("can_continue", CAN_NOT_CONTINUE)

        _user_wants_to_chat_about_topic = (
            if_chat_about_particular_topic(annotated_uttr)
            and "about it" not in annotated_uttr["text"].lower())
        _user_does_not_want_to_chat_about_topic = if_not_want_to_chat_about_particular_topic(
            annotated_uttr)
        _user_wants_bot_to_choose_topic = if_choose_topic(
            annotated_uttr, _prev_bot_uttr)

        if any([
            phrase.lower() in cand_uttr["text"].lower() for phrase in LINK_TO_PHRASES
        ]):
            # add `prompt` to response_parts if any linkto phrase in hypothesis
            cand_uttr["response_parts"] = cand_uttr.get("response_parts", []) + ["prompt"]

        # identifies if candidate contains named entities from last human utterance
        _same_named_entities = (len(
            get_common_tokens_in_lists_of_strings(
                all_cand_named_entities, all_user_named_entities)) > 0)
        # identifies if candidate contains all (not only named) entities from last human utterance
        _same_nounphrases = len(
            get_common_tokens_in_lists_of_strings(all_cand_nounphrases,
                                                  all_user_nounphrases)) > 0
        _same_topic_entity = (_same_named_entities or _same_nounphrases
                              ) and PRIORITIZE_WITH_SAME_TOPIC_ENTITY

        # "active" = continuation of previously active scripted skill (or a
        # MUST_CONTINUE hypothesis), gated by the global prioritization flag
        _is_active_skill = (_prev_active_skill == cand_uttr["skill_name"]
                            or cand_uttr.get("can_continue", "") == MUST_CONTINUE)
        _is_active_skill = _is_active_skill and skill_name in ACTIVE_SKILLS
        # NOTE(review): `all_prev_active_skills.get(skill_name, [])` compares
        # a list default against `< 10`; a missing skill_name would raise
        # TypeError here — presumably `0` was intended. Verify upstream.
        _is_active_skill = _is_active_skill and (_can_continue in [
            MUST_CONTINUE, CAN_CONTINUE_SCENARIO, CAN_NOT_CONTINUE
        ] or (_can_continue == CAN_CONTINUE_PROMPT
              and all_prev_active_skills.get(skill_name, []) < 10))
        _is_active_skill = _is_active_skill and PRIORITIZE_SCRIPTED_SKILLS
        if _is_active_skill:
            # we will forcibly add prompt if current scripted skill finishes scenario,
            # and has no opportunity to continue at all.
            _is_active_skill_can_not_continue = _is_active_skill and _can_continue in [
                CAN_NOT_CONTINUE
            ]

        if _is_force_intent:
            # =====force intents, choose as best_on_topic hypotheses from skills responding this request=====
            CASE = "Force intent."
            if cand_uttr["skill_name"] in _force_intents_skills:
                categorized_hyps, categorized_prompts = categorize_candidate(
                    cand_id,
                    skill_name,
                    categorized_hyps,
                    categorized_prompts,
                    _is_just_prompt,
                    _is_active_skill,
                    _can_continue,
                    _same_topic_entity,
                    _is_dialog_abandon,
                    _is_required_da=False,
                )
        elif _is_switch_topic_request or _user_does_not_want_to_chat_about_topic or _user_wants_bot_to_choose_topic:
            # =====direct request by user to switch the topic of current conversation=====
            # give priority to dummy linkto hypothesis if available, else other prompts if available.
            _is_active_skill = (
                cand_uttr.get("type", "") == "link_to_for_response_selector"
                if _is_dummy_linkto_available else _is_just_prompt)
            # no priority to must_continue to skip incorrect continuation of script
            _can_continue = CAN_CONTINUE_SCENARIO if _can_continue == MUST_CONTINUE else _can_continue
            CASE = "Switch topic intent."
            if len(all_user_named_entities) > 0 or len(
                    all_user_nounphrases) > 0:
                # -----user defines new topic/entity-----
                # _same_topic_entity does not depend on hyperparameter in these case
                _same_topic_entity = _same_named_entities or _same_nounphrases
                categorized_hyps, categorized_prompts = categorize_candidate(
                    cand_id,
                    skill_name,
                    categorized_hyps,
                    categorized_prompts,
                    _is_just_prompt,
                    _is_active_skill,
                    _can_continue,
                    _same_topic_entity,
                    _is_dialog_abandon,
                    _is_required_da=False,
                )
            else:
                # -----user want socialbot to define new topic/entity-----
                categorized_hyps, categorized_prompts = categorize_candidate(
                    cand_id,
                    skill_name,
                    categorized_hyps,
                    categorized_prompts,
                    _is_just_prompt,
                    _is_active_skill,
                    _can_continue,
                    _same_topic_entity,
                    _is_dialog_abandon,
                    _is_required_da=False,
                )
        elif _user_wants_to_chat_about_topic:
            # user wants to chat about particular topic
            CASE = "User wants to talk about topic."
            # in this case we do not give priority to previously active skill (but give to must continue skill!)
            # because now user wants to talk about something particular
            _is_active_skill = cand_uttr.get("can_continue", "") == MUST_CONTINUE
            # _same_topic_entity does not depend on hyperparameter in these case
            _same_topic_entity = _same_named_entities or _same_nounphrases
            categorized_hyps, categorized_prompts = categorize_candidate(
                cand_id,
                skill_name,
                categorized_hyps,
                categorized_prompts,
                _is_just_prompt,
                _is_active_skill,
                _can_continue,
                _same_topic_entity,
                _is_dialog_abandon,
                _is_required_da=False,
            )
        elif _is_require_action_intent and PRIORITIZE_WITH_REQUIRED_ACT:
            # =====user intent requires particular action=====
            CASE = "User intent requires action. USER UTTERANCE CONTAINS QUESTION."
            _is_grounding_reqda = (skill_name == "dff_grounding_skill"
                                   and cand_uttr.get(
                                       "type", "") == "universal_response")
            _is_active_skill = cand_uttr.get(
                "can_continue", "") == MUST_CONTINUE  # no priority to prev active skill
            _can_continue = CAN_NOT_CONTINUE  # no priority to scripted skills
            if set(all_cand_intents).intersection(
                    set(_required_actions
                        )) or _is_grounding_reqda or _is_active_skill:
                # -----one of the can intent is in intents required by user-----
                categorized_hyps, categorized_prompts = categorize_candidate(
                    cand_id,
                    skill_name,
                    categorized_hyps,
                    categorized_prompts,
                    _is_just_prompt,
                    _is_active_skill,
                    _can_continue,
                    _same_topic_entity,
                    _is_dialog_abandon,
                    _is_required_da=True,
                )
            else:
                # -----NO required dialog acts-----
                categorized_hyps, categorized_prompts = categorize_candidate(
                    cand_id,
                    skill_name,
                    categorized_hyps,
                    categorized_prompts,
                    _is_just_prompt,
                    _is_active_skill,
                    _can_continue,
                    _same_topic_entity,
                    _is_dialog_abandon,
                    _is_required_da=False,
                )
        else:
            # =====user intent does NOT require particular action=====
            CASE = "General case."
            categorized_hyps, categorized_prompts = categorize_candidate(
                cand_id,
                skill_name,
                categorized_hyps,
                categorized_prompts,
                _is_just_prompt,
                _is_active_skill,
                _can_continue,
                _same_topic_entity,
                _is_dialog_abandon,
                _is_required_da=False,
            )

        # a bit of rule based help
        if (len(dialog["human_utterances"]) == 1
                and cand_uttr["skill_name"] == "dff_friendship_skill"
                and greeting_spec in cand_uttr["text"]):
            # first turn: force the greeting hypothesis to the top category
            categorized_hyps = add_to_top1_category(cand_id, categorized_hyps,
                                                    _is_require_action_intent)
        elif (cand_uttr["skill_name"] == "dff_friendship_skill"
              and (how_are_you_spec in cand_uttr["text"]
                   or what_i_can_do_spec in cand_uttr["text"])
              and len(dialog["utterances"]) < 16):
            # early-dialog small talk from friendship skill also gets a boost
            categorized_hyps = add_to_top1_category(cand_id, categorized_hyps,
                                                    _is_require_action_intent)
        # elif cand_uttr["skill_name"] == 'program_y_dangerous' and cand_uttr['confidence'] == 0.98:
        #     categorized_hyps = add_to_top1_category(cand_id, categorized_hyps, _is_require_action_intent)
        elif cand_uttr[
                "skill_name"] == "small_talk_skill" and is_sensitive_situation(
                    dialog["human_utterances"][-1]):
            # let small talk to talk about sex ^_^
            categorized_hyps = add_to_top1_category(cand_id, categorized_hyps,
                                                    _is_require_action_intent)
        elif cand_uttr["confidence"] >= 1.0:
            # -------------------- SUPER CONFIDENCE CASE HERE! --------------------
            categorized_hyps = add_to_top1_category(cand_id, categorized_hyps,
                                                    _is_require_action_intent)

        # remember grounding skill's pure-acknowledgement hypothesis to
        # optionally prepend to the final response below
        if cand_uttr["skill_name"] == "dff_grounding_skill" and [
                "acknowledgement"
        ] == cand_uttr.get("response_parts", []):
            acknowledgement_hypothesis = deepcopy(cand_uttr)

    logger.info(f"Current CASE: {CASE}")
    # now compute current scores as one float value
    curr_single_scores = compute_curr_single_scores(candidates, scores,
                                                    confidences)

    # remove disliked skills from hypotheses
    if IGNORE_DISLIKED_SKILLS:
        for category in categorized_hyps:
            new_ids = []
            for cand_id in categorized_hyps[category]:
                # a MUST_CONTINUE hypothesis "forgives" the dislike
                if (candidates[cand_id]["skill_name"] in disliked_skills
                        and candidates[cand_id].get(
                            "can_continue", CAN_NOT_CONTINUE) == MUST_CONTINUE):
                    disliked_skills.remove(candidates[cand_id]["skill_name"])
                if candidates[cand_id]["skill_name"] not in disliked_skills:
                    new_ids.append(cand_id)
            categorized_hyps[category] = deepcopy(new_ids)
        for category in categorized_prompts:
            new_ids = []
            for cand_id in categorized_prompts[category]:
                if (candidates[cand_id]["skill_name"] in disliked_skills
                        and candidates[cand_id].get(
                            "can_continue", CAN_NOT_CONTINUE) == MUST_CONTINUE):
                    disliked_skills.remove(candidates[cand_id]["skill_name"])
                if candidates[cand_id]["skill_name"] not in disliked_skills:
                    new_ids.append(cand_id)
            categorized_prompts[category] = deepcopy(new_ids)

    best_cand_id = pickup_best_id(categorized_hyps, candidates,
                                  curr_single_scores, bot_utterances)
    best_candidate = candidates[best_cand_id]
    best_candidate["human_attributes"] = best_candidate.get(
        "human_attributes", {})
    # save updated disliked skills to human attributes of the best candidate
    best_candidate["human_attributes"]["disliked_skills"] = disliked_skills
    logger.info(f"Best candidate: {best_candidate}")
    n_sents_without_prompt = len(sent_tokenize(best_candidate["text"]))
    _is_best_not_script = best_candidate[
        "skill_name"] not in ACTIVE_SKILLS + ALMOST_ACTIVE_SKILLS
    # NOTE(review): the "******" literal below looks like a masked "?"
    # (question-mark presence check on the punctuated sentence) — confirm
    # against the upstream repository before relying on this flag.
    no_question_by_user = "******" not in dialog["human_utterances"][-1][
        "annotations"].get("sentseg", {}).get(
            "punct_sent", dialog["human_utterances"][-1]["text"])

    # if `no` to 1st in a row linkto question, and chosen response is not from scripted skill
    _no_to_first_linkto = is_no(dialog["human_utterances"][-1]) and any([
        phrase.lower() in _prev_bot_uttr.get("text", "").lower()
        for phrase in LINK_TO_PHRASES
    ])
    # if chosen short response or question by not-scripted skill
    _is_short_or_question_by_not_script = _is_best_not_script and (
        "?" in best_candidate["text"]
        or len(best_candidate["text"].split()) < 4)
    _no_questions_for_3_steps = not any([
        is_any_question_sentence_in_utterance(uttr)
        for uttr in dialog["bot_utterances"][-3:]
    ])

    if PRIORITIZE_PROMTS_WHEN_NO_SCRIPTS:
        if (_no_script_two_times_in_a_row
                and _is_short_or_question_by_not_script
                and no_question_by_user) or (_no_to_first_linkto
                                             and _is_best_not_script):
            # if no scripted skills 2 time sin a row before, current chosen best cand is not scripted, contains `?`,
            # and user utterance does not contain "?", replace utterance with dummy!
            best_prompt_id = pickup_best_id(categorized_prompts, candidates,
                                            curr_single_scores, bot_utterances)
            best_candidate = deepcopy(candidates[best_prompt_id])
            best_cand_id = best_prompt_id

    if does_not_require_prompt(candidates, best_cand_id):
        # the candidate already contains a prompt or a question or of a length more than 200 symbols
        logger.info(
            "Best candidate contains prompt, question, request or length of > 200 symbols. Do NOT add prompt."
        )
        pass
    elif sum(categorized_prompts.values(), []):
        # best cand is 3d times in a row not scripted skill, let's append linkto
        # need to add some prompt, and have a prompt
        _add_prompt_forcibly = best_candidate[
            "skill_name"] == _prev_active_skill and _is_active_skill_can_not_continue
        _add_prompt_forcibly = _add_prompt_forcibly and not _contains_entities
        # prompts are added:
        # - in 1 out of 10 cases, if current human utterance does not contain entities,
        #   and no prompt for several last bot utterances
        # - if PRIORITIZE_PROMTS_WHEN_NO_SCRIPTS and current utterance is from active on prev step scripted skill and
        #   it has a status can-not-continue
        # - if PRIORITIZE_PROMTS_WHEN_NO_SCRIPTS and last 2 bot uttr are not from scripted skill,
        #   and current best uttr is also from not-scripted skill
        if ((prompt_decision() and not _contains_entities
             and _no_questions_for_3_steps)
                or (_add_prompt_forcibly and PRIORITIZE_PROMTS_WHEN_NO_SCRIPTS)
                or (PRIORITIZE_PROMTS_WHEN_NO_SCRIPTS
                    and _no_script_two_times_in_a_row
                    and _is_best_not_script)):
            logger.info("Decided to add a prompt to the best candidate.")
            best_prompt_id = pickup_best_id(categorized_prompts, candidates,
                                            curr_single_scores, bot_utterances)
            # as we have only one active skill, let's consider active skill as that one providing prompt
            # but we also need to reassign all the attributes
            best_prompt = candidates[best_prompt_id]
            best_candidate[
                "text"] = f'{best_candidate["text"]} {best_prompt["text"]}'
            best_candidate["attributes"] = best_candidate.get("attributes", {})
            best_candidate["attributes"]["prompt_skill"] = best_prompt
            # anyway we must combine used links
            best_candidate["human_attributes"] = best_candidate.get(
                "human_attributes", {})
            best_candidate["human_attributes"] = join_used_links_in_attributes(
                best_candidate["human_attributes"],
                best_prompt.get("human_attributes", {}))
            if len(best_candidate["human_attributes"]["used_links"]) == 0:
                best_candidate["human_attributes"].pop("used_links")

    was_ackn = if_acknowledgement_in_previous_bot_utterance(dialog)
    best_resp_cont_ackn = "acknowledgement" in best_candidate.get(
        "response_parts", [])
    # prepend grounding acknowledgement only to short (1-sentence) final
    # responses, and only when neither the previous bot turn nor the chosen
    # response already contains one
    if (ADD_ACKNOWLEDGMENTS_IF_POSSIBLE and acknowledgement_hypothesis
            and acknowledgement_decision(all_user_intents)
            and n_sents_without_prompt == 1 and not was_ackn
            and not best_resp_cont_ackn):
        logger.info(
            "Acknowledgement is given, Final hypothesis contains only 1 sentence, no ackn in prev bot uttr,"
            "and we decided to add an acknowledgement to the best candidate.")
        best_candidate[
            "text"] = f'{acknowledgement_hypothesis["text"]} {best_candidate["text"]}'
        best_candidate["response_parts"] = ["acknowledgement"
                                           ] + best_candidate.get(
                                               "response_parts", [])

    return best_candidate, best_cand_id, curr_single_scores
def what_fav_food_response(vars):
    """Tell the user about the bot's "favorite" food item and ask a follow-up.

    Cycles through food categories (food, drink, fruit, dessert, vegetable,
    berry, snack), skipping ones already used (tracked in shared memory as
    ``used_food``). Sets confidence/can-continue flags depending on whether
    the user agreed to a link-to-food prompt, asked to talk about food, or
    refused.

    :param vars: dialog state container used by ``state_utils`` helpers
    :return: response string (food story + question), an acknowledgement if
        the user refused the link-to prompt, or ``error_response(vars)`` on
        exhaustion/failure
    """
    # each entry: [item name, descriptive story about it]
    food_types = {
        "food": [
            "lava cake",
            "This cake is delicious, decadent, addicting, divine, just so incredibly good!!!"
            " Soft warm chocolate cake outside giving way to a creamy, smooth stream of warm "
            "liquid chocolate inside, ensuring every forkful is bathed in velvety chocolate. "
            "It is my love at first bite.",
        ],
        "drink": [
            "orange juice",
            # fixed typo: "Isually" -> "Usually"
            "Usually I drink it at breakfast - it’s sweet with natural sugar for quick energy."
            " Oranges have lots of vitamins and if you drink it with pulp, it has fiber. Also,"
            " oranges are rich in vitamin C that keeps your immune system healthy.",
        ],
        "fruit": [
            "mango",
            "Every year I wait for the summers so that I can lose myself in the aroma of perfectly"
            " ripened mangoes and devour its heavenly sweet taste. Some people prefer mangoes which"
            " are tangy and sour. However, I prefer sweet ones that taste like honey.",
        ],
        "dessert": [
            "profiteroles",
            "Cream puffs of the size of a hamburger on steroids, the two pate a choux ends"
            " showcased almost two cups of whipped cream - light, fluffy, and fresh. "
            "There is nothing better than choux pastry!",
        ],
        "vegetable": [
            "broccoli",
            "This hearty and tasty vegetable is rich in dozens of nutrients. It is said "
            "to pack the most nutritional punch of any vegetable. When I think about green"
            " vegetables to include in my diet, broccoli is one of the foremost veggies to "
            "come to my mind.",
        ],
        "berry": [
            "blueberry",
            "Fresh blueberries are delightful and have a slightly sweet taste that is mixed"
            " with a little bit of acid from the berry. When I bite down on a blueberry,"
            " I enjoy a burst of juice as the berry pops, and this juice is very sweet. "
            "Blueberries are the blues that make you feel good!",
        ],
        "snack": [
            "peanut butter",
            "It tastes great! Creamy, crunchy or beyond the jar, there is a special place "
            "among my taste receptors for that signature peanutty flavor. I always gravitate"
            " toward foods like peanut butter chocolate cheesecake, and peanut butter cottage"
            " cookies. There are so many peanut butter flavored items for all kinds of food products!"
            " Still, sometimes it’s best delivered on a spoon.",
        ],
    }
    user_utt = state_utils.get_last_human_utterance(vars)
    bot_utt = state_utils.get_last_bot_utterance(vars)["text"].lower()
    question = ""
    shared_memory = state_utils.get_shared_memory(vars)
    used_food = shared_memory.get("used_food", [])
    unused_food = []
    # did the previous bot utterance link the user into the food skill?
    linkto_food_skill_agreed = any(
        [req.lower() in state_utils.get_last_bot_utterance(vars)["text"].lower() for req in TRIGGER_PHRASES]
    )
    lets_talk_about_asked = lets_talk_about_check(vars)
    try:
        # pick the next category that has not been used in this dialog yet
        if used_food:
            unused_food = [i for i in food_types.keys() if i not in used_food]
            if unused_food:
                food_type = random.choice(unused_food)
            else:
                # all categories exhausted — give up gracefully
                state_utils.set_can_continue(vars, continue_flag=CAN_NOT_CONTINUE)
                return error_response(vars)
        else:
            food_type = "food"
        # confidence policy: highest when the user explicitly engaged,
        # lower for later/less certain categories
        if linkto_food_skill_agreed:
            if is_yes(user_utt):
                if food_type == "food":
                    state_utils.set_confidence(vars, confidence=CONF_HIGH)
                    state_utils.set_can_continue(vars, continue_flag=MUST_CONTINUE)
                elif food_type == "snack":
                    state_utils.set_confidence(vars, confidence=CONF_LOWEST)
                    state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
                elif unused_food:
                    state_utils.set_confidence(vars, confidence=CONF_MIDDLE)
                    state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
                else:
                    state_utils.set_confidence(vars, confidence=CONF_LOWEST)
                    state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
            elif not is_no(user_utt):
                state_utils.set_confidence(vars, confidence=CONF_LOW)
                state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
            elif is_no(user_utt):
                # user refused the link-to prompt: acknowledge and stop
                state_utils.set_confidence(vars, confidence=CONF_HIGH)
                state_utils.set_can_continue(vars, continue_flag=CAN_NOT_CONTINUE)
                return ACKNOWLEDGEMENTS["fav_food_cook"]
        elif bool(lets_talk_about_asked):
            if (food_type == "food") or (lets_talk_about_asked == "if_chat_about_particular_topic"):
                state_utils.set_confidence(vars, confidence=CONF_HIGH)
                state_utils.set_can_continue(vars, continue_flag=MUST_CONTINUE)
            elif food_type == "snack":
                state_utils.set_confidence(vars, confidence=CONF_LOWEST)
                state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
            elif unused_food:
                state_utils.set_confidence(vars, confidence=CONF_MIDDLE)
                state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
            else:
                state_utils.set_confidence(vars, confidence=CONF_LOWEST)
                state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
        else:
            state_utils.set_confidence(vars, confidence=CONF_LOW)
            state_utils.set_can_continue(vars, continue_flag=CAN_CONTINUE_SCENARIO)
        state_utils.save_to_shared_memory(vars, used_food=used_food + [food_type])
        fav_item = food_types.get(food_type, [])
        if fav_item:
            # verb ("eat"/"drink") and follow-up question depend on category
            if food_type != "drink":
                if "what is your favorite food" in bot_utt:
                    question = f" What {food_type} do you like?"
                else:
                    question = " What is a typical meal from your country?"
                return f"I like to eat {fav_item[0]}. {fav_item[1]}" + question
            else:
                if "what is your favorite food" in bot_utt:
                    question = f" What {food_type} do you prefer?"
                else:
                    question = " What do you usually like to drink when you go out?"
                return f"I like to drink {fav_item[0]}. {fav_item[1]}" + question
        else:
            state_utils.set_can_continue(vars, continue_flag=CAN_NOT_CONTINUE)
            return error_response(vars)
    except Exception as exc:
        logger.exception(exc)
        sentry_sdk.capture_exception(exc)
        return error_response(vars)