def is_sensitive_topic_and_request(annotated_uttr):
    """Return True only when BOTH a sensitive topic and a sensitive dialog act are detected.

    Topic side: either the cobot topic classifier or the dialog-act topic
    classifier reports a topic listed in the module-level sensitive sets.
    Dialog-act side: any detected intent is in ``sensitive_all_intents``.
    """
    dialogact_topics = set(get_topics(annotated_uttr, which="cobot_dialogact_topics"))
    plain_topics = set(get_topics(annotated_uttr, which="cobot_topics"))
    topic_hit = any(t in sensitive_topics for t in plain_topics) or any(
        t in sensitive_dialogact_topics for t in dialogact_topics
    )
    detected_intents = get_intents(annotated_uttr, probs=False, which="all")
    intent_hit = any(i in sensitive_all_intents for i in detected_intents)
    # Require agreement of both signals to enter "safe mode".
    return topic_hit and intent_hit
def get_lets_chat_topic(lets_chat_about_flag, utt):
    """Map a detected classifier topic to a human-readable "let's chat" keyword.

    Returns "" when the flag is off, when no detected topic is in the mapping,
    or when the last mapped keyword does not literally occur in the utterance
    text (last matching topic wins, as in the original logic).
    """
    topic_to_keyword = {
        "Entertainment_Movies": "movies",
        "Entertainment_Music": "music",
        "Science_and_Technology": "science",
        "Sports": "sports",
        "Games": "games",
        "Movies_TV": "movies",
        "SciTech": "science",
        "Psychology": "emotions",
        "Music": "music",
        "Food_Drink": "food",
        "Weather_Time": "weather",
        "Entertainment": "activities",
        "Celebrities": "celebrities",
        "Travel_Geo": "travel",
        "Art_Event": "art",
    }
    chosen = ""
    if lets_chat_about_flag:
        for detected in get_topics(utt, which="all"):
            keyword = topic_to_keyword.get(detected)
            if keyword is not None:
                chosen = keyword
                # Only keep the keyword if the user actually said it.
                if chosen not in utt["text"]:
                    chosen = ""
    return chosen
def collect_topics_entities_intents(prev_human_utterance):
    """Return deduplicated (intents, dialogact topics, cobot topics) for an utterance.

    An utterance dict with one key or fewer is treated as empty and yields
    three empty lists.
    """
    if len(prev_human_utterance) <= 1:
        return [], [], []
    intents = list(set(get_intents(prev_human_utterance, which="cobot_dialogact_intents")))
    dialogact_topics = list(set(get_topics(prev_human_utterance, which="cobot_dialogact_topics")))
    cobot_topics = list(set(get_topics(prev_human_utterance, which="cobot_topics")))
    return intents, dialogact_topics, cobot_topics
def book_movie_music_found(annotated_uttr):
    """True if books/movies/music entertainment dialog-act topics were detected."""
    entertainment_topics = {
        "Entertainment_Books",
        "Entertainment_Movies",
        "Entertainment_Music",
    }
    detected = get_topics(annotated_uttr, which="cobot_dialogact_topics")
    return bool(entertainment_topics.intersection(detected))
def get_main_info_annotations(annotated_utterance):
    """Return (intents, topics, named_entities, nounphrases) for one utterance.

    Named entities are entity mentions with ``only_named=True``; nounphrases
    are all entity mentions (``only_named=False``), both without labels.
    """
    return (
        get_intents(annotated_utterance, which="all"),
        get_topics(annotated_utterance, which="all"),
        get_entities(annotated_utterance, only_named=True, with_labels=False),
        get_entities(annotated_utterance, only_named=False, with_labels=False),
    )
def thematic_funfact_response(ctx: Context, actor: Actor, *args, **kwargs) -> str:
    """Reply with a fun fact about the entity the user asked "about".

    Splits the last request on the word "about"; if an entity follows it,
    looks up a fact for that entity and appends a link-to question built
    from the utterance's first detected cobot topic. On any failure to
    build a response, confidence is zeroed so the skill is not selected.

    Fix vs. original: ``get_topics(...)[0]`` raised IndexError whenever the
    topic annotator returned an empty list, crashing the whole turn; the
    topic list is now guarded and the link question is simply omitted.
    """
    response = ""
    set_confidence(ctx, actor, CONF_HIGH)
    set_can_continue(ctx, actor, MUST_CONTINUE)
    parts = ctx.last_request.split("about")
    if len(parts) > 1:
        entity = parts[1]
        human_utter = get_last_human_utterance(ctx, actor)
        # May legitimately be empty if the annotator found no topic.
        topics = get_topics(human_utter, which="cobot_topics")
        funfact = get_fact(entity, f"fact about {entity}")
        if funfact:
            link_question = make_question(topics[0]) if topics else ""
            response = f"{funfact} {link_question}".strip()
    if not response:
        set_confidence(ctx, actor, CONF_ZERO)
    return response
async def send(self, payload: Dict, callback: Callable):
    """Rule-based skill selector.

    Inspects the latest human utterance (intents, topics, annotations) and
    builds the list of skills that should produce candidate responses, then
    reports the deduplicated list back through ``callback``.

    payload: dict with ``payload.states_batch`` (a one-element batch of dialog
        state) and a ``task_id`` — assumed from usage below; verify against caller.
    callback: async callable receiving ``task_id`` and ``response`` (skill names).
    On any exception, a minimal fallback skill list is returned instead.
    """
    st_time = time.time()
    try:
        dialog = payload["payload"]["states_batch"][0]
        skills_for_uttr = []
        user_uttr = dialog["human_utterances"][-1]
        user_uttr_text = user_uttr["text"].lower()
        user_uttr_annotations = user_uttr["annotations"]
        # Last bot utterance, or {} at the very beginning of the dialog.
        bot_uttr = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) else {}
        bot_uttr_text_lower = bot_uttr.get("text", "").lower()
        prev_active_skill = bot_uttr.get("active_skill", "")
        intent_catcher_intents = get_intents(user_uttr, probs=False, which="intent_catcher")
        # High-priority intents must be answered by the intent responder skill alone.
        high_priority_intent_detected = any(
            [k for k in intent_catcher_intents if k in high_priority_intents["dff_intent_responder_skill"]]
        )
        low_priority_intent_detected = any([k for k in intent_catcher_intents if k in low_priority_intents])
        detected_topics = set(get_topics(user_uttr, which="all"))
        # Threshold 0.96 on the factoid classifier score gates factoid_qa.
        is_factoid = get_factoid(user_uttr).get("is_factoid", 0.0) > 0.96
        is_celebrity_mentioned = check_is_celebrity_mentioned(user_uttr)
        if_choose_topic_detected = if_choose_topic(user_uttr, bot_uttr)
        if_lets_chat_about_particular_topic_detected = if_chat_about_particular_topic(user_uttr, bot_uttr)
        dialog_len = len(dialog["human_utterances"])
        # The three conditions below describe likely FALSE-POSITIVE intent
        # detections (e.g. "exit" on the very first turn); when one holds,
        # the corresponding intent annotation is zeroed out.
        exit_cond = "exit" in intent_catcher_intents and (
            dialog_len == 1 or (dialog_len == 2 and len(user_uttr_text.split()) > 3)
        )
        repeat_cond = (
            "repeat" in intent_catcher_intents
            and prev_active_skill in UNPREDICTABLE_SKILLS
            and re.match(r"^what.?$", user_uttr_text)
        )
        cant_do_cond = (
            "cant_do" in intent_catcher_intents
            and "play" in user_uttr_text
            and any([phrase in bot_uttr_text_lower for phrase in GREETING_QUESTIONS_TEXTS])
        )
        for intent_name, condition in zip(["exit", "repeat", "cant_do"], [exit_cond, repeat_cond, cant_do_cond]):
            if condition:
                high_priority_intent_detected = False
                not_detected = {"detected": 0, "confidence": 0.0}
                # Overwrite the annotation in both mirror locations of the dialog state.
                user_uttr["annotations"]["intent_catcher"][intent_name] = not_detected
                dialog["utterances"][-1]["annotations"]["intent_catcher"][intent_name] = not_detected
        if "/new_persona" in user_uttr_text:
            # process /new_persona command
            skills_for_uttr.append("personality_catcher")  # TODO: rm crutch of personality_catcher
        elif user_uttr_text == "/get_dialog_id":
            skills_for_uttr.append("dummy_skill")
        elif high_priority_intent_detected:
            # process intent with corresponding IntentResponder
            skills_for_uttr.append("dff_intent_responder_skill")
        elif is_sensitive_topic_and_request(user_uttr):
            # process user utterance with sensitive content, "safe mode"
            # adding open-domain skills without opinion expression
            skills_for_uttr.append("dff_program_y_dangerous_skill")
            skills_for_uttr.append("meta_script_skill")
            skills_for_uttr.append("personal_info_skill")
            skills_for_uttr.append("factoid_qa")
            skills_for_uttr.append("dff_grounding_skill")
            skills_for_uttr.append("dummy_skill")
            skills_for_uttr.append("small_talk_skill")
            if if_lets_chat_about_particular_topic_detected:
                skills_for_uttr.append("news_api_skill")
            if if_special_weather_turn_on(user_uttr, bot_uttr):
                skills_for_uttr.append("dff_weather_skill")
            if is_celebrity_mentioned:
                skills_for_uttr.append("dff_gossip_skill")
            # adding closed-domain skills (restricted set in safe mode)
            skills_for_uttr += turn_on_skills(
                detected_topics,
                intent_catcher_intents,
                user_uttr_text,
                bot_uttr.get("text", ""),
                available_skills=[
                    "news_api_skill",
                    "dff_coronavirus_skill",
                    "dff_funfact_skill",
                    "dff_weather_skill",
                    "dff_short_story_skill",
                ],
            )
            # adding linked-to skills
            skills_for_uttr.extend(get_linked_to_skills(dialog))
            skills_for_uttr.extend(get_previously_active_skill(dialog))
        else:  # general case
            if low_priority_intent_detected:
                skills_for_uttr.append("dff_intent_responder_skill")
            # adding open-domain skills
            skills_for_uttr.append("dff_grounding_skill")
            skills_for_uttr.append("dff_program_y_skill")
            skills_for_uttr.append("personal_info_skill")
            skills_for_uttr.append("meta_script_skill")
            skills_for_uttr.append("dummy_skill")
            skills_for_uttr.append("dialogpt")  # generative skill
            skills_for_uttr.append("small_talk_skill")
            skills_for_uttr.append("knowledge_grounding_skill")
            skills_for_uttr.append("convert_reddit")
            skills_for_uttr.append("comet_dialog_skill")
            skills_for_uttr.append("dff_program_y_wide_skill")
            # adding friendship only in the beginning of the dialog
            if len(dialog["utterances"]) < 20:
                skills_for_uttr.append("dff_friendship_skill")
            if if_choose_topic_detected or if_lets_chat_about_particular_topic_detected:
                skills_for_uttr.append("knowledge_grounding_skill")
                skills_for_uttr.append("news_api_skill")
            switch_wiki_skill, _ = if_switch_wiki_skill(user_uttr, bot_uttr)
            if switch_wiki_skill or switch_wiki_skill_on_news(user_uttr, bot_uttr):
                skills_for_uttr.append("dff_wiki_skill")
            if if_switch_test_skill(user_uttr, bot_uttr):
                skills_for_uttr.append("dff_art_skill")
            # adding factoidQA Skill if user utterance is factoid question
            if is_factoid:
                skills_for_uttr.append("factoid_qa")
            if "dummy_skill" in prev_active_skill and len(dialog["utterances"]) > 4:
                skills_for_uttr.append("dummy_skill_dialog")
            # if user mentions
            if is_celebrity_mentioned:
                skills_for_uttr.append("dff_gossip_skill")
            # some special cases
            if if_special_weather_turn_on(user_uttr, bot_uttr):
                skills_for_uttr.append("dff_weather_skill")
            if if_turn_on_emotion(user_uttr, bot_uttr):
                skills_for_uttr.append("emotion_skill")
            if get_named_locations(user_uttr):
                skills_for_uttr.append("dff_travel_skill")
            if extract_movies_names_from_annotations(user_uttr):
                skills_for_uttr.append("dff_movie_skill")
            # adding closed-domain skills (full set in the general case)
            skills_for_uttr += turn_on_skills(
                detected_topics,
                intent_catcher_intents,
                user_uttr_text,
                bot_uttr.get("text", ""),
                available_skills=[
                    "dff_art_skill",
                    "dff_movie_skill",
                    "dff_book_skill",
                    "news_api_skill",
                    "dff_food_skill",
                    "dff_animals_skill",
                    "dff_sport_skill",
                    "dff_music_skill",
                    "dff_science_skill",
                    "dff_gossip_skill",
                    "game_cooperative_skill",
                    "dff_weather_skill",
                    "dff_funfact_skill",
                    "dff_travel_skill",
                    "dff_coronavirus_skill",
                    "dff_bot_persona_skill",
                    "dff_gaming_skill",
                    "dff_short_story_skill",
                ],
            )
            # adding linked-to skills
            skills_for_uttr.extend(get_linked_to_skills(dialog))
            skills_for_uttr.extend(get_previously_active_skill(dialog))
        # NOW IT IS NOT ONLY FOR USUAL CONVERSATION BUT ALSO FOR SENSITIVE/HIGH PRIORITY INTENTS/ETC
        if "dff_coronavirus_skill" in skills_for_uttr:
            # no convert & comet when about coronavirus
            if "convert_reddit" in skills_for_uttr:
                skills_for_uttr.remove("convert_reddit")
            if "comet_dialog_skill" in skills_for_uttr:
                skills_for_uttr.remove("comet_dialog_skill")
        if len(dialog["utterances"]) > 1:
            # Use only misheard asr skill if asr is not confident and skip it for greeting
            if user_uttr_annotations.get("asr", {}).get("asr_confidence", "high") == "very_low":
                skills_for_uttr = ["misheard_asr"]
        if "/alexa_" in user_uttr_text:
            # adding alexa handler for Amazon Alexa specific commands
            skills_for_uttr = ["alexa_handler"]
        logger.info(f"Selected skills: {skills_for_uttr}")
        total_time = time.time() - st_time
        logger.info(f"rule_based_selector exec time = {total_time:.3f}s")
        # Deduplicate before reporting back.
        asyncio.create_task(callback(task_id=payload["task_id"], response=list(set(skills_for_uttr))))
    except Exception as e:
        total_time = time.time() - st_time
        logger.info(f"rule_based_selector exec time = {total_time:.3f}s")
        logger.exception(e)
        sentry_sdk.capture_exception(e)
        # Fallback: a minimal pair of skills so the dialog can still proceed.
        asyncio.create_task(callback(task_id=payload["task_id"], response=["dff_program_y_skill", "dummy_skill"]))
def about_book(annotated_utterance):
    """Truthy when the utterance concerns books.

    Returns True when the dialog-act topic classifier detects
    "Entertainment_Books"; otherwise returns the regex match (or None)
    of BOOK_PATTERN against the raw utterance text, preserving the
    original truthy/falsy contract.
    """
    if "Entertainment_Books" in get_topics(annotated_utterance, which="cobot_dialogact_topics"):
        return True
    return re.search(BOOK_PATTERN, annotated_utterance["text"])
def respond():
    """HTTP handler of the news skill: compose a news response per dialog.

    For each dialog in the request batch, uses the (topic, status, news
    sample) triples from ``collect_topics_and_statuses`` to build a response
    string, a confidence, and per-turn human/bot/skill attributes, and
    returns them zipped as JSON. Statuses drive the state machine:
    "headline" -> offer/read a headline, "details" -> read the description,
    "declined" -> stay silent, anything else -> try to offer more topics.
    """
    st_time = time()
    dialogs = request.json["dialogs"]
    responses = []
    confidences = []
    human_attributes = []
    bot_attributes = []
    attributes = []
    topics, statuses, curr_news_samples = collect_topics_and_statuses(dialogs)
    topics = [remove_punct_and_articles(topic) for topic in topics]
    topics = np.array(topics)
    statuses = np.array(statuses)
    curr_news_samples = np.array(curr_news_samples)
    for dialog, curr_topic, curr_status, result in zip(dialogs, topics, statuses, curr_news_samples):
        logger.info(f"Composing answer for topic: {curr_topic} and status: {curr_status}.")
        logger.info(f"Result: {result}.")
        # Carry over (or initialize) the per-user attribute containers.
        human_attr = {}
        human_attr["used_links"] = dialog["human"]["attributes"].get("used_links", defaultdict(list))
        human_attr["disliked_skills"] = dialog["human"]["attributes"].get("disliked_skills", [])
        human_attr["news_api_skill"] = dialog["human"]["attributes"].get("news_api_skill", {})
        human_attr["news_api_skill"]["discussed_news"] = human_attr["news_api_skill"].get("discussed_news", [])
        bot_attr = {}
        # the only difference is that result is already is a dictionary with news.
        lets_chat_about_particular_topic = if_chat_about_particular_topic(
            dialog["human_utterances"][-1], dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) else {}
        )
        curr_uttr = dialog["human_utterances"][-1]
        # "about news" = topic classifier says News OR text matches the news
        # templates, and the text does NOT match the false-positive templates.
        about_news = ({"News"} & set(get_topics(curr_uttr, which="cobot_topics"))) or re.search(
            NEWS_TEMPLATES, curr_uttr["text"].lower()
        )
        about_news = about_news and not re.search(FALSE_NEWS_TEMPLATES, curr_uttr["text"].lower())
        prev_bot_uttr_lower = dialog["bot_utterances"][-1]["text"].lower() if len(dialog["bot_utterances"]) > 0 else ""
        if lets_chat_about_particular_topic:
            prev_news_skill_output = get_skill_outputs_from_dialog(
                dialog["utterances"][-3:], skill_name="news_api_skill", activated=True
            )
            if result and len(prev_news_skill_output) == 0:
                # it was a lets chat about topic and we found appropriate news
                if curr_topic == "all":
                    if about_news:
                        response = OFFER_BREAKING_NEWS
                        confidence = DEFAULT_NEWS_OFFER_CONFIDENCE  # 1.0
                        attr = {
                            "news_status": OFFERED_BREAKING_NEWS_STATUS,
                            "news_topic": "all",
                            "can_continue": CAN_CONTINUE_PROMPT,
                            "curr_news": result,
                        }
                        # Remember the offered article so it is not repeated later.
                        if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                            human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
                    else:
                        response = ""
                        confidence = 0.0
                        attr = {}
                else:
                    response = SAY_TOPIC_SPECIFIC_NEWS.replace("TOPIC", curr_topic)
                    response = f"{response} {result['title']}.. {OFFER_MORE}"
                    confidence = LINKTO_CONFIDENCE
                    attr = {
                        "news_status": OFFERED_NEWS_DETAILS_STATUS,
                        "news_topic": curr_topic,
                        "curr_news": result,
                        "can_continue": CAN_CONTINUE_PROMPT,
                    }
                    if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                        human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
                responses.append(response)
                confidences.append(confidence)
                human_attributes.append(human_attr)
                bot_attributes.append(bot_attr)
                attributes.append(attr)
                continue
            else:
                # Skill was already active recently or no news found: stay silent.
                responses.append("")
                confidences.append(0.0)
                human_attributes.append(human_attr)
                bot_attributes.append(bot_attr)
                attributes.append({})
                continue
        if result:
            logger.info("Topic: {}".format(curr_topic))
            logger.info("News found: {}".format(result))
            if curr_status == "headline":
                if len(dialog["human_utterances"]) > 0:
                    curr_uttr = dialog["human_utterances"][-1]
                else:
                    curr_uttr = {"text": ""}
                if OFFER_BREAKING_NEWS.lower() in prev_bot_uttr_lower and is_yes(curr_uttr):
                    # User accepted the breaking-news offer: read the headline.
                    response = f"Here it is: {result['title']}.. {OFFER_MORE}"
                    confidence = DEFAULT_NEWS_OFFER_CONFIDENCE
                    attr = {
                        "news_status": OFFERED_NEWS_DETAILS_STATUS,
                        "news_topic": curr_topic,
                        "curr_news": result,
                        "can_continue": MUST_CONTINUE,
                    }
                    if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                        human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
                elif curr_topic == "all":
                    prev_news_skill_output = get_skill_outputs_from_dialog(
                        dialog["utterances"][-3:], skill_name="news_api_skill", activated=True
                    )
                    if (
                        len(prev_news_skill_output) > 0
                        and prev_news_skill_output[-1].get("news_status", "") == OFFERED_NEWS_TOPIC_CATEGORIES_STATUS
                    ):
                        # topic was not detected
                        response = ""
                        confidence = 0.0
                        attr = {}
                    else:
                        response = f"Here is one of the latest news that I found: {result['title']}.. {OFFER_MORE}"
                        confidence = DEFAULT_NEWS_OFFER_CONFIDENCE
                        attr = {
                            "news_status": OFFERED_NEWS_DETAILS_STATUS,
                            "news_topic": curr_topic,
                            "curr_news": result,
                            "can_continue": MUST_CONTINUE,
                        }
                        if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                            human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
                else:
                    response = (
                        f"Here is one of the latest news on topic {curr_topic}: " f"{result['title']}.. {OFFER_MORE}"
                    )
                    confidence = DEFAULT_NEWS_OFFER_CONFIDENCE
                    attr = {
                        "news_status": OFFERED_NEWS_DETAILS_STATUS,
                        "news_topic": curr_topic,
                        "curr_news": result,
                        "can_continue": MUST_CONTINUE,
                    }
                    if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                        human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
            elif curr_status == "details":
                response = f"In details: {result['description']}. {ASK_OPINION}"
                confidence = DEFAULT_NEWS_DETAILS_CONFIDENCE
                attr = {
                    "news_status": OPINION_REQUEST_STATUS,
                    "news_topic": curr_topic,
                    "curr_news": result,
                    "can_continue": MUST_CONTINUE,
                }
                if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                    human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
            elif curr_status == "declined":
                # user declined to get latest news, topical news, or we did not find news request
                response, confidence, human_attr, bot_attr, attr = "", 0.0, {}, {}, {}
            else:
                prev_news_skill_output = get_skill_outputs_from_dialog(
                    dialog["utterances"][-3:], skill_name="news_api_skill", activated=True
                )
                curr_uttr = dialog["human_utterances"][-1]
                # status finished is here
                if len(prev_news_skill_output) > 0 and prev_news_skill_output[-1].get("news_status", "") not in [
                    OFFERED_NEWS_DETAILS_STATUS,
                    OFFERED_NEWS_TOPIC_CATEGORIES_STATUS,
                ]:
                    result = prev_news_skill_output[-1].get("curr_news", {})
                    # try to offer more news
                    topics_list = NEWS_TOPICS[:]
                    random.shuffle(topics_list)
                    offered_topics = []
                    # Pick the first two shuffled topics that have fresh news.
                    for topic in topics_list:
                        curr_topic_result = get_news_for_current_entity(
                            topic, curr_uttr, human_attr["news_api_skill"]["discussed_news"]
                        )
                        if len(curr_topic_result) > 0:
                            offered_topics.append(topic)
                            logger.info("Topic: {}".format(topic))
                            logger.info("Result: {}".format(curr_topic_result))
                        if len(offered_topics) == 2:
                            break
                    if len(offered_topics) == 2:
                        # two topics with result news were found
                        response = (
                            f"{random.choice(WHAT_TYPE_OF_NEWS)} "
                            f"{offered_topics[0]} or {offered_topics[1].lower()}?"
                        )
                        confidence = WHAT_TYPE_OF_NEWS_CONFIDENCE
                        attr = {
                            "news_status": OFFERED_NEWS_TOPIC_CATEGORIES_STATUS,
                            "can_continue": CAN_CONTINUE_PROMPT,
                            "news_topic": " ".join(offered_topics),
                            "curr_news": result,
                        }
                        if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                            human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
                    else:
                        # can't find enough topics for the user to offer
                        response, confidence, human_attr, bot_attr, attr = link_to_other_skills(
                            human_attr, bot_attr, curr_uttr
                        )
                else:
                    # news was offered previously but the user refuse to get it
                    # or false news request was detected
                    response, confidence, human_attr, bot_attr, attr = "", 0.0, {}, {}, {}
        else:
            # no found news
            logger.info("No particular news found.")
            new_result = get_news_for_current_entity("all", curr_uttr, human_attr["news_api_skill"]["discussed_news"])
            if curr_topic != "all" and len(new_result.get("title", "")) > 0:
                # Topic-specific search failed but breaking news exists: offer it.
                logger.info("Offer latest news.")
                response = f"Sorry, I could not find some specific news. {OFFER_BREAKING_NEWS}"
                confidence = NOT_SPECIFIC_NEWS_OFFER_CONFIDENCE
                attr = {
                    "news_status": OFFERED_BREAKING_NEWS_STATUS,
                    "news_topic": "all",
                    "can_continue": MUST_CONTINUE,
                    "curr_news": new_result,
                }
                if attr["curr_news"]["url"] not in human_attr["news_api_skill"]["discussed_news"]:
                    human_attr["news_api_skill"]["discussed_news"] += [attr["curr_news"]["url"]]
            elif OFFER_BREAKING_NEWS.lower() in prev_bot_uttr_lower and is_yes(curr_uttr):
                logger.info("No latest news found.")
                response = (
                    "Sorry, seems like all the news slipped my mind. Let's chat about something else. "
                    "What do you want to talk about?"
                )
                confidence = NOT_SPECIFIC_NEWS_OFFER_CONFIDENCE
                attr = {"news_status": OFFERED_BREAKING_NEWS_STATUS, "can_continue": MUST_CONTINUE}
            else:
                response, confidence, human_attr, bot_attr, attr = "", 0.0, {}, {}, {}
        responses.append(response)
        confidences.append(confidence)
        human_attributes.append(human_attr)
        bot_attributes.append(bot_attr)
        attributes.append(attr)
    total_time = time() - st_time
    logger.info(f"news_api_skill exec time: {total_time:.3f}s")
    return jsonify(list(zip(responses, confidences, human_attributes, bot_attributes, attributes)))
def collect_topics_and_statuses(dialogs):
    """For each dialog, decide the news topic, the conversation status, and a news sample.

    Returns three parallel lists: ``topics`` (topic string or "all"),
    ``statuses`` (one of "headline"/"details"/"finished"/"declined"), and
    ``curr_news_samples`` (a news dict, possibly {}). The first big branch
    handles the case where this skill produced the previous turn (reacting
    to its own offers); the else branch handles a fresh news request.
    """
    topics = []
    statuses = []
    curr_news_samples = []
    for dialog in dialogs:
        curr_uttr = dialog["human_utterances"][-1]
        prev_uttr = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) else {}
        human_attr = {}
        human_attr["news_api_skill"] = dialog["human"]["attributes"].get("news_api_skill", {})
        # URLs of articles already shown to this user — used to avoid repeats.
        discussed_news = human_attr["news_api_skill"].get("discussed_news", [])
        prev_bot_uttr = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
        prev_bot_uttr_lower = prev_bot_uttr.get("text", "").lower()
        prev_news_skill_output = get_skill_outputs_from_dialog(
            dialog["utterances"][-3:], skill_name="news_api_skill", activated=True
        )
        if len(prev_news_skill_output) > 0 and len(prev_news_skill_output[-1]) > 0:
            logger.info(f"News skill was prev active.")
            prev_news_skill_output = prev_news_skill_output[-1]
            prev_status = prev_news_skill_output.get("news_status", "")
            prev_topic = prev_news_skill_output.get("news_topic", "all")
            last_news = prev_news_skill_output.get("curr_news", {})
            if prev_status == OFFERED_NEWS_DETAILS_STATUS:
                # We offered details on an article; yes -> read details, no -> finish.
                topics.append(prev_topic)
                if is_yes(curr_uttr):
                    logger.info(f"Detected topic for news: {prev_topic}")
                    statuses.append("details")
                else:
                    logger.info("User refused to get news details")
                    statuses.append("finished")
                curr_news_samples.append(last_news)
            elif prev_status == OFFERED_BREAKING_NEWS_STATUS or OFFER_BREAKING_NEWS.lower() in prev_bot_uttr_lower:
                topics.append("all")
                if is_yes(curr_uttr):
                    logger.info("Detected topic for news: all.")
                    statuses.append("headline")
                else:
                    logger.info("User refuse to get latest news")
                    statuses.append("declined")
                curr_news_samples.append(last_news)
            elif re.search(TELL_MORE_NEWS_TEMPLATES, curr_uttr["text"].lower()):
                # "tell me more" — scan the last 7 utterances for the last discussed article.
                prev_news_skill_output = get_skill_outputs_from_dialog(
                    dialog["utterances"][-7:], skill_name="news_api_skill", activated=True
                )
                for prev_news_out in prev_news_skill_output:
                    if prev_news_out.get("curr_news", {}) != {}:
                        last_news = prev_news_out.get("curr_news", {})
                logger.info(f"User requested more news. Prev news was: {last_news}")
                topics.append(prev_topic)
                statuses.append("headline")
                curr_news_samples.append(get_news_for_current_entity(prev_topic, curr_uttr, discussed_news))
            elif prev_status == OFFERED_NEWS_TOPIC_CATEGORIES_STATUS:
                # We offered two topic categories; figure out which one was chosen.
                if not (news_rejection(curr_uttr["text"].lower()) or is_no(curr_uttr)):
                    logger.info("User chose the topic for news")
                    if ANY_TOPIC_PATTERN.search(curr_uttr["text"]):
                        topics.append(prev_topic.split()[0])
                        curr_news_samples.append(
                            get_news_for_current_entity(prev_topic.split()[0], curr_uttr, discussed_news)
                        )
                    elif SECOND_TOPIC_PATTERN.search(curr_uttr["text"]):
                        topics.append(prev_topic.split()[1])
                        curr_news_samples.append(
                            get_news_for_current_entity(prev_topic.split()[1], curr_uttr, discussed_news)
                        )
                    else:
                        entities = extract_topics(curr_uttr)
                        if len(entities) != 0:
                            topics.append(entities[-1])
                            curr_news_samples.append(
                                get_news_for_current_entity(entities[-1], curr_uttr, discussed_news)
                            )
                        else:
                            topics.append("all")
                            curr_news_samples.append(get_news_for_current_entity("all", curr_uttr, discussed_news))
                    logger.info(f"Chosen topic: {topics}")
                    statuses.append("headline")
                else:
                    logger.info("User doesn't want to get any news")
                    topics.append("all")
                    statuses.append("declined")
                    curr_news_samples.append({})
            elif prev_status == OFFER_TOPIC_SPECIFIC_NEWS_STATUS:
                topics.append(prev_topic)
                if is_yes(curr_uttr):
                    logger.info(f"User wants to listen news about {prev_topic}.")
                    statuses.append("headline")
                else:
                    logger.info(f"User doesn't want to listen news about {prev_topic}.")
                    statuses.append("declined")
                curr_news_samples.append(last_news)
            else:
                logger.info("News skill was active and now can offer more news.")
                topics.append("all")
                statuses.append("finished")
                curr_news_samples.append(get_news_for_current_entity("all", curr_uttr, discussed_news))
        else:
            logger.info(f"News skill was NOT active.")
            # Fresh request: news topic classifier or news templates, minus false positives.
            about_news = (
                ({"News"} & set(get_topics(curr_uttr, which="cobot_topics")))
                or re.search(NEWS_TEMPLATES, curr_uttr["text"].lower())
            ) and not re.search(FALSE_NEWS_TEMPLATES, curr_uttr["text"].lower())
            lets_chat_about_particular_topic = if_chat_about_particular_topic(curr_uttr, prev_uttr)
            lets_chat_about_news = if_chat_about_particular_topic(curr_uttr, prev_uttr, compiled_pattern=NEWS_TEMPLATES)
            _was_offer_news = was_offer_news_about_topic(prev_bot_uttr_lower)
            _offered_by_bot_entities = EXTRACT_OFFERED_NEWS_TOPIC_TEMPLATE.findall(prev_bot_uttr_lower)
            if about_news:
                # the request contains something about news
                entities = extract_topics(curr_uttr)
                logger.info(f"News request on entities: `{entities}`")
                if re.search(TELL_MORE_NEWS_TEMPLATES, curr_uttr["text"].lower()):
                    # user requestd more news.
                    # look for the last 3 turns and find last discussed news sample
                    logger.info("Tell me more news request.")
                    prev_news_skill_output = get_skill_outputs_from_dialog(
                        dialog["utterances"][-7:], skill_name="news_api_skill", activated=True
                    )
                    if len(prev_news_skill_output) > 0 and len(prev_news_skill_output[-1]) > 0:
                        prev_news_skill_output = prev_news_skill_output[-1]
                        prev_topic = prev_news_skill_output.get("news_topic", "all")
                    else:
                        prev_topic = "all"
                    logger.info("News skill was NOT prev active. User requested more news.")
                    topics.append(prev_topic)
                    statuses.append("headline")
                    curr_news_samples.append(get_news_for_current_entity(prev_topic, curr_uttr, discussed_news))
                elif len(entities) == 0:
                    # no entities or nounphrases -> no special news request, get all news
                    logger.info("News request, no entities and nounphrases.")
                    topics.append("all")
                    statuses.append("headline")
                    curr_news_samples.append(get_news_for_current_entity("all", curr_uttr, discussed_news))
                else:
                    # found entities or nounphrases -> special news request,
                    # get the last mentioned entity
                    # if no named entities, get the last mentioned nounphrase
                    logger.info(f"Detected topic for news: {entities[-1]}")
                    topics.append(entities[-1])
                    statuses.append("headline")
                    curr_news_samples.append(get_news_for_current_entity(entities[-1], curr_uttr, discussed_news))
            elif OFFER_BREAKING_NEWS.lower() in prev_bot_uttr_lower:
                # news skill was not previously active
                topics.append("all")
                if is_yes(curr_uttr) or lets_chat_about_news:
                    logger.info("Detected topic for news: all.")
                    statuses.append("headline")
                else:
                    logger.info("Detected topic for news: all. Refused to get latest news")
                    statuses.append("declined")
                curr_news_samples.append(get_news_for_current_entity("all", curr_uttr, discussed_news))
            elif _was_offer_news and _offered_by_bot_entities:
                # The bot itself offered news on specific entities last turn.
                topics.append(_offered_by_bot_entities[-1])
                if is_yes(curr_uttr):
                    logger.info(f"Bot offered news on entities: `{_offered_by_bot_entities}`")
                    statuses.append("headline")
                else:
                    logger.info(f"Bot offered news on entities: `{_offered_by_bot_entities}`. User refused.")
                    statuses.append("declined")
                curr_news_samples.append(
                    get_news_for_current_entity(_offered_by_bot_entities[-1], curr_uttr, discussed_news)
                )
            elif lets_chat_about_particular_topic:
                # the request contains something about news
                entities = extract_topics(curr_uttr)
                logger.info(f"News request on entities: `{entities}`")
                if len(entities) == 0:
                    # no entities or nounphrases & lets_chat_about_particular_topic
                    logger.info("No news request, no entities and nounphrases, but lets chat.")
                    topics.append("all")
                    statuses.append("declined")
                    curr_news_samples.append({})
                else:
                    # found entities or nounphrases -> special news request,
                    # get the last mentioned entity
                    # if no named entities, get the last mentioned nounphrase
                    logger.info(f"Detected topic for news: {entities[-1]}")
                    topics.append(entities[-1])
                    statuses.append("headline")
                    curr_news_samples.append(get_news_for_current_entity(entities[-1], curr_uttr, discussed_news))
            else:
                logger.info("Didn't detected news request.")
                topics.append("all")
                statuses.append("declined")
                curr_news_samples.append({})
    return topics, statuses, curr_news_samples
async def send(self, payload: Dict, callback: Callable):
    """Dummy-skill candidate generator.

    Builds several fallback response candidates for the current turn — a
    random "don't know" answer, an optional question/fact reusing one of the
    user's nounphrases, and an optional link-to question — each with its own
    confidence and attributes, then reports the five parallel lists
    (candidates, confidences, human/bot attrs, attrs) through ``callback``.
    On failure the exception object itself is sent as the response.
    """
    try:
        st_time = time.time()
        dialog = deepcopy(payload["payload"]["dialogs"][0])
        is_sensitive_case = is_sensitive_situation(dialog["human_utterances"][-1])
        all_prev_active_skills = payload["payload"]["all_prev_active_skills"][0]
        curr_topics = get_topics(dialog["human_utterances"][-1], which="cobot_topics")
        curr_nounphrases = get_entities(dialog["human_utterances"][-1], only_named=False, with_labels=False)
        if len(curr_topics) == 0:
            curr_topics = ["Phatic"]
        logger.info(f"Found topics: {curr_topics}")
        # Clean each nounphrase with the module-level regexes; drop ignorable ones.
        # NOTE: the loop variable `np` shadows a potential numpy alias — confirm
        # numpy is not imported as `np` in this module.
        for i in range(len(curr_nounphrases)):
            np = re.sub(np_remove_expr, "", curr_nounphrases[i])
            np = re.sub(rm_spaces_expr, " ", np)
            if re.search(np_ignore_expr, np):
                curr_nounphrases[i] = ""
            else:
                curr_nounphrases[i] = np.strip()
        curr_nounphrases = [np for np in curr_nounphrases if len(np) > 0]
        logger.info(f"Found nounphrases: {curr_nounphrases}")
        cands = []
        confs = []
        human_attrs = []
        bot_attrs = []
        attrs = []
        # Always include a random "don't know" fallback candidate.
        cands += [choice(donotknow_answers)]
        confs += [0.5]
        attrs += [{"type": "dummy"}]
        human_attrs += [{}]
        bot_attrs += [{}]
        # Nounphrase-matched questions only later in the dialog and never in sensitive mode.
        if len(dialog["utterances"]) > 14 and not is_sensitive_case:
            questions_same_nps = []
            for i, nphrase in enumerate(curr_nounphrases):
                for q_id in NP_QUESTIONS.get(nphrase, []):
                    questions_same_nps += [QUESTIONS_MAP[str(q_id)]]
            if len(questions_same_nps) > 0:
                logger.info("Found special nounphrases for questions. Return question with the same nounphrase.")
                cands += [choice(questions_same_nps)]
                confs += [0.5]
                attrs += [{"type": "nounphrase_question"}]
                human_attrs += [{}]
                bot_attrs += [{}]
        link_to_question, human_attr = get_link_to_question(dialog, all_prev_active_skills)
        if link_to_question:
            _prev_bot_uttr = dialog["bot_utterances"][-2]["text"] if len(dialog["bot_utterances"]) > 1 else ""
            _bot_uttr = dialog["bot_utterances"][-1]["text"] if len(dialog["bot_utterances"]) > 0 else ""
            _prev_active_skill = (
                dialog["bot_utterances"][-1]["active_skill"] if len(dialog["bot_utterances"]) > 0 else ""
            )
            # "No" to the FIRST link-to question in a row (and not from the friendship skill).
            _no_to_first_linkto = any([phrase in _bot_uttr for phrase in LINK_TO_PHRASES])
            _no_to_first_linkto = _no_to_first_linkto and all(
                [phrase not in _prev_bot_uttr for phrase in LINK_TO_PHRASES]
            )
            _no_to_first_linkto = _no_to_first_linkto and is_no(dialog["human_utterances"][-1])
            _no_to_first_linkto = _no_to_first_linkto and _prev_active_skill != "dff_friendship_skill"
            _if_switch_topic = is_switch_topic(dialog["human_utterances"][-1])
            bot_uttr_dict = dialog["bot_utterances"][-1] if len(dialog["bot_utterances"]) > 0 else {}
            _if_choose_topic = if_choose_topic(dialog["human_utterances"][-1], bot_uttr_dict)
            _is_ask_me_something = ASK_ME_QUESTION_PATTERN.search(dialog["human_utterances"][-1]["text"])
            if len(dialog["human_utterances"]) > 1:
                # Reactions to a previous "cant_do" intent.
                _was_cant_do = "cant_do" in get_intents(dialog["human_utterances"][-2]) and (
                    len(curr_nounphrases) == 0 or is_yes(dialog["human_utterances"][-1])
                )
                _was_cant_do_stop_it = "cant_do" in get_intents(dialog["human_utterances"][-2]) and is_no(
                    dialog["human_utterances"][-1]
                )
            else:
                _was_cant_do = False
                _was_cant_do_stop_it = False
            if _was_cant_do_stop_it:
                link_to_question = "Sorry, bye! #+#exit"
                confs += [1.0]  # finish dialog request
            elif _no_to_first_linkto:
                confs += [0.99]
            elif _is_ask_me_something or _if_switch_topic or _was_cant_do or _if_choose_topic:
                confs += [1.0]  # Use it only as response selector retrieve skill output modifier
            else:
                confs += [0.05]  # Use it only as response selector retrieve skill output modifier
            cands += [link_to_question]
            attrs += [{"type": "link_to_for_response_selector"}]
            human_attrs += [human_attr]
            bot_attrs += [{}]
        # Nounphrase-matched facts (skipped in sensitive mode).
        facts_same_nps = []
        for i, nphrase in enumerate(curr_nounphrases):
            for fact_id in NP_FACTS.get(nphrase, []):
                facts_same_nps += [
                    f"Well, now that you've mentioned {nphrase}, I've remembered this. {FACTS_MAP[str(fact_id)]}. "
                    f"{(opinion_request_question() if random.random() < ASK_QUESTION_PROB else '')}"
                ]
        if len(facts_same_nps) > 0 and not is_sensitive_case:
            logger.info("Found special nounphrases for facts. Return fact with the same nounphrase.")
            cands += [choice(facts_same_nps)]
            confs += [0.5]
            attrs += [{"type": "nounphrase_fact"}]
            human_attrs += [{}]
            bot_attrs += [{}]
        total_time = time.time() - st_time
        logger.info(f"dummy_skill exec time: {total_time:.3f}s")
        asyncio.create_task(
            callback(task_id=payload["task_id"], response=[cands, confs, human_attrs, bot_attrs, attrs])
        )
    except Exception as e:
        logger.exception(e)
        sentry_sdk.capture_exception(e)
        asyncio.create_task(callback(task_id=payload["task_id"], response=e))