def get_comment_phrase(dialog, attr):
    """
    Propose a comment phrase for the considered topic (the one following the user's opinion
    about the proposed topic) for the meta-script, and assign attributes for the dialog.
    This is the last step of the meta-script for now.

    Args:
        dialog: dialog itself
        attr: dictionary of current attributes

    Returns:
        tuple of text response, confidence and response attributes
    """
    used_templates = get_used_attributes_by_name(
        dialog["utterances"],
        attribute_name="meta_script_comment_template",
        value_by_default=None,
        activated=True,
    )[-2:]
    sentiment = get_sentiment(dialog["human_utterances"][-1], probs=False)[0]
    template = get_not_used_template(used_templates, meta_script_skill_constants.COMMENTS[sentiment])
    attr["meta_script_comment_template"] = template

    response = template
    confidence = meta_script_skill_constants.DEFAULT_CONFIDENCE
    attr["can_continue"] = CAN_NOT_CONTINUE
    return response, confidence, attr
def sentiment_detected_handler(ctx: Context, actor: Actor) -> bool:
    # `name` (the sentiment label) and `threshold` are free variables here; they are
    # expected to be bound by an enclosing factory or partial that builds this condition.
    if ctx.validation:
        return False
    last_request = ctx.misc.get("agent", {}).get("dialog", {}).get("human_utterances", [{}])[-1]
    sentiment_probs = get_sentiment(last_request, probs=True)
    return sentiment_probs.get(name, 0) >= threshold
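# A minimal sketch (an assumption, not from the source) of how the free variables `name`
# and `threshold` in the handler above could be supplied: the handler reads them as
# closure variables, which suggests it is produced by a factory. The factory name
# `make_sentiment_detected_handler` is hypothetical; Context, Actor and get_sentiment
# are the same imports the handler already relies on.
def make_sentiment_detected_handler(name: str, threshold: float = 0.6):
    def sentiment_detected_handler(ctx: Context, actor: Actor) -> bool:
        if ctx.validation:
            return False
        last_request = ctx.misc.get("agent", {}).get("dialog", {}).get("human_utterances", [{}])[-1]
        return get_sentiment(last_request, probs=True).get(name, 0) >= threshold

    return sentiment_detected_handler


# Hypothetical usage: a condition that fires on strongly negative user turns.
negative_detected_handler = make_sentiment_detected_handler("negative", threshold=0.75)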
def check_fav_request(ngrams, vars):
    flag = False
    annot_utt = state_utils.get_last_human_utterance(vars)
    utt = annot_utt["text"].lower()
    if any(["favorite" in utt, "like" in utt, "love" in utt, "prefer" in utt]) and (
        "negative" not in get_sentiment(annot_utt, probs=False)
    ):
        flag = True
    logger.info(f"check_fav_request {flag}")
    return flag
def get_human_sentiment(vars, negative_threshold=0.5, positive_threshold=0.333):
    sentiment_probs = common_utils.get_sentiment(vars["agent"]["dialog"]["human_utterances"][-1], probs=True)
    if sentiment_probs and isinstance(sentiment_probs, dict):
        max_sentiment_prob = max(sentiment_probs.values())
        max_sentiments = [sentiment for sentiment in sentiment_probs if sentiment_probs[sentiment] == max_sentiment_prob]
        if max_sentiments:
            max_sentiment = max_sentiments[0]
            return_negative = max_sentiment == "negative" and max_sentiment_prob >= negative_threshold
            return_positive = max_sentiment == "positive" and max_sentiment_prob >= positive_threshold
            if return_negative or return_positive:
                return max_sentiment
    return "neutral"
def get_human_sentiment(ctx: Context, actor: Actor, negative_threshold=0.5, positive_threshold=0.333) -> str:
    sentiment_probs = (
        None if ctx.validation else common_utils.get_sentiment(get_last_human_utterance(ctx, actor), probs=True)
    )
    if sentiment_probs and isinstance(sentiment_probs, dict):
        max_sentiment_prob = max(sentiment_probs.values())
        max_sentiments = [sentiment for sentiment in sentiment_probs if sentiment_probs[sentiment] == max_sentiment_prob]
        if max_sentiments:
            max_sentiment = max_sentiments[0]
            return_negative = max_sentiment == "negative" and max_sentiment_prob >= negative_threshold
            return_positive = max_sentiment == "positive" and max_sentiment_prob >= positive_threshold
            if return_negative or return_positive:
                return max_sentiment
    return "neutral"
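# A small self-contained sketch (not from the source) of the decision rule shared by both
# get_human_sentiment variants above: the top label is returned only if it is "negative"
# with probability >= 0.5 or "positive" with probability >= 0.333; otherwise "neutral".
def _label_from_probs(probs: dict, negative_threshold: float = 0.5, positive_threshold: float = 0.333) -> str:
    if not probs:
        return "neutral"
    label = max(probs, key=probs.get)
    if label == "negative" and probs[label] >= negative_threshold:
        return "negative"
    if label == "positive" and probs[label] >= positive_threshold:
        return "positive"
    return "neutral"


# Hypothetical probabilities for illustration:
assert _label_from_probs({"positive": 0.41, "negative": 0.33, "neutral": 0.26}) == "positive"
assert _label_from_probs({"negative": 0.45, "positive": 0.30, "neutral": 0.25}) == "neutral"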
def exit_respond(dialog, response_phrases):
    # goodbye_fix_phrases = ["goodbye", "bye", "bye bye", "alexa bye", "bye alexa", "goodbye alexa", "alexa bye bye"]
    apology_bye_phrases = [
        "Sorry, have a great day!",
        "Sorry to bother you, see you next time!",
        "My bad. Have a great time!",
        "Didn't mean to be rude. Talk to you next time.",
        "Sorry for interrupting you. Talk to you soon.",
        "Terribly sorry. Have a great day!",
        "Thought you wanted to chat. My bad. See you soon!",
        "Oh, sorry. Have a great day!",
    ]
    utt = dialog["utterances"][-1]
    response = random.choice(response_phrases).strip()  # neutral response
    annotation = utt["annotations"]
    try:
        sentiment = get_sentiment(utt, probs=False)[0]
    except KeyError:
        sentiment = "neutral"
    # sentiment_confidence = annotation['cobot_sentiment']['confidence']
    try:
        offensiveness = annotation["cobot_offensiveness"]["text"]
    except KeyError:
        offensiveness = "non-toxic"
    # offensiveness_confidence = annotation['cobot_offensiveness']['confidence']
    try:
        is_badlisted = annotation["cobot_offensiveness"]["is_badlisted"] == "badlist"
    except KeyError:
        is_badlisted = False

    if len(dialog["utterances"]) < 4:
        response = random.choice(apology_bye_phrases)
    elif sentiment == "positive":
        positive = ["I'm glad to help you! ", "Thanks for the chat! ", "Cool! "]
        response = random.choice(positive) + response
    elif offensiveness == "toxic" or is_badlisted or sentiment == "negative":
        response = random.choice(apology_bye_phrases)
    return response
def get_updated_disliked_skills(dialog, can_not_be_disliked_skills=None):
    can_not_be_disliked_skills = [] if can_not_be_disliked_skills is None else can_not_be_disliked_skills
    disliked_skills = dialog["human"]["attributes"].get("disliked_skills", [])
    prev_bot_uttr = dialog["bot_utterances"][-1]["text"].lower() if len(dialog["bot_utterances"]) > 0 else ""

    linked_to_skill = ""
    for skill_name, link_phrases in skills_phrases_map.items():
        for phrase in link_phrases:
            if phrase.lower() in prev_bot_uttr:
                linked_to_skill = skill_name
                break

    if linked_to_skill:
        negative_prob = get_sentiment(dialog["human_utterances"][-1], probs=True).get("negative", 0.0)
        toxicity = get_toxic(dialog["human_utterances"][-1], probs=False)
        _is_no = is_no(dialog["human_utterances"][-1])
        if negative_prob > 0.8 or toxicity or _is_no:
            if linked_to_skill not in can_not_be_disliked_skills:
                disliked_skills.append(linked_to_skill)

    return disliked_skills
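# A hypothetical illustration of the data the function above relies on: `skills_phrases_map`
# maps a skill name to the link phrases the bot uses to hand the conversation over to that
# skill. The skill names and phrases below are made up for the example, not from the source.
skills_phrases_map_example = {
    "movie_skill": {"Would you like to talk about movies?"},
    "book_skill": {"Maybe we can chat about books?"},
}
# If the previous bot utterance contained one of these phrases and the user reacts with
# strong negative sentiment (probability > 0.8), toxicity, or a "no", the linked skill is
# appended to the user's `disliked_skills` attribute, unless it is in `can_not_be_disliked_skills`.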
def get_entities_with_attitudes(annotated_uttr: dict, prev_annotated_uttr: dict):
    entities_with_attitudes = {"like": [], "dislike": []}
    all_entities = get_entities(annotated_uttr, only_named=False, with_labels=False)
    all_prev_entities = get_entities(prev_annotated_uttr, only_named=False, with_labels=False)
    logger.info(f"Consider all curr entities: {all_entities}, and all previous entities: {all_prev_entities}")
    curr_entity = all_entities[0] if all_entities else ""
    prev_entity = all_prev_entities[-1] if all_prev_entities else ""
    curr_uttr_text = annotated_uttr.get("text", "")
    prev_uttr_text = prev_annotated_uttr.get("text", "")
    curr_sentiment = get_sentiment(annotated_uttr, probs=False, default_labels=["neutral"])[0]
    current_first_sentence = annotated_uttr.get("annotations", {}).get("sentseg", {}).get("segments", [curr_uttr_text])[0]

    if "?" in current_first_sentence:
        pass
    elif WHAT_FAVORITE_PATTERN.search(prev_uttr_text):
        # what is your favorite ..? - animals -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif WHAT_LESS_FAVORITE_PATTERN.search(prev_uttr_text):
        # what is your less favorite ..? - animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif DO_YOU_LOVE_PATTERN.search(prev_uttr_text):
        if is_no(annotated_uttr):
            # do you love .. animals? - no -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
        elif is_yes(annotated_uttr):
            # do you love .. animals? - yes -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]
    elif DO_YOU_HATE_PATTERN.search(prev_uttr_text):
        if is_no(annotated_uttr):
            # do you hate .. animals? - no -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]
        elif is_yes(annotated_uttr):
            # do you hate .. animals? - yes -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
    elif I_HATE_PATTERN.search(curr_uttr_text):
        # i hate .. animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif I_LOVE_PATTERN.search(curr_uttr_text) or MY_FAVORITE_PATTERN.search(curr_uttr_text):
        # i love .. animals -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif if_chat_about_particular_topic(annotated_uttr, prev_annotated_uttr=prev_annotated_uttr, key_words=[curr_entity]):
        # what do you want to chat about? - ANIMALS -> `like animals`
        entities_with_attitudes["like"] += [curr_entity]
    elif if_not_want_to_chat_about_particular_topic(annotated_uttr, prev_annotated_uttr=prev_annotated_uttr):
        # i don't wanna talk about animals -> `dislike animals`
        entities_with_attitudes["dislike"] += [curr_entity]
    elif WHAT_DO_YOU_THINK_PATTERN.search(prev_uttr_text):
        if curr_sentiment == "negative":
            # what do you think about .. animals? - negative -> `dislike animals`
            entities_with_attitudes["dislike"] += [prev_entity]
        elif curr_sentiment == "positive":
            # what do you think about .. animals? - positive -> `like animals`
            entities_with_attitudes["like"] += [prev_entity]

    entities_with_attitudes["like"] = [el for el in entities_with_attitudes["like"] if el]
    entities_with_attitudes["dislike"] = [el for el in entities_with_attitudes["dislike"] if el]
    return entities_with_attitudes
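# The regex constants referenced above (WHAT_FAVORITE_PATTERN, DO_YOU_LOVE_PATTERN,
# I_HATE_PATTERN, etc.) are module-level compiled patterns. Hypothetical minimal versions
# are sketched here for illustration; the real patterns in the source are broader.
import re

DO_YOU_LOVE_PATTERN_EXAMPLE = re.compile(r"\b(do|did|would) you (love|like|enjoy)\b", re.IGNORECASE)
I_HATE_PATTERN_EXAMPLE = re.compile(r"\bi (hate|dislike|can't stand)\b", re.IGNORECASE)
MY_FAVORITE_PATTERN_EXAMPLE = re.compile(r"\bmy favou?rite\b", re.IGNORECASE)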
def is_neutral(annotated_uttr):
    sentiment = get_sentiment(annotated_uttr, probs=False)[0]
    return sentiment in ["neutral"]
def is_positive(annotated_uttr):
    sentiment = get_sentiment(annotated_uttr, probs=False)[0]
    return sentiment in ["positive", "very_positive"]
def sentiment_detected_handler(ctx: Context, actor: Actor) -> bool:
    # as in the earlier handler, `name` and `threshold` are expected to be bound
    # by an enclosing factory that builds this condition
    if ctx.validation:
        return False
    sentiment_probs = get_sentiment(int_ctx.get_last_human_utterance(ctx, actor), probs=True)
    return sentiment_probs.get(name, 0) >= threshold
def respond():
    st_time = time.time()
    dialogs_batch = request.json["dialogs"]
    confidences = []
    responses = []
    human_attributes = []
    bot_attributes = []
    attributes = []

    for dialog in dialogs_batch:
        used_topics = dialog["human"]["attributes"].get("small_talk_topics", [])
        human_attr = {}
        bot_attr = {}
        attr = {}

        skill_outputs = get_skill_outputs_from_dialog(
            dialog["utterances"][-3:], skill_name="small_talk_skill", activated=True
        )
        if len(skill_outputs) > 0:
            # small_talk_skill was active on the previous step
            topic = skill_outputs[0].get("small_talk_topic", "")
            script_step = skill_outputs[0].get("small_talk_step", 0)
            script = skill_outputs[0].get("small_talk_script", [])
            logger.info(f"Found previous step topic: `{topic}`.")
        else:
            topic = ""
            script_step = 0
            script = []

        _, new_user_topic, new_conf = pickup_topic_and_start_small_talk(dialog)
        logger.info(
            f"From current user utterance: `{dialog['human_utterances'][-1]['text']}` "
            f"extracted topic: `{new_user_topic}`."
        )
        sentiment = get_sentiment(dialog["human_utterances"][-1], probs=False)[0]

        if (
            len(topic) > 0
            and len(script) > 0
            and (len(new_user_topic) == 0 or new_conf == FOUND_WORD_START_CONFIDENCE or new_user_topic == topic)
        ):
            # continue the dialog if a new topic was not found or was found only as a key word in the
            # user sentence, because starting a conversation from a picked-up key word has a small probability
            user_dont_like = NOT_LIKE_PATTERN.search(dialog["human_utterances"][-1]["text"])
            user_stop_talking = COMPILE_NOT_WANT_TO_TALK_ABOUT_IT.search(dialog["human_utterances"][-1]["text"])
            if sentiment == "negative" or user_dont_like or user_stop_talking:
                logger.info("Found negative sentiment to small talk phrase. Finish script.")
                response, confidence, attr = (
                    "",
                    0.0,
                    {
                        "can_continue": CAN_NOT_CONTINUE,
                        "small_talk_topic": "",
                        "small_talk_step": 0,
                        "small_talk_script": [],
                    },
                )
            else:
                response, confidence, attr = get_next_response_on_topic(
                    topic, dialog["human_utterances"][-1], curr_step=script_step + 1, topic_script=script
                )
            if response != "":
                logger.info(
                    f"Continue script on topic: `{topic}`.\n"
                    f"User utterance: `{dialog['human_utterances'][-1]['text']}`.\n"
                    f"Bot response: `{response}`."
                )
        else:
            logger.info("Try to extract topic from user utterance or offer if requested.")
            response, topic, confidence = pickup_topic_and_start_small_talk(dialog)
            _is_question = is_any_question_sentence_in_utterance(dialog["human_utterances"][-1])
            _is_lets_chat = if_chat_about_particular_topic(
                dialog["human_utterances"][-1], dialog["bot_utterances"][-1] if dialog["bot_utterances"] else {}
            )
            if len(topic) > 0 and topic not in used_topics and (not _is_question or _is_lets_chat):
                logger.info(
                    f"Starting script on topic: `{topic}`.\n"
                    f"User utterance: `{dialog['human_utterances'][-1]['text']}`.\n"
                    f"Bot response: `{response}`."
                )
                # topic script starts, response is already formulated
                human_attr["small_talk_topics"] = used_topics + [topic]
                attr["response_parts"] = ["prompt"]
                attr["can_continue"] = CAN_CONTINUE_PROMPT
                attr["small_talk_topic"] = topic
                attr["small_talk_step"] = 0
                attr["small_talk_script"] = TOPIC_SCRIPTS.get(topic, [])
            else:
                logger.info("Cannot extract or offer a NEW topic.")
                response = ""

        if len(response) == 0:
            confidence = 0.0
        responses.append(response)
        confidences.append(confidence)
        human_attributes.append(human_attr)
        bot_attributes.append(bot_attr)
        attributes.append(attr)

    total_time = time.time() - st_time
    logger.info(f"small_talk_skill exec time: {total_time:.3f}s")
    return jsonify(list(zip(responses, confidences, human_attributes, bot_attributes, attributes)))
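# A hedged usage sketch of the batch endpoint above; the route path, host, and port are
# assumptions for illustration and are not taken from the source.
import requests

payload = {"dialogs": [dialog]}  # `dialog` is a placeholder for one annotated dialog dict of the shape used above
result = requests.post("http://localhost:8080/respond", json=payload, timeout=2).json()
# `result` is a list with one entry per dialog:
# [[response_text, confidence, human_attributes, bot_attributes, attributes], ...]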