def process_dialog(cls, msg, use_task=True):
    """Dialog strategy: try the task-oriented handler first; if it yields no
    response, fall back to the retrieval (search) or generative (seq2seq) model.
    """
    # Task response.
    if use_task:
        task_response, cls.dialog_status = TaskCore.task_handle(
            msg, cls.dialog_status)
    else:
        task_response = None

    # Search response. Short follow-up messages are rewritten with the last
    # few user turns and searched in "cr" mode; otherwise plain "qa" mode.
    if len(cls.dialog_status.context) >= 3 and ch_count(msg) <= 4:
        user_msgs = cls.dialog_status.context[::2][-3:]
        msg = "<s>".join(user_msgs)
        mode = "cr"
    else:
        mode = "qa"
    msg_tokens = NlpUtil.tokenize(msg, True)
    search_response, sim_score = SearchCore.search(msg_tokens, mode=mode)

    # Seq2seq response.
    seq2seq_response = cls._predict_via_seq2seq(msg_tokens)
    log_print("search_response=%s" % search_response)
    log_print("seq2seq_response=%s" % seq2seq_response)

    # Priority: task response > confident retrieval > generative fallback.
    if task_response:
        response = task_response
    elif sim_score >= 1.0:
        response = search_response
    else:
        response = seq2seq_response
    return response
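# The methods in this section share a dialog-state object. Below is a minimal
# sketch of what it needs to provide, inferred only from the attributes these
# snippets access (context, intent); the class name and anything else about it
# are assumptions, not the repo's actual definition.
class DialogStatus(object):
    def __init__(self):
        # Alternating turns: user messages at even indices, bot replies at odd
        # indices, which is why user turns are taken with context[::2].
        self.context = []
        # Currently active task intent, or None when no task is in progress.
        self.intent = None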
def task_handle(self, msg, dialog_status):
    try:
        response = None
        if dialog_status.intent not in self.intent_not_reset:
            dialog_status.intent = None
        dialog_status = self._slots_update(msg, dialog_status)
        # Classify the (jieba-segmented) message into a task intent.
        task_type = label(" ".join(jieba.cut(msg)))
        dialog_status.intent = task_type
        if task_type != "not_task":
            # Dispatch to the registered handler for this intent.
            handle_func = self.intent_handle_func[dialog_status.intent]
            response = eval(handle_func)(msg, dialog_status)
        else:
            response = None
        # for intent, update_func in self.intent_update_func:
        #     dialog_status = self._slots_update(msg, dialog_status)
        #     dialog_status = eval(update_func)(msg, dialog_status)
        #     if dialog_status.intent == intent:
        #         handle_func = self.intent_handle_func[dialog_status.intent]
        #         response = eval(handle_func)(msg, dialog_status)
        #         if response:
        #             break
        log_print("intent=%s, task_response=%s"
                  % (dialog_status.intent, response))
        return response, dialog_status
    except Exception as e:
        log_print("[ERROR] msg=%s, errmsg=%s" % (msg, e))
        return response, dialog_status
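# label() above is the intent classifier; its implementation is not shown in
# this section. A hypothetical stand-in using a fastText text classifier could
# look like the sketch below (the model file name and label scheme are
# assumptions, not the repo's actual setup).
import fasttext

_intent_model = fasttext.load_model("task_intent.bin")  # hypothetical model file

def label(segmented_msg):
    # fastText expects whitespace-separated tokens, which is why the caller
    # joins the jieba segmentation with spaces before calling label().
    labels, _probs = _intent_model.predict(segmented_msg)
    # Predictions come back as "__label__<intent>"; strip the prefix so the
    # result can be compared against "not_task" and used as a registry key.
    return labels[0].replace("__label__", "")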
def _predict_via_seq2seq(self, msg_tokens):
    # Use the last few user turns (even indices of context) as seq2seq input.
    user_msgs = " ".join(self.dialog_status.context[::2][-4:])
    log_print("seq2seq_input=%s" % user_msgs)
    n_layers, hidden_size, reverse = parseFilename(self.model_path, True)
    response = predict(
        n_layers,
        hidden_size,
        reverse,
        self.model_path,
        beam_size=1,
        msg=user_msgs,
        voc=self.voc,
    )
    # response = predict(self.seq2seq_inst, user_msgs, ret_size=1)
    return response
def task_handle(cls, msg, dialog_status):
    try:
        response = None
        if dialog_status.intent not in cls.intent_not_reset:
            dialog_status.intent = None
        # Try each registered intent: update its slots, and if the intent
        # matches, run its handler until one produces a response.
        for intent, update_func in cls.intent_update_func:
            dialog_status = cls._slots_update(msg, dialog_status)
            dialog_status = eval(update_func)(msg, dialog_status)
            if dialog_status.intent == intent:
                handle_func = cls.intent_handle_func[dialog_status.intent]
                response = eval(handle_func)(msg, dialog_status)
                if response:
                    break
        log_print("intent=%s, task_response=%s"
                  % (dialog_status.intent, response))
        return response, dialog_status
    except Exception as e:
        log_print("[ERROR] msg=%s, errmsg=%s" % (msg, e))
        return response, dialog_status
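# The loop above implies the shapes of the two intent registries: an iterable
# of (intent, update_func_name) pairs and a dict of intent -> handler name,
# where each name is eval()'d to a callable. The concrete intents and function
# names below are hypothetical examples, not the repo's real configuration.
intent_update_func = [
    ("check_order", "OrderTask.update_slots"),  # (msg, dialog_status) -> dialog_status
    ("refund", "RefundTask.update_slots"),
]
intent_handle_func = {
    "check_order": "OrderTask.handle",          # (msg, dialog_status) -> response str
    "refund": "RefundTask.handle",
}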
def search(cls, msg_tokens, mode="qa", filter_pattern=None):
    # Keep only tokens that are in the vocabulary.
    query = [w for w in msg_tokens if w in cls.word2id]
    search_inst = cls.qa_search_inst if mode == "qa" else cls.cr_search_inst
    sim_items = search_inst.similarity(query, size=10)
    docs, answers = search_inst.get_docs(sim_items)

    # User filter pattern: drop candidate answers matching the pattern.
    if filter_pattern:
        new_docs, new_answers = [], []
        for doc, ans in zip(docs, answers):
            if not filter_pattern.search(ans):
                new_docs.append(doc)
                new_answers.append(ans)
        docs, answers = new_docs, new_answers

    log_print("init_query=%s, filter_query=%s"
              % ("".join(msg_tokens), "".join(query)))
    # Guard against the filter removing every candidate.
    if not answers:
        return "亲爱哒,还有什么小妹可以帮您呢~", 2.0
    response, score = answers[0], sim_items[0][1]
    log_print("%s_search_sim_doc=%s, score=%.4f"
              % (mode, "".join(docs[0]), score))
    if score <= 1.0:
        # Low-confidence retrieval: fall back to a canned reply
        # ("Dear, is there anything else I can help you with?").
        response, score = "亲爱哒,还有什么小妹可以帮您呢~", 2.0
    return response, score
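# Example call into the retrieval layer with an answer filter. filter_pattern
# is assumed to be a compiled regex, matching the filter_pattern.search(ans)
# call above; the query text and blocked phrase are hypothetical.
import re

tokens = NlpUtil.tokenize("怎么退货", True)      # "how do I return an item?"
blocked = re.compile("人工客服")                  # drop answers mentioning human agents
response, score = SearchCore.search(tokens, mode="qa", filter_pattern=blocked)
if score >= 1.0:
    print(response)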
def _predict_via_seq2seq(cls, msg_tokens):
    # Use the last few user turns (even indices of context) as seq2seq input.
    user_msgs = " ".join(cls.dialog_status.context[::2][-4:])
    log_print("seq2seq_input=%s" % user_msgs)
    response = predict(cls.seq2seq_inst, user_msgs, ret_size=1)
    return response
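# A minimal interactive driver tying the pieces together, as a sketch only: it
# assumes DialogCore is the class holding process_dialog and dialog_status, and
# that the caller appends each user/bot turn to dialog_status.context (the real
# repo may maintain context elsewhere, e.g. inside _slots_update).
if __name__ == "__main__":
    while True:
        msg = input("user> ").strip()
        if not msg:
            break
        response = DialogCore.process_dialog(msg)
        DialogCore.dialog_status.context.extend([msg, response])
        print("bot> %s" % response)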