def get_answer(self, sentence, userID='123', show_details=False):
    results = ChatClient.get_classify(self, sentence)
    # if we have a classification, find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in self.dialog_intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # send "no_matching" when the match confidence is lower than self.criteria_coincidence
                    if self.criteria_coincidence > results[0][1]:
                        return "no_matching"
                    # set context for this intent if necessary
                    if 'context_set' in i:
                        if show_details:
                            self.logger.debug('context: %s', i['context_set'])
                        self.context[userID] = i['context_set']
                    # check if this intent is contextual and applies to this user's conversation
                    if 'context_filter' not in i or \
                            (userID in self.context and 'context_filter' in i
                             and i['context_filter'] == self.context[userID]):
                        if show_details:
                            self.logger.debug('tag: %s', i['tag'])
                        # return a random response from the intent
                        return random.choice(i['responses'])
            results.pop(0)
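
# A minimal sketch of the intent structure the loop above expects. The field names
# ('tag', 'patterns', 'responses', 'context_set', 'context_filter') are taken from the
# lookups in get_answer; the concrete tags and texts below are illustrative assumptions only.
example_dialog_intents = {
    'intents': [
        {
            'tag': 'greeting',                    # compared against results[0][0]
            'patterns': ['Hello', 'Hi there'],    # training utterances for the classifier
            'responses': ['Hello!', 'Good to see you.'],
            'context_set': 'greeted'              # optional: stored in self.context[userID]
        },
        {
            'tag': 'greeting_followup',
            'patterns': ['How are you'],
            'responses': ['Doing fine, thanks.'],
            'context_filter': 'greeted'           # optional: used only when it equals self.context[userID]
        }
    ]
}
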
def get_answer(self, sentence, userID='123', show_details=False):
    # If the conversation has not been understood more than 3 times in a row,
    # send a usage guide message.
    if self.not_matching_count >= 3:
        self.not_matching_count = 1
        self.previous_not_matching = False
        return self.usage_guide_message
    results = ChatClient.get_classify(self, sentence)
    # Echo the sentence back when bad language is used three times in a row.
    if self.slang_matching_count >= 3 and results:  # guard against an empty classification result
        if results[0][0] != 'Slang':
            self.slang_matching_count = 1
            self.previous_slang_matching = False
        else:
            return sentence
    # if we have a classification, find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in self.dialog_intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    # send a "not matching" message when the match confidence is lower than self.criteria_coincidence
                    if self.criteria_coincidence > results[0][1]:
                        return_message = '"' + sentence + '"' + self.not_matching_message
                        if self.previous_not_matching:
                            self.not_matching_count += 1
                        self.previous_not_matching = True
                        return return_message
                    # track bad language used three times in a row
                    if results[0][0] == 'Slang':
                        if self.previous_slang_matching:
                            self.slang_matching_count += 1
                        self.previous_slang_matching = True
                    # set context for this intent if necessary
                    if 'context_set' in i:
                        if show_details:
                            self.logger.debug('context: %s', i['context_set'])
                        self.context[userID] = i['context_set']
                    # check if this intent is contextual and applies to this user's conversation
                    if 'context_filter' not in i or \
                            (userID in self.context and 'context_filter' in i
                             and i['context_filter'] == self.context[userID]):
                        if show_details:
                            self.logger.debug('tag: %s', i['tag'])
                        # return a random response from the intent
                        return random.choice(i['responses'])
                    else:
                        return self.context_filter_message
            results.pop(0)
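
# This variant relies on per-conversation state that must be initialized elsewhere in the class
# (and on `import random` at module level). A minimal sketch of that state, assuming it is set
# up in ChatClient.__init__; the attribute names come from get_answer above, but the initial
# values and message texts are assumptions, not the framework's actual defaults.
import logging


class ChatClientStateSketch:
    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.context = {}                      # userID -> current context_set
        self.criteria_coincidence = 0.5        # assumed minimum classification confidence
        self.not_matching_count = 1            # consecutive "not understood" turns (reset value in get_answer is 1)
        self.previous_not_matching = False
        self.slang_matching_count = 1          # consecutive 'Slang' turns
        self.previous_slang_matching = False
        self.not_matching_message = " was not understood."            # assumed text
        self.usage_guide_message = "Here is how to use the chatbot."  # assumed text
        self.context_filter_message = "Please choose one of these."   # assumed text
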
import os
# ChatClient is assumed to be importable from the ArkChatFramework package; the exact
# module path is not shown in this snippet.


def test_ChatClient():
    # def __init__(self, language, intents_file, training_data_file, tflearn_logs_dir, tflearn_model_file):
    # os.getcwd() - get the working directory of the current process
    # os.curdir   - the current directory ('.')
    # os.pardir   - the parent directory ('..')
    # get the absolute path for a given relative path
    os.path.abspath(".\\Scripts")
    print("Working directory of the current process [%s]" % os.getcwd())
    print("Current directory [%s]" % os.curdir)
    print("Parent directory [%s]" % os.pardir)
    # os.chdir() returns None, so change directory first and then print the new working directory
    os.chdir(os.pardir)
    print("Working directory after changing to the parent directory [%s]" % os.getcwd())
    print("ArkChatFramework directory [%s]" % os.path.abspath("ArkChatFramework"))
    print("intents file path [%s]" % os.path.abspath("ArkNLU/DialogIntents/intents_home_kr.json"))
    input_file_name = os.path.abspath("ArkNLU/DialogIntents/intents_home_kr.json")
    print("intents file path [%s]" % input_file_name)
    input_training_data_file_name = os.path.abspath("ArkNLU/NLUModel/training_data_home_kr")
    print("training data file path [%s]" % input_training_data_file_name)
    tflearn_logs_dir = os.path.abspath('ArkNLU/NLUModel/home_tflearn_kr_logs')
    print("tflearn_logs directory [%s]" % tflearn_logs_dir)
    tflearn_model_file_name = os.path.abspath('ArkNLU/NLUModel/model_home_kr.tflearn')
    print("tflearn_model file path [%s]" % tflearn_model_file_name)
    # intents_file, training_data_file, tflearn_logs_dir, tflearn_model_file
    learning_model_files = dict(
        intents_file=input_file_name,
        training_data_file=input_training_data_file_name,
        tflearn_logs_dir=tflearn_logs_dir,
        tflearn_model_file=tflearn_model_file_name)
    bot = ChatClient('ko-KR', learning_model_files)
    print("ChatClient instance...")
    # bot.read_dialog_intents_jsonfile()
    # bot.restore_training_data_structures()
    # bot.restore_training_model()
    userID = 'arkwith7'
    sentence = '미션이 무엇입니까?'  # "What is the mission?"
    results = bot.get_classify(sentence)
    print("User intent classification for '미션이 무엇입니까?' [%s]" % results)
    # reads the set of JSON documents that define the dialog corpus and dialog intents
    print("Response to '미션이 무엇입니까?' [%s]" % bot.response(sentence, userID, show_details=True))
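
# For reference when reading the test output: get_answer and the test above assume that
# ChatClient.get_classify returns a list of (intent_tag, probability) pairs sorted by
# descending probability, since get_answer reads results[0][0] and results[0][1].
# The values below are illustrative only.
example_results = [('Mission', 0.91), ('Greeting', 0.05)]
best_tag, best_score = example_results[0]   # results[0][0], results[0][1] in get_answer
assert isinstance(best_tag, str) and 0.0 <= best_score <= 1.0
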
def get_answer(self, sentence, userID='123', show_details=False):
    # If the conversation has not been understood more than 3 times in a row,
    # send a usage guide message.
    if self.not_matching_count >= 3:
        self.not_matching_count = 1
        self.previous_not_matching = False
        return self.set_answer("abnormal", "guide_message", self.usage_guide_message, "")
    results = ChatClient.get_classify(self, sentence)
    # Echo the sentence back when bad language is used three times in a row.
    if self.slang_matching_count >= 3 and results:  # guard against an empty classification result
        if results[0][0] != 'Slang':
            self.slang_matching_count = 1
            self.previous_slang_matching = False
        else:
            return self.set_answer("abnormal", "not_matching", sentence, "")
    # if we have a classification, find the matching intent tag
    if results:
        # loop as long as there are matches to process
        while results:
            for i in self.dialog_intents['intents']:
                # find a tag matching the first result
                if i['tag'] == results[0][0]:
                    self.logger.debug("previous_context_set : [%s] ", self.previous_context_set)
                    # send a "not matching" message when the match confidence is lower than self.criteria_coincidence
                    if self.criteria_coincidence > results[0][1]:
                        return_message = '"' + sentence + '"' + self.not_matching_message
                        if self.previous_not_matching:
                            self.not_matching_count += 1
                        self.previous_not_matching = True
                        return self.set_answer("abnormal", "not_matching", return_message, "")
                    # track bad language used three times in a row
                    if results[0][0] == 'Slang':
                        if self.previous_slang_matching:
                            self.slang_matching_count += 1
                        self.previous_slang_matching = True
                    # set context for this intent if necessary
                    if 'context_set' in i:
                        if show_details:
                            self.logger.debug('context: %s', i['context_set'])
                        self.context[userID] = i['context_set']
                        # if the intent sets a context, return the whole response list for selection
                        if i['context_set']:
                            self.previous_context_set = i['context_set']
                            self.previous_responses = i['responses']
                            self.logger.debug("context_set : [%s] ", i['context_set'])
                            return self.set_answer("list", i['patterns'][0], i['responses'], "")
                    # check if this intent is contextual and applies to this user's conversation
                    if 'context_filter' not in i or \
                            (userID in self.context and 'context_filter' in i
                             and i['context_filter'] == self.context[userID]):
                        if show_details:
                            self.logger.debug('tag: %s', i['tag'])
                        # return a random response from the intent
                        # use .get() to avoid a KeyError when the intent has no 'context_set'
                        self.previous_context_set = i.get('context_set')
                        self.previous_responses = i['responses']
                        explanation = []
                        if 'responses_ref' in i:
                            explanation = i['responses_ref']
                        return self.set_answer(
                            "qna", i['patterns'][0], random.choice(i['responses']), explanation)
                    elif self.previous_context_set == i['context_filter']:
                        self.logger.debug("context_set : [%s] ", i.get('context_set'))
                        self.logger.debug("context_filter : [%s] ", i['context_filter'])
                        self.logger.debug("previous_context_set : [%s] ", self.previous_context_set)
                        self.previous_context_set = i.get('context_set')
                        self.previous_responses = i['responses']
                        return self.set_answer("list", i['patterns'][0], i['responses'], "")
                    else:
                        if not self.previous_responses:
                            self.previous_responses = i['context_filter']
                            return_values = i['context_filter']
                        else:
                            return_values = ""
                            for value in self.previous_responses:
                                return_values += value + ' '
                            self.previous_responses = i['context_filter']
                        # show the responses of the intent whose tag matches context_filter and ask the user to choose
                        # ("와 같은" means "such as")
                        return self.set_answer(
                            "abnormal", "not_matching",
                            return_values + "와 같은 " + self.context_filter_message, "")
            results.pop(0)
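
# get_answer above returns everything through self.set_answer(...), whose implementation is
# not shown here. A minimal sketch of what such a helper might look like, assuming it simply
# packages the answer into a dict; only the call signature is taken from the code above, and
# the key names 'type', 'question', 'answer', 'explanation' are assumptions.
def set_answer(self, answer_type, question, answer, explanation):
    # answer_type: "qna" (single response), "list" (response list to choose from),
    #              or "abnormal" (guide / not-matching messages)
    return {
        'type': answer_type,
        'question': question,        # matched pattern, or a key such as "not_matching"
        'answer': answer,            # a single response string or a list of responses
        'explanation': explanation,  # responses_ref when present, otherwise ""
    }
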