def chat_fun_english(question, model, chat_settings, chatlog_filepath):
    """Answer a single English question with the chatbot model.

    Args:
        question: raw user input (question text or a chat command).
        model: loaded chatbot model exposing ``chat(question, chat_settings)``.
        chat_settings: settings object controlling context display, beam
            display, and chat logging.
        chatlog_filepath: path of the chat log file used when logging is on.

    Returns:
        The model's answer (a string, or a list of beams when
        ``show_all_beams`` is set), or ``None`` when the input was a command.
    """
    # Check whether the input is a command, and execute it if so.
    is_command, terminate_chat, reload_model = chat_command_handler.handle_command(
        question, model, chat_settings)
    if is_command:
        # BUG FIX: the original fell through to `return answer` with `answer`
        # never assigned, raising NameError for every command. Commands
        # produce no answer, so return None explicitly.
        return None

    # Not a command: pass the question to the chatbot model.
    question_with_history, answer = model.chat(question, chat_settings)

    # Print the answer (or all beam candidates) to the console.
    # Leftover debug prints ("\n1st if", "\n2nd if") were removed.
    if chat_settings.show_question_context:
        print("Question with history (context): {0}".format(question_with_history))

    if chat_settings.show_all_beams:
        for i in range(len(answer)):
            print("ChatBot (Beam {0}): {1}".format(i, answer[i]))
    else:
        print("ChatBot: {0}".format(answer))
    print()

    # Log the exchange before returning so logging is never skipped.
    if chat_settings.inference_hparams.log_chat:
        chat_command_handler.append_to_chatlog(chatlog_filepath, question, answer)
    return answer
def chat_fun_urdu(n_query, model, chat_settings, chatlog_filepath):
    """Answer a single Urdu question with the chatbot model.

    The query is first checked by ``wt.query_check``; if that check matches,
    its canned response is returned directly. Otherwise the query is
    transliterated via ``ChatSettings.To_query`` before being sent to the
    model, and the answer is converted back via ``ChatSettings.To_answer``.

    Args:
        n_query: raw user input in Urdu (question text or a chat command).
        model: loaded chatbot model exposing ``chat(question, chat_settings)``.
        chat_settings: settings object controlling context/beam display and
            chat logging.
        chatlog_filepath: path of the chat log file used when logging is on.

    Returns:
        The converted answer string, the canned ``wt`` response, ``None``
        when the input was a command, or ``None`` when all beams are shown.
    """
    chk, response = wt.query_check(n_query)
    question = n_query

    # Check whether the input is a command, and execute it if so.
    is_command, terminate_chat, reload_model = chat_command_handler.handle_command(
        question, model, chat_settings)
    if is_command:
        # Commands produce no model answer.
        return None
    if chk:
        # The query matched a predefined pattern; return its canned response.
        return response

    # Transliterate the Urdu query into the model's input form, then chat.
    question = ChatSettings.To_query(n_query)
    question_with_history, answer = model.chat(question, chat_settings)

    # Print the answer or answer beams.
    if chat_settings.show_question_context:
        print("Question with history (context): {0}".format(question_with_history))

    if chat_settings.show_all_beams:
        for i in range(len(answer)):
            print("ChatBot (Beam {0}): {1}".format(i, answer[i]))
        n_answer = None  # no single answer to return when all beams are shown
    else:
        n_answer = ChatSettings.To_answer(answer)
        print("ChatBot: {0}".format(n_answer))
        print()

    # BUG FIX: the original returned before this check, so the chat log was
    # never written on the normal path. Log first, then return.
    if chat_settings.inference_hparams.log_chat:
        chat_command_handler.append_to_chatlog(chatlog_filepath, question, answer)
    return n_answer
def chat_fun():
    """Run the interactive console chat loop.

    Repeatedly builds the inference model (rebuilding it when a reload
    command is issued), loads its weights, and answers console input until a
    terminate command is received.

    NOTE(review): relies on names defined elsewhere in the file
    (``chat_settings``, ``input_vocabulary``, ``output_vocabulary``,
    ``model_dir``, ``checkpoint``, ``chatlog_filepath``) — confirm they are
    in scope at the call site.
    """
    terminate_chat = False
    reload_model = False
    while not terminate_chat:
        # Create the model (or re-create it after a reload command).
        print()
        print("Initializing model..." if not reload_model else "Re-initializing model...")
        print()
        with ChatbotModel(mode="infer",
                          model_hparams=chat_settings.model_hparams,
                          input_vocabulary=input_vocabulary,
                          output_vocabulary=output_vocabulary,
                          model_dir=model_dir) as model:
            # Load the weights
            print()
            print("Loading model weights...")
            print()
            model.load(checkpoint)
            # Show the commands (only on first start, not after a reload)
            if not reload_model:
                chat_command_handler.print_commands()
            while True:
                # Get the input and check if it is a question or a command,
                # and execute if it is a command
                question = input("You: ")
                is_command, terminate_chat, reload_model = chat_command_handler.handle_command(
                    question, model, chat_settings)
                if terminate_chat or reload_model:
                    break
                elif is_command:
                    continue
                else:
                    # If it is not a command (it is a question), pass it on to
                    # the chatbot model to get the answer
                    question_with_history, answer = model.chat(
                        question, chat_settings)
                    # Print the answer or answer beams and log to chat log
                    if chat_settings.show_question_context:
                        print("Question with history (context): {0}".format(
                            question_with_history))
                        print("\n1st if")  # NOTE(review): leftover debug output — consider removing
                    if chat_settings.show_all_beams:
                        for i in range(len(answer)):
                            print("ChatBot (Beam {0}): {1}".format(
                                i, answer[i]))
                        print("\n2nd if")  # NOTE(review): leftover debug output — consider removing
                    else:
                        print("ChatBot: {0}".format(answer))
                        print("\n else")  # NOTE(review): leftover debug output — consider removing
                    print()
                    if chat_settings.inference_hparams.log_chat:
                        chat_command_handler.append_to_chatlog(
                            chatlog_filepath, question, answer)
def get(self, question):
    """Handle one HTTP chat request and return the reply string.

    Commands and terminate requests get canned replies; real questions are
    forwarded to the model. Every exchange (including command replies) is
    appended to the chat log when logging is enabled.
    """
    # Classify the input first: is it a command, and does it ask to terminate?
    is_command, wants_terminate, _ = chat_command_handler.handle_command(
        question, model, chat_settings)

    if wants_terminate:
        answer = "[Can't terminate from http request]"
    elif is_command:
        answer = "[Command processed]"
    else:
        # A real question: forward to the model, keeping only the answer
        # (the question-with-history context string is discarded).
        answer = model.chat(question, chat_settings)[1]

    if chat_settings.inference_hparams.log_chat:
        chat_command_handler.append_to_chatlog(chatlog_filepath, question, answer)
    return answer
def chat(self, question):
    """Handle one WeChat message and return the reply string.

    Args:
        question: raw user input (question text or a chat command).

    Returns:
        The model's answer string, the question-with-history context when
        ``show_question_context`` is set, a notice for terminate requests,
        or ``None`` when the input was a non-terminate command.
    """
    is_command, terminate_chat = chat_command_handler.handle_command(
        question, self.model, self.chat_settings)
    if terminate_chat:
        return "Terminate is not supported in wechat model."
    if is_command:
        # BUG FIX: the original fell through to the logging check with
        # `answer` undefined, raising NameError for commands when logging
        # was enabled. Commands produce no answer.
        return None

    # Not a command: pass the question to the chatbot model.
    question_with_history, answer = self.model.chat(
        question, self.chat_settings)

    # BUG FIX: log before returning — the original placed this check after
    # the returns, so it was unreachable.
    if self.chat_settings.inference_hparams.log_chat:
        chat_command_handler.append_to_chatlog(self.chatlog_filepath,
                                               question, answer)

    if self.chat_settings.show_question_context:
        return "Question with history (context): {0}".format(
            question_with_history)

    if self.chat_settings.show_all_beams:
        # BUG FIX: the original returned inside the loop, so only beam 0 was
        # ever sent. Return all beams, one per line.
        return "\n".join("ChatBot (Beam {0}): {1}".format(i, beam)
                         for i, beam in enumerate(answer))
    return format(answer)
#Load the weights print() print("Loading model weights...") model.load(checkpoint) # Setting up the chat chatlog_filepath = path.join( model_dir, "chat_logs", "chatlog_{0}.txt".format( datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))) chat_settings = ChatSettings(hparams.inference_hparams) chat_command_handler.print_commands() while (True): #Get the input and check if it is a question or a command, and execute if it is a command question = input("You: ") is_command, terminate_chat = chat_command_handler.handle_command( question, model, chat_settings) if terminate_chat: break elif is_command: continue else: #If it is not a command (it is a question), pass it on to the chatbot model to get the answer question_with_history, answer = model.chat(question, chat_settings) #Print the answer or answer beams and log to chat log if chat_settings.show_question_context: print("Question with history (context): {0}".format( question_with_history)) if chat_settings.show_all_beams: for i in range(len(answer)):
def chat_fun(n_query):
    """Answer one query, managing model creation/reload internally.

    Builds the inference model, loads its weights, and answers ``n_query``:
    either a canned ``wt.query_check`` response, or the model's answer after
    transliterating the query via ``ChatSettings.To_query``.

    NOTE(review): relies on names defined elsewhere in the file
    (``chat_settings``, ``input_vocabulary``, ``output_vocabulary``,
    ``model_dir``, ``checkpoint``, ``chatlog_filepath``, ``wt``).
    """
    terminate_chat = False
    reload_model = False
    chk, response = wt.query_check(n_query)
    while not terminate_chat:
        # Create the model
        print()
        print("Initializing model..." if not reload_model else "Re-initializing model...")
        print()
        with ChatbotModel(mode="infer",
                          model_hparams=chat_settings.model_hparams,
                          input_vocabulary=input_vocabulary,
                          output_vocabulary=output_vocabulary,
                          model_dir=model_dir) as model:
            # Load the weights
            print()
            print("Loading model weights...")
            print()
            model.load(checkpoint)
            # Show the commands
            if not reload_model:
                # Uncomment the following line if you want to print commands.
                # chat_command_handler.print_commands()
                print('Model Reload!')
            while True:
                # Get the input and check if it is a question or a command,
                # and execute if it is a command. The console prompt is
                # replaced by the function argument.
                # question = input("You: ")
                question = n_query
                is_command, terminate_chat, reload_model = chat_command_handler.handle_command(
                    question, model, chat_settings)
                if terminate_chat or reload_model:
                    break
                elif is_command:
                    continue
                elif chk:
                    # Predefined pattern matched: return the canned response.
                    return response
                else:
                    # Transliterate the query, then ask the model.
                    question = ChatSettings.To_query(n_query)
                    # If it is not a command (it is a question), pass it on to
                    # the chatbot model to get the answer
                    question_with_history, answer = model.chat(
                        question, chat_settings)
                    # Print the answer or answer beams and log to chat log
                    if chat_settings.show_question_context:
                        print("Question with history (context): {0}".format(
                            question_with_history))
                    if chat_settings.show_all_beams:
                        for i in range(len(answer)):
                            print("ChatBot (Beam {0}): {1}".format(
                                i, answer[i]))
                        # NOTE(review): no return on this path — the inner
                        # while loop re-submits the same question forever
                        # when show_all_beams is enabled. Confirm intent.
                    else:
                        n_answer = ChatSettings.To_answer(answer)
                        print("ChatBot: {0}".format(n_answer))
                        print()
                        return n_answer
                    # NOTE(review): unreachable on the non-beam path — the
                    # `return n_answer` above exits before logging runs.
                    if chat_settings.inference_hparams.log_chat:
                        chat_command_handler.append_to_chatlog(
                            chatlog_filepath, question, answer)