def query():
    """HTTP endpoint: generate a chatbot reply for the `question` query parameter.

    Reads decoding settings from the module-level `config`, runs the
    generator on the raw question (no conversation history is kept here),
    and returns the chosen reply as a JSON response.
    """
    # Parse decoding parameters.
    # (The original also read 'max_turns_history' and computed 'from_index'
    # over an always-empty 'turns' list — all dead code, removed.)
    num_samples = config.getint('decoder', 'num_samples')
    question = request.args.get('question')
    # Generate candidate bot messages.
    bot_messages = generate_response(
        model,
        tokenizer,
        question,
        config,
        mmi_model=mmi_model,
        mmi_tokenizer=mmi_tokenizer
    )
    if num_samples == 1:
        bot_message = bot_messages[0]
    else:
        # TODO: Select a message that is the most appropriate given the context
        # This way you can avoid loops
        bot_message = random.choice(bot_messages)
    app.logger.info('question: %s', question)
    app.logger.info('result >>> %s', bot_message)
    return jsonify(bot_message)
def query():
    """HTTP endpoint: generate a chatbot reply and return it as JSON.

    Response shape: {"msg": <bot reply>, "status": "ok"}.
    """
    # Parse decoding parameters.
    # (The original also read 'max_turns_history' but never used it — removed.)
    num_samples = config.getint('decoder', 'num_samples')
    question = request.args.get('question')
    # The question is terminated with the tokenizer's EOS token, since the
    # dialogue model expects each utterance to end with it.
    bot_messages = generate_response(
        model,
        tokenizer,
        question + tokenizer.eos_token,
        config,
        mmi_model=mmi_model,
        mmi_tokenizer=mmi_tokenizer
    )
    if num_samples == 1:
        bot_message = bot_messages[0]
    else:
        # TODO: Select a message that is the most appropriate given the context
        # This way you can avoid loops
        bot_message = random.choice(bot_messages)
    app.logger.info('bot_message: %s', bot_message)
    app.logger.info('question: %s', question)
    app.logger.info('result >>> %s', bot_message)
    return jsonify({"msg": bot_message, "status": "ok"})
def get_response(prompt, channel_id, do_infinite):
    """Generate a bot reply to `prompt` for the chat identified by `channel_id`.

    Appends the prompt as a new turn in the per-channel history, builds a
    single EOS-separated history string (static prefix + the most recent
    `max_turns_history` turns, or everything when negative), feeds it to the
    generator, and records and returns the selected bot message.

    `do_infinite` is unused here; kept for interface compatibility.
    """
    # These names are module-level state shared with the rest of the bot.
    global translator
    global turn
    global turn2
    global num_samples
    global max_turns_history
    global model
    global tokenizer
    global mmi_model
    global mmi_tokenizer
    global config
    global history_dict
    global from_index
    # A single turn is a group of user messages and bot responses right after.
    # (The original built an unused local 'turns' list when max_turns_history
    # was 0, and an unused 'str_channel_id' — both dead code, removed.)
    turn = {'user_messages': [], 'bot_messages': []}
    turn['user_messages'].append(prompt)
    history_dict.setdefault(channel_id, []).append(turn)
    # Merge turns into a single history (don't forget the EOS token).
    history = ""
    from_index = max(len(history_dict[channel_id]) - max_turns_history - 1, 0) if max_turns_history >= 0 else 0
    for message in static_history:
        history += message + tokenizer.eos_token
    for turn2 in history_dict[channel_id][from_index:]:
        # Each turn begins with user messages, then the bot replies.
        for message in turn2['user_messages']:
            history += message + tokenizer.eos_token
        for message in turn2['bot_messages']:
            history += message + tokenizer.eos_token
    # Generate candidate bot messages.
    bot_messages = generate_response(model, tokenizer, history, config,
                                     mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer)
    if num_samples == 1:
        bot_message = bot_messages[0]
    else:
        # TODO: Select a message that is the most appropriate given the context
        # This way you can avoid loops
        bot_message = random.choice(bot_messages)
    turn['bot_messages'].append(bot_message)
    return bot_message
def message(self, update, context):
    """Telegram handler: reply to a user message, optionally as a GIF.

    Per-chat history lives in `context.chat_data['turns']`; a turn is a
    group of user messages plus the bot responses right after. "bye"
    restarts the conversation; "@gif" requests the reply as a GIF.
    """
    # Parse parameters
    num_samples = self.config.getint('decoder', 'num_samples')
    turns_memory = self.config.getint('chatbot', 'turns_memory')
    if 'turns' not in context.chat_data:
        context.chat_data['turns'] = []
    turns = context.chat_data['turns']
    user_message = update.message.text
    if user_message.lower() == 'bye':
        # Restart chat
        context.chat_data['turns'] = []
        update.message.reply_text("Bye")
        return None
    return_gif = False
    if '@gif' in user_message:
        # Strip the marker and deliver the reply as a GIF instead of text.
        return_gif = True
        user_message = user_message.replace('@gif', '').strip()
    if turns_memory == 0:
        # No memory: forget previous turns. Fix: use clear() rather than
        # rebinding context.chat_data['turns'] — rebinding left the local
        # `turns` aliasing the old list, so the new turn was appended to a
        # list that was no longer stored in chat_data.
        turns.clear()
    # A single turn is a group of user messages and bot responses right after
    turn = {'user_messages': [], 'bot_messages': []}
    turns.append(turn)
    turn['user_messages'].append(user_message)
    # Merge turns into a single history (don't forget the EOS token).
    history = ""
    from_index = max(len(turns) - turns_memory - 1, 0) if turns_memory >= 0 else 0
    for past_turn in turns[from_index:]:
        # Each turn begins with user messages.
        for msg in past_turn['user_messages']:
            history += msg + self.tokenizer.eos_token
        for msg in past_turn['bot_messages']:
            history += msg + self.tokenizer.eos_token
    # Generate bot messages
    bot_messages = generate_response(self.model, self.tokenizer, history, self.config)
    if num_samples == 1:
        bot_message = bot_messages[0]
    else:
        # TODO: Select a message that is the most appropriate given the context
        # This way you can avoid loops
        bot_message = random.choice(bot_messages)
    turn['bot_messages'].append(bot_message)
    if return_gif:
        # Return response as GIF
        gif_url = translate_message_to_gif(bot_message, self.config)
        context.bot.send_animation(update.effective_message.chat_id, gif_url)
    else:
        # Return response as text
        update.message.reply_text(bot_message)
def run_chat(model, tokenizer, config, mmi_model=None, mmi_tokenizer=None):
    """Interactive console chat loop (MMI-reranking variant).

    Type "Bye" to restart the conversation; "Quit" to exit.
    """
    # Decoding parameters.
    num_samples = config.getint('decoder', 'num_samples')
    max_turns_history = config.getint('decoder', 'max_turns_history')
    logger.info("Running the chatbot...")
    turns = []
    print("Bot >>>", "Just start texting me. If I'm getting annoying, type \"Bye\". To quit the chat type \"Quit\".")
    while True:
        prompt = input("User >>> ")
        if max_turns_history == 0:
            # If you still get different responses then set seed
            turns = []
        lowered = prompt.lower()
        if lowered == 'bye':
            print("Bot >>>", "Bye")
            turns = []
            continue
        if lowered == 'quit':
            return
        # A single turn is a group of user messages and bot responses right after
        current = {'user_messages': [prompt], 'bot_messages': []}
        turns.append(current)
        # Merge turns into a single history (don't forget EOS token)
        start = 0 if max_turns_history < 0 else max(len(turns) - max_turns_history - 1, 0)
        pieces = []
        for past in turns[start:]:
            # Each turn begins with user messages, then bot replies.
            pieces.extend(past['user_messages'])
            pieces.extend(past['bot_messages'])
        history = "".join(piece + tokenizer.eos_token for piece in pieces)
        # Generate bot messages
        candidates = generate_response(
            model, tokenizer, history, config,
            mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer
        )
        if num_samples == 1:
            bot_message = candidates[0]
        else:
            # TODO: Select a message that is the most appropriate given the context
            # This way you can avoid loops
            bot_message = random.choice(candidates)
        print("Bot >>>", bot_message)
        current['bot_messages'].append(bot_message)
def run_chat(model, tokenizer, config):
    """Interactive console chat loop.

    "Bye" restarts the conversation; "Quit" exits.
    """
    # Parse parameters
    turns_memory = config.getint('chatbot', 'turns_memory')
    logger.info("Running the chatbot...")
    turns = []
    print(
        "Bot >>>",
        "Just start texting me. If I'm getting annoying, type \"Bye\". To quit the chat type \"Quit\"."
    )
    while True:
        prompt = input("User >>> ")
        if turns_memory == 0:
            # If you still get different responses then set seed
            turns = []
        if prompt.lower() == 'bye':
            print("Bot >>>", "Bye")
            turns = []
            continue
        if prompt.lower() == 'quit':
            break
        # A single turn is a group of user messages and bot responses right after
        current_turn = {'user_messages': [prompt], 'bot_messages': []}
        turns.append(current_turn)
        # Merge turns into a single history (don't forget EOS token)
        start = max(len(turns) - turns_memory - 1, 0) if turns_memory >= 0 else 0
        fragments = []
        for past in turns[start:]:
            # Each turn begins with user messages, then bot replies.
            fragments.extend(past['user_messages'])
            fragments.extend(past['bot_messages'])
        history = "".join(text + tokenizer.eos_token for text in fragments)
        # Generate the bot reply (this variant returns a single message).
        bot_message = generate_response(model, tokenizer, history, config)
        print("Bot >>>", bot_message)
        current_turn['bot_messages'].append(bot_message)
def chat_loop(chat, model, tokenizer, config, mmi_model=None, mmi_tokenizer=None, spyMode=False):
    """Drive an Omegle-style chat session forever, logging to omegleCHAT.txt.

    Outer loop starts a fresh chat whenever one ends. In spy mode the
    server-supplied opening question is answered immediately; otherwise the
    bot opens with "Hey!". Within a connected chat, partner messages are
    answered from the accumulated turn history.
    """
    # Fix: the log file was opened but never closed; a context manager
    # guarantees it is flushed and closed even if the loop raises.
    with open('omegleCHAT.txt', 'w+') as file:
        while True:
            # Start a new chat every time the old one ends
            # Parse parameters
            num_samples = config.getint('decoder', 'num_samples')
            max_turns_history = config.getint('decoder', 'max_turns_history')
            logger.info('Running the chatbot...')
            turns = []
            print('- Starting chat -')
            chat.start()
            while True:
                (event, argument) = chat.get_event()
                if event == ChatEvent.CHAT_WAITING:
                    print('- Waiting for a partner -')
                elif event == ChatEvent.CHAT_READY:
                    file.write('- Chat started with user - \r\n')
                    print('- Connected to a partner -')
                    if spyMode:
                        # Spy mode: `argument` carries the opening question.
                        chat.start_typing()
                        response = generate_response(
                            model,
                            tokenizer,
                            argument + tokenizer.eos_token,
                            config,
                            mmi_model=mmi_model,
                            mmi_tokenizer=mmi_tokenizer,
                        )
                        chat.send(response)
                        print("Bot: {}".format(response))
                        file.write("(SPYMODE)Bot: {} \r\n".format(response))
                        chat.stop_typing()
                    else:
                        print("Bot: Hey!")
                        chat.send("Hey!")
                        file.write("Bot: Hey \r\n")
                    break
            # Connected to a partner
            while True:
                (event, argument) = chat.get_event()
                if event == ChatEvent.GOT_SERVER_NOTICE:
                    notice = argument
                    print('- Server notice: {} -'.format(notice))
                elif event == ChatEvent.PARTNER_STARTED_TYPING:
                    print('- Partner started typing -')
                elif event == ChatEvent.PARTNER_STOPPED_TYPING:
                    print('- Partner stopped typing -')
                elif event == ChatEvent.GOT_MESSAGE:
                    message = argument
                    print('Partner: {}'.format(message))
                    prompt = message
                    chat.start_typing()
                    if max_turns_history == 0:
                        # If you still get different responses then set seed
                        turns = []
                    if prompt.lower() == 'bye':
                        print('Bot >>>', 'Bye')
                        turns = []
                        continue
                    if prompt.lower() == 'quit':
                        break
                    # A single turn is a group of user messages and bot responses right after
                    turn = {'user_messages': [], 'bot_messages': []}
                    turns.append(turn)
                    turn['user_messages'].append(prompt)
                    # Merge turns into a single history (don't forget EOS token).
                    # Renamed loop vars: the original reused `turn`/`message`
                    # here, clobbering the current turn and the partner text
                    # (the log below was only correct by coincidence).
                    history = ''
                    from_index = (max(len(turns) - max_turns_history - 1, 0)
                                  if max_turns_history >= 0 else 0)
                    for hist_turn in turns[from_index:]:
                        # Each turn begins with user messages
                        for hist_msg in hist_turn['user_messages']:
                            history += hist_msg + tokenizer.eos_token
                        for hist_msg in hist_turn['bot_messages']:
                            history += hist_msg + tokenizer.eos_token
                    print('generating response')
                    # Generate bot messages
                    bot_messages = generate_response(
                        model,
                        tokenizer,
                        history,
                        config,
                        mmi_model=mmi_model,
                        mmi_tokenizer=mmi_tokenizer,
                    )
                    if num_samples == 1:
                        bot_message = bot_messages[0]
                    else:
                        # TODO: Select a message that is the most appropriate given the context
                        # This way you can avoid loops
                        bot_message = random.choice(bot_messages)
                    chat.stop_typing()
                    chat.send(bot_message)
                    print('Bot: {}!'.format(bot_message))
                    file.write('User: {} \r\n'.format(message))
                    file.write('Bot: {} \r\n'.format(bot_message))
                    turn['bot_messages'].append(bot_message)
                elif event == ChatEvent.CHAT_ENDED:
                    print('- Chat ended -')
                    file.write('- Chat ended with user - \r\n')
                    break
def message(self, update, context):
    """Telegram handler: reply to a user message as text, GIF, or image.

    Per-chat history lives in `context.chat_data['turns']`. Candidate
    replies are re-sampled until one passes the spam/repetition filter;
    the resample loop is now bounded so it cannot spin forever.
    """
    # Parse parameters
    num_samples = self.config.getint('decoder', 'num_samples')
    max_turns_history = self.config.getint('decoder', 'max_turns_history')
    if 'turns' not in context.chat_data:
        context.chat_data['turns'] = []
    turns = context.chat_data['turns']
    user_message = update.message.text
    # Cap very long inputs. NOTE(review): this keeps 127 chars although the
    # guard triggers at 128 — presumably 128 was intended; confirm.
    if len(user_message) >= 128:
        user_message = user_message[0:127]
    if user_message.lower() == 'bye':
        # Restart chat
        context.chat_data['turns'] = []
        update.message.reply_text("Bye")
        return None
    return_gif = False
    return_porn = False
    if '@p**n' in user_message:
        # Strip the marker and deliver the reply as an image.
        return_porn = True
        user_message = user_message.replace('@p**n', '').strip()
    if '@gif' in user_message:
        # Strip the marker and deliver the reply as a GIF.
        return_gif = True
        user_message = user_message.replace('@gif', '').strip()
    if max_turns_history == 0:
        # No memory: forget previous turns. Fix: clear() instead of
        # rebinding, so the local `turns` alias and the stored list stay
        # the same object (previously the new turn went to a list no
        # longer stored in chat_data).
        turns.clear()
    # A single turn is a group of user messages and bot responses right after
    turn = {'user_messages': [], 'bot_messages': []}
    turns.append(turn)
    turn['user_messages'].append(user_message)
    print(f"{update.effective_message.chat.username} - User >>> {user_message}")
    # Merge turns into a single history (don't forget EOS token)
    history = ""
    from_index = max(len(turns) - max_turns_history - 1, 0) if max_turns_history >= 0 else 0
    for past_turn in turns[from_index:]:
        # Each turn begins with user messages
        for msg in past_turn['user_messages']:
            history += gpt_normalize(msg) + self.tokenizer.eos_token
        for msg in past_turn['bot_messages']:
            history += gpt_normalize(msg) + self.tokenizer.eos_token
    max_attempts = 8  # Fix: bound the resample loop; it could previously spin forever.
    attempts = 0
    done = False
    while not done:
        attempts += 1
        # Generate bot messages
        bot_messages = generate_response(
            self.model, self.tokenizer, history, self.config,
            mmi_model=self.mmi_model, mmi_tokenizer=self.mmi_tokenizer
        )
        if num_samples == 1:
            bot_message = bot_messages[0]
        else:
            # TODO: Select a message that is the most appropriate given the context
            # This way you can avoid loops
            bot_message = random.choice(bot_messages)
        # Repetition filter: reject a candidate whose leading words overlap
        # more than 66% with any message in the visible history.
        acceptable = True
        previous_msgs = []
        for past_turn in turns[from_index:]:
            previous_msgs.extend(past_turn['user_messages'])
            previous_msgs.extend(past_turn['bot_messages'])
        candidate_words = bot_message.split(' ')
        for msg in previous_msgs:
            words = msg.split(' ')
            maxlen = min(len(words), len(candidate_words))
            same = 0
            # NOTE(review): range stops at maxlen-1, so the last comparable
            # word is never checked — confirm whether that is intended.
            for i in range(0, maxlen - 1):
                if words[i] == candidate_words[i]:
                    same += 1
            if same / maxlen > 0.66:
                acceptable = False
        # Also reject spam-like candidates mentioning "kik" or "DM".
        if 'kik' not in bot_message.lower() and 'DM' not in bot_message.upper() and acceptable:
            done = True
        elif attempts >= max_attempts:
            # Give up filtering rather than loop forever; send the last sample.
            done = True
    turn['bot_messages'].append(bot_message)
    if return_gif:
        # Return response as GIF
        gif_url = translate_message_to_gif(user_message, self.config)
        print(f"{update.effective_message.chat.username} - Bot >>> (sends) " + user_message + " gif! :)")
        context.bot.send_animation(update.effective_message.chat_id, gif_url)
    elif return_porn:
        # Return response as an image
        porn_url = translate_message_to_porn(user_message, self.config)
        print(f"{update.effective_message.chat.username} - Bot >>> (sends) " + user_message + " p**n! :)")
        context.bot.send_photo(update.effective_message.chat_id, porn_url)
    else:
        # Return response as text
        print(f"{update.effective_message.chat.username} - Bot >>> {bot_message}")
        update.message.reply_text(bot_message)