def main():
    """Entry point: read the config, load the model(s), and start the Telegram bot."""
    # The config file path may be overridden on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default="chatbot.cfg")
    cli_args = parser.parse_args()

    # Parse the configuration file.
    cfg = configparser.ConfigParser(allow_no_value=True)
    with open(cli_args.config) as cfg_file:
        cfg.read_file(cfg_file)

    # Fetch and initialize the primary dialogue model.
    main_folder = download_model_folder(cfg)
    model, tokenizer = load_model(main_folder, cfg)

    # The reverse (MMI) model is optional; load it only when enabled in the config.
    mmi_model, mmi_tokenizer = None, None
    if cfg.getboolean('model', 'use_mmi'):
        mmi_folder = download_reverse_model_folder(cfg)
        mmi_model, mmi_tokenizer = load_model(mmi_folder, cfg)

    # Hand everything to the Telegram bot and enter its chat loop.
    bot = TelegramBot(model, tokenizer, cfg, mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer)
    bot.run_chat()
def main():
    """Entry point: read the config, load the model(s), and run the console chat loop.

    ``spymode`` selects between the "spyee" game chat and a random chat; it is
    currently hard-coded to False, matching the original behavior.
    """
    spymode = False

    # Script arguments can include the path of the config.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default='chatbot.cfg')
    args = arg_parser.parse_args()

    # Read the config.
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load the main model.
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load the reverse (MMI) model only when enabled.
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Only the chat implementation differs between modes; the chat_loop call
    # was previously duplicated byte-for-byte in both branches.
    chat = SpyeeChat() if spymode else RandomChat()
    chat_loop(chat, model, tokenizer, config, mmi_model=mmi_model,
              mmi_tokenizer=mmi_tokenizer, spyMode=spymode)
def main():
    """Entry point for the Discord bot: set up shared globals, load models, run.

    The globals declared below are shared with the bot's event handlers
    elsewhere in this module, so they are initialized here before run_chat().
    """
    global translator
    global num_samples
    global max_turns_history
    global model
    global tokenizer
    global mmi_model
    global mmi_tokenizer
    global config
    global number_of_messages
    global number_of_sent_messages
    global number_of_servers
    global history_dict
    global token

    # SECURITY: never hard-code a real API token in source control. Prefer the
    # DISCORD_TOKEN environment variable; the placeholder string is kept only
    # as a backward-compatible fallback for the original edit-the-source flow.
    import os
    token = os.environ.get("DISCORD_TOKEN", "TOKEN_GOES_HERE")
    # Conversation history, keyed per chat (presumably per channel/server —
    # TODO confirm against the handlers that populate it).
    history_dict = {}

    # Script arguments can include the path of the config.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default="chatbot.cfg")
    args = arg_parser.parse_args()

    # Read the config.
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load the main model.
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load the reverse (MMI) model only when enabled.
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Run the chatbot with GPT-2.
    run_chat()
def main():
    """Entry point: read the config, load the model(s), and start the Telegram bot."""
    # Script arguments can include the path of the config.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default="chatbot.cfg")
    args = arg_parser.parse_args()

    # Read the config.
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load the main model. (A stale commented-out workaround for
    # skipping the download was removed here; download_model_folder is the
    # single source of truth for the target folder name.)
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load the reverse (MMI) model only when enabled.
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Run the Telegram bot.
    bot = TelegramBot(model, tokenizer, config, mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer)
    bot.run_chat()
arg_parser.add_argument('--port', type=str, default="5011") args = arg_parser.parse_args() # Read the config config = configparser.ConfigParser(allow_no_value=True) with open(args.config) as f: config.read_file(f) # Download and load main model target_folder_name = download_model_folder(config) model, tokenizer = load_model(target_folder_name, config) # Download and load reverse model use_mmi = config.getboolean('model', 'use_mmi') if use_mmi: mmi_target_folder_name = download_reverse_model_folder(config) mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config) else: mmi_model = None mmi_tokenizer = None @app.route('/query') def query(): # Parse parameters num_samples = config.getint('decoder', 'num_samples') max_turns_history = config.getint('decoder', 'max_turns_history') # app.logger.info("Running the chatbot...") turns = [] question = request.args.get('question') # process question from_index = max(len(turns)-max_turns_history-1, 0) if max_turns_history >= 0 else 0