def main():
    """Entry point: parse CLI args, load the model(s) per config, start the Telegram bot."""
    # The config file path may be overridden on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default="chatbot.cfg")
    cli_args = parser.parse_args()

    # Parse the chatbot configuration file.
    cfg = configparser.ConfigParser(allow_no_value=True)
    with open(cli_args.config) as cfg_file:
        cfg.read_file(cfg_file)

    # Fetch and load the forward (response-generation) model.
    model, tokenizer = load_model(download_model_folder(cfg), cfg)

    # Optionally fetch and load the reverse model (used when 'use_mmi' is set).
    mmi_model = mmi_tokenizer = None
    if cfg.getboolean('model', 'use_mmi'):
        mmi_model, mmi_tokenizer = load_model(download_reverse_model_folder(cfg), cfg)

    # Hand everything to the Telegram bot and start chatting.
    bot = TelegramBot(model, tokenizer, cfg, mmi_model=mmi_model, mmi_tokenizer=mmi_tokenizer)
    bot.run_chat()
# --- Example 2 ---
def main():
    """Entry point: load the GPT-2 model(s) described by the config and run the chat loop.

    With ``spymode`` enabled the loop talks through a SpyeeChat backend,
    otherwise through a RandomChat backend; everything else is identical.
    """
    spymode = False  # NOTE(review): hard-coded, so the SpyeeChat branch is currently never taken

    # Script arguments can include path of the config
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default='chatbot.cfg')
    args = arg_parser.parse_args()

    # Read the config
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load main model
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load reverse model (only when MMI reranking is enabled)
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Run chatbot with GPT-2.
    # Both branches previously issued an identical chat_loop(...) call; only the
    # chat backend differed, so pick the backend first and make the call once.
    chat = SpyeeChat() if spymode else RandomChat()
    chat_loop(chat,
              model,
              tokenizer,
              config,
              mmi_model=mmi_model,
              mmi_tokenizer=mmi_tokenizer,
              spyMode=spymode)
# --- Example 3 ---
def main():
    """Entry point for the Discord chatbot: parse args, read the config, load
    the GPT-2 model(s) into module globals, then start the chat loop.

    State is published via ``global`` names because the Discord event handlers
    (defined elsewhere in this module) presumably read them — TODO confirm.
    """
    global translator  # NOTE(review): never assigned in this function; declaration may be vestigial — confirm

    global num_samples
    global max_turns_history
    global model
    global tokenizer
    global mmi_model
    global mmi_tokenizer
    global config
    global number_of_messages
    global number_of_sent_messages
    global number_of_servers
    global history_dict
    global token

    # SECURITY(review): keep real tokens out of source control — prefer an
    # environment variable or config entry over editing this literal.
    token = "TOKEN_GOES_HERE"  # Replace TOKEN_GOES_HERE with your discord API bot token!
    history_dict = {}  # per-conversation history store; key scheme not visible here — confirm against handlers
    # Script arguments can include path of the config
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default="chatbot.cfg")
    args = arg_parser.parse_args()

    # Read the config
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load main model
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load reverse model (only needed when 'use_mmi' is set)
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Run chatbot with GPT-2; run_chat() takes no arguments and reads the globals set above.
    run_chat()
# --- Example 4 ---
def main():
    """Parse arguments, read the config, load the GPT-2 model, and start the chat."""
    # Allow the config path to be supplied on the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default="chatbot.cfg")
    opts = parser.parse_args()

    # Load chatbot settings from the config file.
    settings = configparser.ConfigParser(allow_no_value=True)
    with open(opts.config) as handle:
        settings.read_file(handle)

    # Fetch the model artifacts, then load model and tokenizer from them.
    artifacts_dir = download_model_folder(settings)
    model, tokenizer = load_model(artifacts_dir, settings)

    # Start the interactive GPT-2 chat.
    run_chat(model, tokenizer, settings)
# --- Example 5 ---
def main():
    """Entry point: parse CLI args, load the model(s) described by the config
    file, and run the Telegram bot."""
    # Script arguments can include path of the config
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--config', type=str, default="chatbot.cfg")
    args = arg_parser.parse_args()

    # Read the config
    config = configparser.ConfigParser(allow_no_value=True)
    with open(args.config) as f:
        config.read_file(f)

    # Download and load main model. A commented-out manual reconstruction of
    # the target folder name (to skip re-downloading) was removed as dead code;
    # download_model_folder() presumably no-ops when artifacts exist — TODO confirm.
    target_folder_name = download_model_folder(config)
    model, tokenizer = load_model(target_folder_name, config)

    # Download and load reverse model (only needed when 'use_mmi' is set)
    use_mmi = config.getboolean('model', 'use_mmi')
    if use_mmi:
        mmi_target_folder_name = download_reverse_model_folder(config)
        mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
    else:
        mmi_model = None
        mmi_tokenizer = None

    # Run Telegram bot
    bot = TelegramBot(model,
                      tokenizer,
                      config,
                      mmi_model=mmi_model,
                      mmi_tokenizer=mmi_tokenizer)
    bot.run_chat()
# --- Example 6 ---
# Flask application; the models and config below are loaded once at import
# time and shared by all request handlers.
app = Flask(__name__)

# Script arguments can include path of the config plus the bind address/port.
# NOTE: argparse runs at module import, so importing this module requires
# valid command-line arguments.
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--config', type=str, default="emlyon-chatbot.cfg")
arg_parser.add_argument('--host', type=str, default="0.0.0.0")
arg_parser.add_argument('--port', type=str, default="5011")
args = arg_parser.parse_args()

# Read the config
config = configparser.ConfigParser(allow_no_value=True)
with open(args.config) as f:
    config.read_file(f)

# Download and load main model (module-level so the route handlers can use it)
target_folder_name = download_model_folder(config)
model, tokenizer = load_model(target_folder_name, config)

# Download and load reverse model; only fetched when 'use_mmi' is enabled,
# otherwise the MMI slots stay None.
use_mmi = config.getboolean('model', 'use_mmi')
if use_mmi:
    mmi_target_folder_name = download_reverse_model_folder(config)
    mmi_model, mmi_tokenizer = load_model(mmi_target_folder_name, config)
else:
    mmi_model = None
    mmi_tokenizer = None

@app.route('/query')
def query():
    # Parse parameters
    num_samples = config.getint('decoder', 'num_samples')
# --- Example 7 ---
# Pick a random persona from the pool; it is only printed for reference,
# the hand-written custom persona below is what actually gets used.
personality = random.choice(personalities)
print(personality)
# Removed: an unused `l = chain(*personality)` (dead variable) and a
# commented-out logger line that duplicated the logging call below.
custom_personality_text = ['I am student from India', 'i work in the field of Computer science', 'i like playing cricket', 'In my free time I like conducting talks for students to learn', 
                            'I am a huge fan of IPL', 'I like talking, its fun to talk!' ]
custom_personality = list(map(tokenizer.encode, custom_personality_text))
print(custom_personality)
logger.info("Selected personality: %s", tokenizer.decode(chain(*custom_personality)))


# Reddit dialogue bot part: load a second, Reddit-trained GPT-2 model from
# a hard-coded config file.
config = configparser.ConfigParser(allow_no_value=True)
with open("chatbot.cfg") as f:
    config.read_file(f)
target_dir = download_model_folder(config)
model_reddit, tokenizer_reddit = load_model(target_dir, config)

def cosine_similarity(l1, l2):
    """Return the cosine similarity of two 1-D tensors as a 1x1 tensor.

    The dot product is computed via a (1,d) x (d,1) matrix multiply, then
    normalized by the product of the two vector norms.
    """
    dot = torch.mm(l1.unsqueeze(0), l2.unsqueeze(0).transpose(0, 1))
    return dot / (l1.norm() * l2.norm())


@app_flask.route('/get_response', methods=['GET', 'POST'])
def get_response():
    print(request.json)
    score = request.json['score']
    history = request.json['history']
    # if score < 0.5: