Example #1
def main(args):
    # Load the models
    conf = getattr(configs, 'config_'+args.model)()
    model = torch.load(f='./output/{}/{}/{}/models/model_epo{}.pckl'.format(
        args.model, args.expname, args.dataset, args.reload_from))
    model.eval()
    # Set the random seed manually for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(args.seed)
    else:
        print("Note that our pre-trained models require CUDA to evaluate.")
    
    
    # Load data
    data_path = args.data_path + args.dataset + '/'
    glove_path = args.data_path+'glove.twitter.27B.200d.txt'
    corpus = getattr(data, args.dataset+'Corpus')(data_path, wordvec_path=glove_path, wordvec_dim=conf['emb_size'])
    dials, metas = corpus.get_dialogs(), corpus.get_metas()
    test_dial, test_meta = dials.get("test"), metas.get("test")
    # convert the dialogs to numeric inputs/outputs that fit the models
    test_loader = getattr(data, args.dataset+'DataLoader')("Test", test_dial, test_meta, conf['maxlen'])
    test_loader.epoch_init(1, conf['diaglen'], 1, shuffle=False)  
    ivocab = corpus.vocab
    vocab = corpus.ivocab
    
    metrics = Metrics(corpus.word2vec)
    
    f_eval = open("./output/{}/{}/{}/results.txt".format(args.model, args.expname, args.dataset), "w")
    repeat = args.n_samples
    
    evaluate(model, metrics, test_loader, vocab, ivocab, f_eval, repeat)
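
For context: main() above only reads attributes of args. A minimal, hypothetical argparse entry point that would drive it is sketched below; the option names match the attributes used in the snippet, but every default value is illustrative rather than taken from the original project.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # --model and --dataset must name entries that exist in the project's
    # models, configs and data modules (they are resolved via getattr above).
    parser.add_argument('--model', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--expname', type=str, default='basic')      # illustrative default
    parser.add_argument('--data_path', type=str, default='./data/')  # illustrative default
    parser.add_argument('--reload_from', type=int, default=40)       # epoch of the saved checkpoint (illustrative)
    parser.add_argument('--seed', type=int, default=1111)            # illustrative default
    parser.add_argument('--n_samples', type=int, default=10)         # passed to evaluate() as repeat (illustrative)
    args = parser.parse_args()
    main(args)
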
    "valid"), metas.get("test")
train_loader = getattr(data, args.dataset + 'DataLoader')("Train", train_dial,
                                                          train_meta,
                                                          config['maxlen'])
valid_loader = getattr(data, args.dataset + 'DataLoader')("Valid", valid_dial,
                                                          valid_meta,
                                                          config['maxlen'])
test_loader = getattr(data,
                      args.dataset + 'DataLoader')("Test", test_dial,
                                                   test_meta, config['maxlen'])

vocab = corpus.ivocab
ivocab = corpus.vocab
n_tokens = len(ivocab)

metrics = Metrics(corpus.word2vec)

print("Loaded data!")

###############################################################################
# Define the models
###############################################################################

model = getattr(models, args.model)(
    config, n_tokens) if args.reload_from < 0 else load_model(args.reload_from)
if use_cuda:
    model = model.cuda()

if corpus.word2vec is not None and args.reload_from < 0:
    print("Loaded word2vec")
    model.embedder.weight.data.copy_(torch.from_numpy(corpus.word2vec))
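
The load_model() helper used above is not shown in this example. A minimal sketch that would be consistent with the checkpoint layout of Example #1 (model_epo{N}.pckl under the experiment's output directory) is given below; that path layout is an assumption, not something this snippet confirms.

def load_model(epoch):
    # Assumed checkpoint path, mirroring Example #1; adjust to the real project layout.
    path = './output/{}/{}/{}/models/model_epo{}.pckl'.format(
        args.model, args.expname, args.dataset, epoch)
    return torch.load(f=path)
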
Example #3
    "valid"), metas.get("test")
train_loader = getattr(data, args.dataset + 'DataLoader')("Train", train_dial,
                                                          train_meta,
                                                          config['maxlen'])
valid_loader = getattr(data, args.dataset + 'DataLoader')("Valid", valid_dial,
                                                          valid_meta,
                                                          config['maxlen'])
test_loader = getattr(data,
                      args.dataset + 'DataLoader')("Test", test_dial,
                                                   test_meta, config['maxlen'])

vocab = corpus.ivocab
ivocab = corpus.vocab
n_tokens = len(ivocab)

metrics = Metrics(corpus.word2vec)  # use GloVe vectors to compute the avg/extrema/greedy embedding metrics

print("Loaded data!")

###############################################################################
# Define the models
###############################################################################

model = getattr(models, args.model)(
    config, n_tokens) if args.reload_from < 0 else load_model(args.reload_from)
if use_cuda:
    model = model.cuda()

if corpus.word2vec is not None and args.reload_from < 0:
    print("Loaded word2vec")
    model.embedder.weight.data.copy_(torch.from_numpy(corpus.word2vec))
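
The last two lines initialise the model's embedding table from the pretrained GloVe matrix held in corpus.word2vec. Below is a self-contained illustration of that one step; the vocabulary and embedding sizes are made up for the demo, and in the snippet the matrix has to match the shape of model.embedder.weight (presumably n_tokens rows by config['emb_size'] columns).

import numpy as np
import torch
import torch.nn as nn

n_tokens, emb_size = 10000, 200  # demo sizes only
pretrained = np.random.randn(n_tokens, emb_size).astype(np.float32)

embedder = nn.Embedding(n_tokens, emb_size)
# Same call pattern as in the example: copy the pretrained matrix into the weight tensor.
embedder.weight.data.copy_(torch.from_numpy(pretrained))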