# Example #1
# 0
# --- Vocabulary / context sizing ---
n_special = len(special)
# Total context length is the sum of the event and effect context windows.
n_ctx = context_size_event + context_size_effect
# Vocabulary size = base encoder vocabulary plus n_ctx extra slots
# (presumably one embedding slot per context position — TODO confirm).
n_vocab = len(text_encoder.encoder) + n_ctx

print(data_loader.__dict__.keys())
opt.net.vSize = n_vocab  # record final vocab size on the model options

print("Building Model")

print(opt.exp)

# Build the model with 0 special tokens; no checkpoint auto-load
# (weights are applied explicitly below), configured to return
# probabilities (return_probs=True) rather than activations.
model = models.make_model(
    opt, n_vocab, n_ctx, 0, load=False, return_acts=False, return_probs=True)

# Apply pretrained weights from the loaded checkpoint dict.
models.load_state_dict(model, model_stuff["state_dict"])

if config.gpu_mode:
    print("Pushing to GPU: {}".format(config.gpu_index))
    cfg.device = config.gpu_index
    cfg.do_gpu = True
    torch.cuda.set_device(cfg.device)
    model.cuda(cfg.device)
    print("Done.")

model.eval()  # inference mode: disables dropout etc.

# NOTE(review): when gpu_mode is set the model was already moved via
# model.cuda(cfg.device) above, making this .to(device) redundant; when
# gpu_mode is NOT set this relies on cfg.device having a usable default —
# confirm against cfg's definition.
device = cfg.device
model.to(device)

random.seed(args.seed)  # seed Python RNG for reproducibility
        # Report evaluation-set size before category pruning.
        print("Original number of evaluation sequences: {}".format(
            len(data_loader.sequences[split]["total"])))

        # Restrict the split to the requested categories; category names are
        # wrapped as "<cat>" tokens before being passed to the pruner.
        adata.prune_data_for_evaluation(
            data_loader, ["<{}>".format(cat) for cat in opt.eval.categories],
            split)

        # Report size after pruning for comparison with the original count.
        print("Pruned number of evaluation sequences for subset: {}".format(
            len(data_loader.sequences[split]["total"])))

    print("Building Model")

    # Build the evaluation model; load=False means weights are not
    # auto-loaded here — they are applied explicitly just below.
    model = models.make_model(opt, n_vocab, n_ctx, n_special, load=False)

    print("Loading Weights")
    models.load_state_dict(model, model_file["state_dict"])

    print("Done Loading Weights")

    model.eval()  # inference mode: disables dropout etc.

    # Initialize variable for # of examples to cycle through
    data.set_max_sizes(data_loader, force_split=split)

    evaluator = evaluate.make_evaluator(opt, model, data_loader)
    evaluator.batch_variables["split"] = split
    # NOTE(review): unconditional GPU push (no gpu_mode guard as in the setup
    # above) — assumes CUDA is available and cfg.device is set; confirm.
    model.cuda(cfg.device)

    # Run one full evaluation epoch over `split` and persist the losses.
    loss = evaluator.epoch(opt, model, data_loader, split)

    data.save_eval_file(opt, loss, "losses", split=split)