Example 1
import os

import torch

start_epoch = 0

# Restore model
if args.load != '':
    # Scan backwards from epoch 299 so the most recent checkpoint found is restored.
    for i in range(300 - 1, -1, -1):
        if 'baseline' in args.method_name:
            subdir = 'baseline'
        elif 'oe_tune' in args.method_name:
            subdir = 'oe_tune'
        else:
            subdir = 'oe_scratch'

        model_name = os.path.join(args.load, subdir, args.method_name + '_epoch_' + str(i) + '.pt')
        if os.path.isfile(model_name):
            net.load_state_dict(torch.load(model_name))
            print('Model restored! Epoch:', i)
            start_epoch = i + 1
            break
    if start_epoch == 0:
        assert False, "could not resume"

net.eval()

if args.ngpu > 1:
    net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

if args.ngpu > 0:
    net.cuda()
    # torch.cuda.manual_seed(1)
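
The restore loop above only finds a checkpoint if training wrote files with a matching naming scheme. Below is a minimal sketch of the saving side, assuming a hypothetical args.save root directory and the same '<method_name>_epoch_<i>.pt' convention; the original training script is not shown here.

import os

import torch

def save_checkpoint(net, args, epoch, subdir):
    # Write the state dict under <args.save>/<subdir>/<method_name>_epoch_<epoch>.pt
    # (args.save is an assumed flag; adjust it to the real training script).
    save_dir = os.path.join(args.save, subdir)
    os.makedirs(save_dir, exist_ok=True)
    model_name = os.path.join(save_dir, args.method_name + '_epoch_' + str(epoch) + '.pt')
    torch.save(net.state_dict(), model_name)

    # Optionally drop the previous epoch so only the latest checkpoint is kept.
    prev = os.path.join(save_dir, args.method_name + '_epoch_' + str(epoch - 1) + '.pt')
    if os.path.exists(prev):
        os.remove(prev)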
Example 2
    return file_list

if __name__ == "__main__":

    opt = eval_parse()

    device = torch.device("cuda" if cuda.is_available() else "cpu")
    train_iter, val_iter = dataset.setup(opt)

    # Get list of model paths
    checkpoints = get_model_paths(opt.checkpoint) 

    for path in checkpoints:

        model = ConvNet(opt.nClasses)
        model.load_state_dict(torch.load(path, map_location=device))
        model = model.to(device)

        line = "Loading/evaluating model {}".format(path)
        sys.stderr.write('\r\033[K' + line)
        sys.stderr.flush()

        trainer = Trainer(model, None, train_iter, val_iter, opt)
        error_rate = trainer.val()

        line ="Error rate: {:.2f} | Model: {}\n".format(error_rate, path)
        sys.stderr.write('\r\033[K' + line)
        sys.stderr.flush()
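
Example 2 relies on a get_model_paths helper whose body is truncated above (only its final 'return file_list' line survives). A minimal sketch of what such a helper could look like, assuming opt.checkpoint points at a directory of .pt files; this is an illustrative guess, not the original implementation.

import glob
import os

def get_model_paths(checkpoint_dir):
    # Collect every .pt checkpoint in the directory, sorted so models are
    # evaluated in a stable, reproducible order.
    file_list = sorted(glob.glob(os.path.join(checkpoint_dir, '*.pt')))
    return file_list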