Example #1
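# (Excerpt starts mid-call: the keyword arguments below complete the
# construction of regression_model; the call head and imports are truncated.)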
                            const_lr=args.no_schedule,
                            double_bias_lr=args.double_bias_lr,
                            model_variance=args.model_variance,
                            input_dim=dataset.D,
                            output_dim=output_dim,
                            apply_var=args.noise_var,
                            **model_cfg.kwargs,
                            **extra_args)

mname = args.model
if args.swag:
    # Tag the model name with the subspace type and inference method so SWAG
    # runs are distinguishable in the results database.
    mname = mname + args.subspace + args.inference

# argparse.Namespace doubles here as a lightweight record of the run settings.
bb_args = argparse.Namespace(model=mname,
                             dataset=args.dataset,
                             split=args.split,
                             seed=args.seed,
                             database_path=args.database_path)

bb_result = run(bb_args,
                data=dataset,
                model=regression_model,
                is_test=(args.database_path == ''))
print([(k, bb_result[k]) for k in sorted(bb_result)])

utils.save_checkpoint(args.dir,
                      args.epochs,
                      model_state_dict=regression_model.model.state_dict(),
                      optimizer=regression_model.optimizer.state_dict(),
                      result=bb_result)
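
utils.save_checkpoint is a repo-specific helper that these excerpts never define. A minimal sketch of such a helper, assuming it simply bundles its keyword arguments with the epoch and writes them via torch.save (the filename pattern is an assumption):

import os
import torch

def save_checkpoint(directory, epoch, name='checkpoint', **kwargs):
    # Bundle everything passed by keyword (state dicts, results, ...) into
    # one dict, stamp it with the epoch, and serialize it to disk.
    state = dict(kwargs, epoch=epoch)
    filepath = os.path.join(directory, '%s-%d.pt' % (name, epoch))
    torch.save(state, filepath)
    return filepath

Loading is then the usual torch.load followed by the load_state_dict calls seen in Example #3.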
Example #2
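# (Excerpt starts mid-call: the lines below close a truncated
# tabulate.tabulate(...) call in the matching `if` branch.)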
                              tablefmt='simple',
                              floatfmt='8.4f'))
    else:
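        # tablefmt='plain' renders one header line followed by one data row;
        # .split('\n')[1] keeps only the row, so the header is printed once
        # (by the `if` branch) and later calls append bare rows under it.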
        print(
            tabulate.tabulate([values],
                              columns,
                              tablefmt='plain',
                              floatfmt='8.4f').split('\n')[1])

print("sigma:",
      torch.nn.functional.softplus(vi_model.inv_softplus_sigma.detach().cpu()))
if not args.no_mu:
    print("mu:", vi_model.mu.detach().cpu().data)

utils.save_checkpoint(args.dir,
                      epoch,
                      name='vi',
                      state_dict=vi_model.state_dict())

eval_model = model_cfg.base(*model_cfg.args,
                            num_classes=num_classes,
                            **model_cfg.kwargs)
eval_model.to(args.device)

num_samples = args.num_samples

# One row per query example: (number of meta-test tasks) times the query-set
# size per task, read off the last field of the first task.
query_length = len(meta_testloader.dataset) * len(
    meta_testloader.dataset[0][-1])
ens_predictions = np.zeros((query_length, args.n_ways))
targets = np.zeros((query_length, args.n_ways))

#printf, logfile = utils.get_logging_print(os.path.join(args.dir, args.log_fname + '-%s.txt'))
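
The zeroed buffers above hold one averaged prediction and one one-hot target per query example. A sketch of how they are typically filled, averaging the softmax outputs of num_samples posterior weight samples; vi_model.sample() and query_loader are hypothetical stand-ins for the repo's actual sampling API and meta-test loader:

import torch
import torch.nn.functional as F

for _ in range(num_samples):
    vi_model.sample()  # hypothetical: write one posterior weight sample into eval_model
    offset = 0
    for inputs, labels in query_loader:  # hypothetical loader over all query examples
        with torch.no_grad():
            probs = F.softmax(eval_model(inputs.to(args.device)), dim=1)
        k = inputs.size(0)
        # Running average over the num_samples ensemble members.
        ens_predictions[offset:offset + k] += probs.cpu().numpy() / num_samples
        targets[offset:offset + k] = F.one_hot(labels, args.n_ways).float().numpy()
        offset += k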
Example #3
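# (Excerpt starts inside a truncated resume branch that mirrors the
# swag_resume branch below: restore model and optimizer state.)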
    model.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer'])

if args.swag and args.swag_resume is not None:
    checkpoint = torch.load(args.swag_resume)
    # Reset the subspace rank before loading; the restored state dict
    # carries the saved rank.
    swag_model.subspace.rank = torch.tensor(0)
    swag_model.load_state_dict(checkpoint['state_dict'])

columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'te_loss', 'te_acc', 'time', 'mem_usage']
if args.swag:
    columns = columns[:-2] + ['swa_te_loss', 'swa_te_acc'] + columns[-2:]  # SWA metrics go before time/mem_usage
    swag_res = {'loss': None, 'accuracy': None}

utils.save_checkpoint(
    args.dir,
    start_epoch,
    state_dict=model.state_dict(),
    optimizer=optimizer.state_dict()
)

sgd_ens_preds = None
sgd_targets = None
n_ensembled = 0.0  # running count of SGD iterates averaged into sgd_ens_preds

for epoch in range(start_epoch, args.epochs):
    time_ep = time.time()

    if not args.no_schedule:
        lr = schedule(epoch, args)
        utils.adjust_learning_rate(optimizer, lr)
    else:
        lr = args.lr_init
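
Both schedule and utils.adjust_learning_rate live elsewhere in the repo. SWA/SWAG training scripts typically use a piecewise-linear decay, so a plausible sketch (the args.swa_lr field and the 0.5/0.9 breakpoints are assumptions carried over from common SWA code):

def schedule(epoch, args):
    # Full lr for the first half of training, linear decay until 90%,
    # then hold at the final ratio.
    t = epoch / args.epochs
    lr_ratio = args.swa_lr / args.lr_init if args.swag else 0.01
    if t <= 0.5:
        factor = 1.0
    elif t <= 0.9:
        factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4
    else:
        factor = lr_ratio
    return args.lr_init * factor

def adjust_learning_rate(optimizer, lr):
    # Standard PyTorch idiom: overwrite lr in every parameter group.
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr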