def lenwise(transforms):
    # Coupling along the length dimension (dim=2); `net` is the coupling
    # network defined just above (elided from this excerpt).
    transforms.append(AdditiveCouplingBijection(net, split_dim=2))
    if args.stochperm: transforms.append(StochasticPermutation(dim=2))
    else: transforms.append(Shuffle(L, dim=2))
    return transforms
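
# `dimwise` is called below but is not shown in this excerpt. A plausible
# counterpart to `lenwise`, coupling and permuting along the feature
# dimension (dim=1) instead, might look like this sketch:
def dimwise(transforms):
    transforms.append(AdditiveCouplingBijection(net, split_dim=1))
    if args.stochperm: transforms.append(StochasticPermutation(dim=1))
    else: transforms.append(Shuffle(D, dim=1))
    return transforms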


for _ in range(args.num_flows):
    if args.dimwise: transforms = dimwise(transforms)
    if args.lenwise: transforms = lenwise(transforms)
    if args.actnorm: transforms.append(ActNormBijection1d(2))

model = Flow(base_dist=StandardNormal((D, L)),
             transforms=transforms).to(args.device)
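
# The Flow's log_prob follows the change-of-variables formula:
# log p(x) = log p_base(z) + the sum of log|det Jacobian| terms
# accumulated by the transforms that map x to z.
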
if not args.train:
    state_dict = torch.load('models/{}.pt'.format(run_name))
    model.load_state_dict(state_dict)

#######################
## Specify optimizer ##
#######################

if args.optimizer == 'adam':
    optimizer = Adam(model.parameters(), lr=args.lr)
elif args.optimizer == 'adamax':
    optimizer = Adamax(model.parameters(), lr=args.lr)
else:
    raise ValueError('Unknown optimizer: {}'.format(args.optimizer))

if args.warmup is not None:
    scheduler_iter = LinearWarmupScheduler(optimizer, total_epoch=args.warmup)
else:
    scheduler_iter = None
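
# LinearWarmupScheduler is a project utility (implementation not shown
# here); the assumed behavior is a linear ramp of the learning rate
# from 0 to args.lr over the first args.warmup epochs, i.e. at step t:
#
#     lr_t = args.lr * min(1.0, t / args.warmup)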


###############
## Example 2 ##
###############

# `transforms` is built earlier in this example (construction elided).
if args.permutation == 'shuffle': transforms.append(Shuffle(D))
transforms.pop()  # drop the trailing permutation; it is redundant before the base distribution
if args.num_bits is not None:
    # Map onto (0, 1), then quantize to args.num_bits bits; `decoder`
    # (defined earlier, elided here) parameterizes the quantization surjection.
    transforms.append(Sigmoid())
    transforms.append(VariationalQuantization(decoder, num_bits=args.num_bits))

pi = Flow(base_dist=target, transforms=transforms).to(args.device)

p = StandardNormal(shape).to(args.device)

################
## Evaluation ##
################

state_dict = torch.load(path_check)
pi.load_state_dict(state_dict)

print('Running MCMC...')
samples, rate = metropolis_hastings(
    pi=pi,
    num_dims=args.num_dims,
    num_chains=eval_args.num_chains,
    num_samples=eval_args.num_samples,
    steps_per_sample=eval_args.steps_per_sample,
    burnin_steps=eval_args.burnin_steps,
    proposal_scale=eval_args.proposal_scale)
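
# `metropolis_hastings` is a project helper whose implementation is not
# shown here. Assuming the usual random-walk sampler, its core update is
# the standard Metropolis-Hastings step; a minimal single-chain sketch
# (names hypothetical):
def mh_step(x, log_prob, proposal_scale):
    x_prop = x + proposal_scale * torch.randn_like(x)  # Gaussian random-walk proposal
    log_alpha = log_prob(x_prop) - log_prob(x)         # log acceptance ratio
    if torch.log(torch.rand(1)) < log_alpha:           # accept with prob. min(1, ratio)
        return x_prop
    return x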

print('')
print('Projecting...')
num_chains, num_samples, dim = samples.shape
theta = torch.zeros(num_chains, num_samples, dim)