Example #1
    def create_approximate_posterior():
        if args.approximate_posterior_type == 'diagonal-normal':
            # Mean-field case: a convolutional encoder maps the context to
            # the mean and log-std of a diagonal Gaussian over the latents.
            context_encoder = nn_.ConvEncoder(
                context_features=args.context_features,
                channels_multiplier=16,
                dropout_probability=args.dropout_probability_encoder_decoder)
            approximate_posterior = distributions_.ConditionalDiagonalNormal(
                shape=[args.latent_features], context_encoder=context_encoder)

        else:
            # Flow case: a linear encoder parameterizes the conditional base
            # Gaussian, which a stack of flow steps then transforms.
            context_encoder = nn.Linear(args.context_features,
                                        2 * args.latent_features)
            distribution = distributions_.ConditionalDiagonalNormal(
                shape=[args.latent_features], context_encoder=context_encoder)

            # Each step pairs a linear transform with a conditional base
            # transform; a final linear transform closes the stack.
            transform = transforms.CompositeTransform([
                transforms.CompositeTransform([
                    create_linear_transform(),
                    create_base_transform(
                        i, context_features=args.context_features)
                ]) for i in range(args.num_flow_steps)
            ])
            transform = transforms.CompositeTransform(
                [transform, create_linear_transform()])
            # Inverting the transform makes sampling use its forward pass.
            approximate_posterior = flows.Flow(
                transforms.InverseTransform(transform), distribution)

        return approximate_posterior
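
The returned object implements the nflows Distribution interface, so latents and their log-densities under q(z | x) come from a single call. A rough sketch (assuming the flow branch, whose encoder expects flat context vectors; the batch below is hypothetical, not from the original code):

import torch

# Hypothetical batch of 32 flat context vectors, as nn.Linear expects.
context = torch.randn(32, args.context_features)

# One latent sample per context item, with its log-density under q.
latents, log_q = approximate_posterior.sample_and_log_prob(
    num_samples=1, context=context)
# latents: [32, 1, latent_features]; log_q: [32, 1]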
Example #2
def create_flow(c, h, w, flow_checkpoint, _log):
    distribution = distributions.StandardNormal((c * h * w, ))
    transform = create_transform(c, h, w)

    flow = flows.Flow(transform, distribution)

    _log.info('There are {} trainable parameters in this model.'.format(
        utils.get_num_parameters(flow)))

    if flow_checkpoint is not None:
        flow.load_state_dict(torch.load(flow_checkpoint))
        _log.info('Flow state loaded from {}'.format(flow_checkpoint))

    return flow
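
Once built, the flow scores flattened images directly. A minimal usage sketch (the dimensions and batch below are placeholders, not part of the original code):

import math

import torch

# Hypothetical CIFAR-10-like dimensions; flow is the object returned above.
c, h, w = 3, 32, 32
x = torch.rand(8, c * h * w)        # a batch of flattened images

log_p = flow.log_prob(x)            # log-density per image, shape [8]
# Bits per dimension, ignoring any dequantization correction.
bpd = -log_p.mean() / (math.log(2) * c * h * w)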
Example #3
    def create_prior():
        if args.prior_type == 'standard-normal':
            prior = distributions_.StandardNormal((args.latent_features, ))

        else:
            # Flow prior: transform a standard Gaussian with a stack of flow
            # steps, each pairing a linear and a base transform.
            distribution = distributions_.StandardNormal(
                (args.latent_features, ))
            transform = transforms.CompositeTransform([
                transforms.CompositeTransform(
                    [create_linear_transform(),
                     create_base_transform(i)])
                for i in range(args.num_flow_steps)
            ])
            transform = transforms.CompositeTransform(
                [transform, create_linear_transform()])
            prior = flows.Flow(transform, distribution)

        return prior
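
Both branches return an nflows Distribution, so downstream code can sample from the prior without caring which branch was taken. A minimal sketch using the prior built above:

# Draw latents and their log-densities from p(z); the call is identical
# whether the prior is a plain Gaussian or a flow.
z, log_p = prior.sample_and_log_prob(num_samples=64)
# z: [64, latent_features]; log_p: [64]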
Example #4
        raise ValueError


def create_transform():
    transform = transforms.CompositeTransform([
        transforms.CompositeTransform(
            [create_linear_transform(),
             create_base_transform(i)]) for i in range(args.num_flow_steps)
    ] + [create_linear_transform()])
    return transform


# create model
distribution = distributions.StandardNormal((features, ))
transform = create_transform()
flow = flows.Flow(transform, distribution).to(device)

n_params = utils.get_num_parameters(flow)
print('There are {} trainable parameters in this model.'.format(n_params))

# create optimizer
optimizer = optim.Adam(flow.parameters(), lr=args.learning_rate)
if args.anneal_learning_rate:
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     args.num_training_steps,
                                                     0)
else:
    scheduler = None

# create summary writer and write to log directory
timestamp = cutils.get_timestamp()
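
The snippet cuts off here, but one optimization step for this setup would plausibly look like the sketch below (the random batch is a stand-in for a real data loader, not part of the original code):

import torch

flow.train()
# Hypothetical stand-in for one batch from a real data loader.
batch = torch.randn(256, features, device=device)

optimizer.zero_grad()
loss = -flow.log_prob(batch).mean()   # maximum likelihood = minimize NLL
loss.backward()
optimizer.step()
if scheduler is not None:
    scheduler.step()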
Example #5
        apply_unconditional_transform=args.apply_unconditional_transform)
    return base_transform


transform = transforms.CompositeTransform([
    transforms.CompositeTransform(
        [create_linear_transform(),
         create_base_transform(i)]) for i in range(args.num_flow_steps)
] + [create_linear_transform()])

device = torch.device('cpu')
torch.set_default_tensor_type('torch.DoubleTensor')

base_dist = distributions.TweakedUniform(torch.tensor([-np.pi] * feature_dim),
                                         torch.tensor([np.pi] * feature_dim))
flow = flows.Flow(transform, base_dist).to(device)

num_steps = 3000
data = torch.load("./output/flow_step_{}.pt".format(num_steps),
                  map_location=torch.device('cpu'))
flow.load_state_dict(data['state_dict'])
flow.eval()

## compute pq_logp and pq_logq
pq_logp = vmf.log_prob(xyz).numpy() + np.log(np.abs(np.cos(feature[:, 0])))
with torch.no_grad():
    pq_logq = flow.log_prob(torch.from_numpy(feature), None).numpy()

## compute qp_logq and qp_logp
N = feature.shape[0]
z = base_dist.sample(N, None)
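
The snippet is cut off here, but the four arrays are the standard ingredients for Monte Carlo KL estimates between the target density p and the flow q. A sketch, assuming the qp_* arrays are completed analogously to the pq_* ones above:

import numpy as np

# Forward KL, estimated on samples from p:  E_p[log p - log q].
kl_pq = np.mean(pq_logp - pq_logq)
# Reverse KL, estimated on samples from q:  E_q[log q - log p].
kl_qp = np.mean(qp_logq - qp_logp)
print('KL(p||q) ~ {:.4f}   KL(q||p) ~ {:.4f}'.format(kl_pq, kl_qp))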