Example #1
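 # Method of a unittest.TestCase exercising nflows' Flow.log_prob (assumes
 # `from nflows.flows import base` and `from nflows import distributions, transforms`):
 # log_prob should return a tensor of shape [batch_size], with or without a context.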
 def test_log_prob(self):
     batch_size = 10
     input_shape = [2, 3, 4]
     context_shape = [5, 6]
     flow = base.Flow(
         transform=transforms.AffineScalarTransform(scale=2.0),
         distribution=distributions.StandardNormal(input_shape),
     )
     inputs = torch.randn(batch_size, *input_shape)
     maybe_context = torch.randn(batch_size, *context_shape)
     for context in [None, maybe_context]:
         with self.subTest(context=context):
             log_prob = flow.log_prob(inputs, context=context)
             self.assertIsInstance(log_prob, torch.Tensor)
             self.assertEqual(log_prob.shape, torch.Size([batch_size]))
Example #2
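 # Checks that Flow.transform_to_noise maps inputs to noise of the same shape,
 # both with and without a context tensor.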
 def test_transform_to_noise(self):
     batch_size = 10
     context_size = 20
     shape = [2, 3, 4]
     context_shape = [5, 6]
     flow = base.Flow(
         transform=transforms.AffineScalarTransform(scale=2.0),
         distribution=distributions.StandardNormal(shape),
     )
     inputs = torch.randn(batch_size, *shape)
     maybe_context = torch.randn(context_size, *context_shape)
     for context in [None, maybe_context]:
         with self.subTest(context=context):
             noise = flow.transform_to_noise(inputs, context=context)
             self.assertIsInstance(noise, torch.Tensor)
             self.assertEqual(noise.shape, torch.Size([batch_size] + shape))
Example #3
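 # Checks that sample_and_log_prob returns correctly shaped tensors and that its
 # log-probabilities match a separate log_prob call on the same samples.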
 def test_sample_and_log_prob(self):
     num_samples = 10
     input_shape = [2, 3, 4]
     flow = base.Flow(
         transform=transforms.AffineScalarTransform(scale=2.0),
         distribution=distributions.StandardNormal(input_shape),
     )
     samples, log_prob_1 = flow.sample_and_log_prob(num_samples)
     log_prob_2 = flow.log_prob(samples)
     self.assertIsInstance(samples, torch.Tensor)
     self.assertIsInstance(log_prob_1, torch.Tensor)
     self.assertIsInstance(log_prob_2, torch.Tensor)
     self.assertEqual(samples.shape, torch.Size([num_samples] + input_shape))
     self.assertEqual(log_prob_1.shape, torch.Size([num_samples]))
     self.assertEqual(log_prob_2.shape, torch.Size([num_samples]))
     # Compare tensors with torch.allclose: plain unittest's assertEqual cannot
     # compare multi-element tensors element-wise.
     self.assertTrue(torch.allclose(log_prob_1, log_prob_2))
Example #4
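 # With a context batch, samples gain a leading context dimension:
 # [context_size, num_samples, *input_shape].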
 def test_sample_and_log_prob_with_context(self):
     num_samples = 10
     context_size = 20
     input_shape = [2, 3, 4]
     context_shape = [5, 6]
     flow = base.Flow(
         transform=transforms.AffineScalarTransform(scale=2.0),
         distribution=distributions.StandardNormal(input_shape),
     )
     context = torch.randn(context_size, *context_shape)
     samples, log_prob = flow.sample_and_log_prob(num_samples, context=context)
     self.assertIsInstance(samples, torch.Tensor)
     self.assertIsInstance(log_prob, torch.Tensor)
     self.assertEqual(
         samples.shape, torch.Size([context_size, num_samples] + input_shape)
     )
     self.assertEqual(log_prob.shape, torch.Size([context_size, num_samples]))
Example #5
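    # Constructor of a RealNVP-style flow (a Flow subclass, hence the
    # super().__init__ call at the end): stacks coupling layers, flipping the
    # binary mask between layers.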
    def __init__(
        self,
        features,
        hidden_features,
        num_layers,
        num_blocks_per_layer,
        use_volume_preserving=False,
        activation=F.relu,
        dropout_probability=0.0,
        batch_norm_within_layers=False,
        batch_norm_between_layers=False,
    ):

        if use_volume_preserving:
            coupling_constructor = transforms.AdditiveCouplingTransform
        else:
            coupling_constructor = transforms.AffineCouplingTransform

        mask = torch.ones(features)
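        # Flip alternate entries to -1: features with one sign pass through a
        # coupling layer unchanged and condition the transform of the rest.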
        mask[::2] = -1

        def create_resnet(in_features, out_features):
            return nn_.ResidualNet(
                in_features,
                out_features,
                hidden_features=hidden_features,
                num_blocks=num_blocks_per_layer,
                activation=activation,
                dropout_probability=dropout_probability,
                use_batch_norm=batch_norm_within_layers,
            )

        layers = []
        for _ in range(num_layers):
            transform = coupling_constructor(
                mask=mask, transform_net_create_fn=create_resnet)
            layers.append(transform)
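            # Flip the mask so the next layer transforms the complementary features.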
            mask *= -1
            if batch_norm_between_layers:
                layers.append(transforms.BatchNorm(features=features))

        super().__init__(
            transform=transforms.CompositeTransform(layers),
            distribution=distributions.StandardNormal([features]),
        )
Example #6
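    # Constructor of a masked autoregressive flow (MAF): each layer is a feature
    # permutation followed by a MaskedAffineAutoregressiveTransform.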
    def __init__(
        self,
        features,
        hidden_features,
        num_layers,
        num_blocks_per_layer,
        use_residual_blocks=True,
        use_random_masks=False,
        use_random_permutations=False,
        activation=F.relu,
        dropout_probability=0.0,
        batch_norm_within_layers=False,
        batch_norm_between_layers=False,
    ):

        if use_random_permutations:
            permutation_constructor = transforms.RandomPermutation
        else:
            permutation_constructor = transforms.ReversePermutation

        layers = []
        for _ in range(num_layers):
            layers.append(permutation_constructor(features))
            layers.append(
                transforms.MaskedAffineAutoregressiveTransform(
                    features=features,
                    hidden_features=hidden_features,
                    num_blocks=num_blocks_per_layer,
                    use_residual_blocks=use_residual_blocks,
                    random_mask=use_random_masks,
                    activation=activation,
                    dropout_probability=dropout_probability,
                    use_batch_norm=batch_norm_within_layers,
                ))
            if batch_norm_between_layers:
                layers.append(transforms.BatchNorm(features))

        super().__init__(
            transform=transforms.CompositeTransform(layers),
            distribution=distributions.StandardNormal([features]),
        )
Example #7
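 # Checks Flow.sample output shapes: [num_samples, *input_shape] without context,
 # [context_size, num_samples, *input_shape] with context.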
 def test_sample(self):
     num_samples = 10
     context_size = 20
     input_shape = [2, 3, 4]
     context_shape = [5, 6]
     flow = base.Flow(
         transform=transforms.AffineScalarTransform(scale=2.0),
         distribution=distributions.StandardNormal(input_shape),
     )
     maybe_context = torch.randn(context_size, *context_shape)
     for context in [None, maybe_context]:
         with self.subTest(context=context):
             samples = flow.sample(num_samples, context=context)
             self.assertIsInstance(samples, torch.Tensor)
             if context is None:
                 self.assertEqual(
                     samples.shape, torch.Size([num_samples] + input_shape)
                 )
             else:
                 self.assertEqual(
                     samples.shape,
                     torch.Size([context_size, num_samples] + input_shape),
                 )
Example #8
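# Builds a conditional density estimator of observations given parameters;
# `model` selects an MDN, a MoG-MADE, a MAF, or a neural spline flow (NSF).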
def get_neural_likelihood(model, parameter_dim, observation_dim):

    if model == "mdn":
        hidden_features = 50
        neural_likelihood = MultivariateGaussianMDN(
            features=observation_dim,
            context_features=parameter_dim,
            hidden_features=hidden_features,
            hidden_net=nn.Sequential(
                nn.Linear(parameter_dim, hidden_features),
                nn.BatchNorm1d(hidden_features),
                nn.ReLU(),
                nn.Dropout(p=0.0),
                nn.Linear(hidden_features, hidden_features),
                nn.BatchNorm1d(hidden_features),
                nn.ReLU(),
                nn.Linear(hidden_features, hidden_features),
                nn.BatchNorm1d(hidden_features),
                nn.ReLU(),
            ),
            num_components=20,
            custom_initialization=True,
        )

    elif model == "made":
        neural_likelihood = MixtureOfGaussiansMADE(
            features=observation_dim,
            hidden_features=50,
            context_features=parameter_dim,
            num_blocks=4,
            num_mixture_components=10,
            use_residual_blocks=True,
            random_mask=False,
            activation=F.relu,
            use_batch_norm=True,
            dropout_probability=0.0,
            custom_initialization=True,
        )

    elif model == "maf":
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.MaskedAffineAutoregressiveTransform(
                            features=observation_dim,
                            hidden_features=50,
                            context_features=parameter_dim,
                            num_blocks=2,
                            use_residual_blocks=False,
                            random_mask=False,
                            activation=torch.tanh,
                            dropout_probability=0.0,
                            use_batch_norm=True,
                        ),
                        transforms.RandomPermutation(features=observation_dim),
                    ]
                )
                for _ in range(5)
            ]
        )
        distribution = distributions_.StandardNormal((observation_dim,))
        neural_likelihood = flows.Flow(transform, distribution)

    elif model == "nsf":
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.PiecewiseRationalQuadraticCouplingTransform(
                            mask=create_alternating_binary_mask(
                                features=observation_dim, even=(i % 2 == 0)
                            ),
                            transform_net_create_fn=lambda in_features, out_features: nn_.ResidualNet(
                                in_features=in_features,
                                out_features=out_features,
                                hidden_features=50,
                                context_features=parameter_dim,
                                num_blocks=2,
                                activation=F.relu,
                                dropout_probability=0.0,
                                use_batch_norm=False,
                            ),
                            num_bins=10,
                            tails="linear",
                            tail_bound=3.0,
                            apply_unconditional_transform=False,
                        ),
                        transforms.LULinear(observation_dim, identity_init=True),
                    ]
                )
                for i in range(5)
            ]
        )
        distribution = distributions_.StandardNormal((observation_dim,))
        neural_likelihood = flows.Flow(transform, distribution)

    else:
        raise ValueError(f"Unknown model: {model}")

    return neural_likelihood
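
A minimal usage sketch for the factory above (hypothetical sizes and stand-in
random data; assumes only the nflows Flow API exercised in the earlier examples):

import torch

parameter_dim, observation_dim = 3, 2  # hypothetical sizes
likelihood = get_neural_likelihood("maf", parameter_dim, observation_dim)

theta = torch.randn(100, parameter_dim)  # stand-in parameters
x = torch.randn(100, observation_dim)    # stand-in observations

# One gradient step maximizing log p(x | theta).
optimizer = torch.optim.Adam(likelihood.parameters(), lr=1e-3)
loss = -likelihood.log_prob(x, context=theta).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()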
Example #9
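# Builds a conditional density estimator of parameters given observations; the
# MADE and MAF variants prepend an affine transform that standardizes the
# parameters using the simulator's normalization statistics.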
def get_neural_posterior(model, parameter_dim, observation_dim, simulator):

    # Everything is a flow because we need to normalize the parameters based on the prior.

    mean, std = simulator.normalization_parameters
    normalizing_transform = transforms.AffineTransform(shift=-mean / std, scale=1 / std)

    if model == "mdn":
        hidden_features = 50
        neural_posterior = MultivariateGaussianMDN(
            features=parameter_dim,
            context_features=observation_dim,
            hidden_features=hidden_features,
            hidden_net=nn.Sequential(
                nn.Linear(observation_dim, hidden_features),
                nn.ReLU(),
                nn.Dropout(p=0.0),
                nn.Linear(hidden_features, hidden_features),
                nn.ReLU(),
                nn.Linear(hidden_features, hidden_features),
                nn.ReLU(),
            ),
            num_components=20,
            custom_initialization=True,
        )

    elif model == "made":
        num_mixture_components = 5
        transform = normalizing_transform
        distribution = distributions_.MADEMoG(
            features=parameter_dim,
            hidden_features=50,
            context_features=observation_dim,
            num_blocks=2,
            num_mixture_components=num_mixture_components,
            use_residual_blocks=True,
            random_mask=False,
            activation=F.relu,
            dropout_probability=0.0,
            use_batch_norm=False,
            custom_initialization=True,
        )
        neural_posterior = flows.Flow(transform, distribution)

    elif model == "maf":
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.MaskedAffineAutoregressiveTransform(
                            features=parameter_dim,
                            hidden_features=50,
                            context_features=observation_dim,
                            num_blocks=2,
                            use_residual_blocks=False,
                            random_mask=False,
                            activation=torch.tanh,
                            dropout_probability=0.0,
                            use_batch_norm=True,
                        ),
                        transforms.RandomPermutation(features=parameter_dim),
                    ]
                )
                for _ in range(5)
            ]
        )

        transform = transforms.CompositeTransform([normalizing_transform, transform])

        distribution = distributions_.StandardNormal((parameter_dim,))
        neural_posterior = flows.Flow(transform, distribution)

    elif model == "nsf":
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.PiecewiseRationalQuadraticCouplingTransform(
                            mask=create_alternating_binary_mask(
                                features=parameter_dim, even=(i % 2 == 0)
                            ),
                            transform_net_create_fn=lambda in_features, out_features: nn_.ResidualNet(
                                in_features=in_features,
                                out_features=out_features,
                                hidden_features=50,
                                context_features=observation_dim,
                                num_blocks=2,
                                activation=F.relu,
                                dropout_probability=0.0,
                                use_batch_norm=False,
                            ),
                            num_bins=10,
                            tails="linear",
                            tail_bound=3.0,
                            apply_unconditional_transform=False,
                        ),
                        transforms.LULinear(parameter_dim, identity_init=True),
                    ]
                )
                for i in range(5)
            ]
        )

        distribution = distributions_.StandardNormal((parameter_dim,))
        neural_posterior = flows.Flow(transform, distribution)

    else:
        raise ValueError(f"Unknown model: {model}")

    return neural_posterior
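
A corresponding sketch for get_neural_posterior; the simulator below is a
hypothetical stub that provides only the normalization statistics the
function reads:

import torch

class StubSimulator:  # hypothetical stand-in for a real simulator
    normalization_parameters = (torch.zeros(3), torch.ones(3))  # (mean, std)

posterior = get_neural_posterior(
    "maf", parameter_dim=3, observation_dim=2, simulator=StubSimulator()
)
theta = torch.randn(100, 3)  # stand-in parameters
x = torch.randn(100, 2)      # stand-in observations
loss = -posterior.log_prob(theta, context=x).mean()  # maximize log p(theta | x)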