    def __init__(
        self,
        d: int = 1,
        prior=None,
        expand_dims: int = -1,
        name="MultivariateNormalParameter",
    ):

        # Prior
        if prior is None:
            prior = MultivariateNormal(O.zeros([d]), O.eye(d))

        # Transform
        if expand_dims is not None:
            transform = lambda x: O.expand_dims(x, expand_dims)
        else:
            transform = None

        # Initializer and variable transforms
        initializer = {
            "loc": lambda x: xavier([d]),
            "cov": lambda x: xavier([int(d * (d + 1) / 2)]),
        }
        var_transform = {"loc": None, "cov": O.log_cholesky_transform}

        super().__init__(
            posterior=MultivariateNormal,
            prior=prior,
            transform=transform,
            initializer=initializer,
            var_transform=var_transform,
            name=name,
        )
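As a usage sketch (assuming ProbFlow's public API, in which parameters are callable and a call returns a posterior sample; the variable names here are illustrative):

import probflow as pf

param = pf.MultivariateNormalParameter(d=3)
sample = param()      # one posterior sample; shape [3, 1] after the default
                      # expand_dims=-1 transform
kl = param.kl_loss()  # KL divergence between the posterior and the prior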
Example #2
import torch

import probflow as pf
from probflow.utils import ops


def test_expand_dims():
    """Tests expand_dims"""

    pf.set_backend("pytorch")

    val = torch.randn(3)
    val = ops.expand_dims(val, 1)
    assert val.ndim == 2
    assert val.shape[0] == 3
    assert val.shape[1] == 1

    val = torch.randn(3)
    val = ops.expand_dims(val, 0)
    assert val.ndim == 2
    assert val.shape[0] == 1
    assert val.shape[1] == 3
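ops.expand_dims is a backend-agnostic wrapper, so the same shape semantics can be checked against plain NumPy (a standalone sketch, not part of the test suite):

import numpy as np

v = np.random.randn(3)
assert np.expand_dims(v, 1).shape == (3, 1)  # new trailing axis
assert np.expand_dims(v, 0).shape == (1, 3)  # new leading axis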
Example #3
    def _sample(self, x, func, ed=None, axis=1):
        """Sample from the model"""
        samples = []
        for x_data, y_data in make_generator(x, test=True):
            if x_data is None:  # generative model: sample unconditionally
                samples += [func(self())]
            else:  # expand dims of the input batch before calling the model
                samples += [func(self(O.expand_dims(x_data, ed)))]
        return np.concatenate(to_numpy(samples), axis=axis)
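Public sampling methods presumably funnel through _sample; a hypothetical wrapper in that style (the name predictive_sample, the ed/axis choices, and the signature are illustrative, not necessarily the library's actual API):

    def predictive_sample(self, x=None, n=1000):
        """Draw n samples from the posterior predictive distribution."""
        # self(...) returns the predictive distribution; take n draws from it
        return self._sample(x, lambda dist: dist.sample(n), ed=0, axis=0)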
Example #4
    def elbo_loss(self, x_data, y_data, n: int, n_mc: int):
        """Compute the negative ELBO, scaled to a single sample.

        Parameters
        ----------
        x_data
            The independent variable values (or None if this is a generative
            model)
        y_data
            The dependent variable values
        n : int
            Total number of datapoints in the dataset
        n_mc : int
            Number of MC samples we're taking from the posteriors
        """
        nb = y_data.shape[0]  # number of samples in this batch
        if n_mc > 1:  # first dim is num MC samples if n_mc > 1
            x_data = None if x_data is None else O.expand_dims(x_data, 0)
            y_data = O.expand_dims(y_data, 0)
        log_loss = self.log_likelihood(x_data, y_data) / nb / n_mc
        kl_loss = self.kl_loss() / n + self.kl_loss_batch() / nb
        return self._kl_weight * kl_loss - log_loss
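In equation form (with N the dataset size, n_b the batch size, n_mc the number of Monte Carlo posterior samples, and \lambda the _kl_weight), the returned quantity is the per-datapoint negative ELBO:

\mathcal{L} = \lambda \left[ \tfrac{1}{N}\,\mathrm{KL}\big(q(\theta)\,\|\,p(\theta)\big) + \tfrac{1}{n_b}\,\mathrm{KL}_{\mathrm{batch}} \right] - \tfrac{1}{n_{mc}\, n_b} \sum_{i=1}^{n_{mc}} \sum_{j=1}^{n_b} \log p\big(y_j \mid x_j,\, \theta^{(i)}\big)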
Example #5
    def __init__(
        self,
        d: int = 1,
        prior=None,
        expand_dims: int = -1,
        name="MultivariateNormalParameter",
    ):

        # Transformation for scale parameters: map an unconstrained vector of
        # length d*(d+1)/2 to a positive definite covariance matrix via a
        # Cholesky factor whose diagonal is exponentiated
        def log_cholesky_transform(x):
            if get_backend() == "pytorch":
                raise NotImplementedError
            else:
                import tensorflow as tf
                import tensorflow_probability as tfp

                E = tfp.math.fill_triangular(x)
                E = tf.linalg.set_diag(E,
                                       tf.exp(tf.linalg.tensor_diag_part(E)))
                return E @ tf.transpose(E)

        # Prior
        if prior is None:
            prior = MultivariateNormal(O.zeros([d]), O.eye(d))

        # Transform
        if expand_dims is not None:
            transform = lambda x: O.expand_dims(x, expand_dims)
        else:
            transform = None

        # Initializer and variable transforms
        initializer = {
            "loc": lambda x: xavier([d]),
            "cov": lambda x: xavier([int(d * (d + 1) / 2)]),
        }
        var_transform = {"loc": None, "cov": log_cholesky_transform}

        super().__init__(
            posterior=MultivariateNormal,
            prior=prior,
            transform=transform,
            initializer=initializer,
            var_transform=var_transform,
            name=name,
        )
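To sanity-check what log_cholesky_transform produces, here is a standalone TensorFlow sketch (arbitrary input values; a verification snippet, not library code):

import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp

d = 3
x = tf.constant(np.random.randn(d * (d + 1) // 2), dtype=tf.float32)
E = tfp.math.fill_triangular(x)     # length-6 vector -> lower triangular matrix
E = tf.linalg.set_diag(E, tf.exp(tf.linalg.tensor_diag_part(E)))
cov = E @ tf.transpose(E)           # E @ E^T with positive diagonal is positive definite
assert bool(tf.reduce_all(tf.linalg.eigvalsh(cov) > 0))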