Example #1
def test_natural_normal():
    chol = B.randn(2, 2)
    dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-1))
    nat = NaturalNormal.from_normal(dist)

    # Test properties.
    assert dist.dtype == nat.dtype
    for name in ["dim", "mean", "var", "m2"]:
        approx(getattr(dist, name), getattr(nat, name))

    # Test sampling.
    state = B.create_random_state(dist.dtype, seed=0)
    state, sample = nat.sample(state, num=1_000_000)
    emp_mean = B.mean(B.dense(sample), axis=1, squeeze=False)
    emp_var = (sample - emp_mean) @ (sample - emp_mean).T / 1_000_000
    approx(dist.mean, emp_mean, rtol=5e-2)
    approx(dist.var, emp_var, rtol=5e-2)

    # Test KL.
    chol = B.randn(2, 2)
    other_dist = Normal(B.randn(2, 1), B.reg(chol @ chol.T, diag=1e-2))
    other_nat = NaturalNormal.from_normal(other_dist)
    approx(dist.kl(other_dist), nat.kl(other_nat))

    # Test log-pdf.
    x = B.randn(2, 1)
    approx(dist.logpdf(x), nat.logpdf(x))
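For reference, the conversion performed by NaturalNormal.from_normal corresponds to the standard natural parametrisation of a Gaussian. A minimal sketch under that assumption (consistent with the sample method in Example #8 below), not the library's actual implementation:

import lab as B

def natural_from_moments(mean, var):
    # Natural parameters of N(mean, var): precision prec = inv(var) and
    # precision-adjusted mean lam = inv(var) @ mean.
    prec = B.inv(var)
    lam = prec @ mean
    return lam, prec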
Example #2
def test_combine():
    x1 = B.linspace(0, 2, 10)
    x2 = B.linspace(2, 4, 10)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Matern12(), measure=m)
    y1 = p1(x1).sample()
    y2 = p2(x2).sample()

    # Check the one-argument case.
    assert_equal_normals(combine(p1(x1, 1)), p1(x1, 1))
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)))
    assert_equal_normals(fdd_combined, p1(x1, 1))
    approx(y_combined, y1)

    # Check the two-argument case.
    fdd_combined = combine(p1(x1, 1), p2(x2, 2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)),
                                       (p2(x2, 2), y2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    approx(y_combined, B.concat(y1, y2, axis=0))
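For reference, the joint distribution that combine is checked against can also be written out directly. A minimal sketch of the expected behaviour for independent FDDs (the GPs above are zero-mean, which is why the test omits the mean):

joint = Normal(
    B.concat(p1(x1, 1).mean, p2(x2, 2).mean, axis=0),
    B.block_diag(p1(x1, 1).var, p2(x2, 2).var),
)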
Example #3
def test_ess():
    # Construct a prior and a likelihood.
    prior = Normal(np.array([[0.6, 0.3], [0.3, 0.6]]))
    lik = Normal(
        np.array([[0.2], [0.3]]),
        np.array([[1, 0.2], [0.2, 1]]),
    )

    # Perform sampling.
    sampler = ESS(lik.logpdf, prior.sample)
    num_samples = 30_000
    samples = B.concat(*sampler.sample(num=num_samples), axis=1)

    samples_mean = B.mean(samples, axis=1)[:, None]
    samples_cov = (
        B.matmul(samples - samples_mean, samples - samples_mean, tr_b=True) /
        num_samples)

    # Compute posterior statistics.
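    # For a Gaussian prior N(m0, S0) combined with a Gaussian likelihood
    # N(m1, S1) over the same variable, the posterior is Gaussian with
    # precision inv(S0) + inv(S1) and the precision-weighted mean computed
    # below, which the sampler should recover.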
    prec_prior = B.inv(prior.var)
    prec_lik = B.inv(lik.var)
    cov = B.inv(prec_prior + prec_lik)
    mean = cov @ (prec_prior @ prior.mean + prec_lik @ lik.mean)

    approx(samples_cov, cov, atol=5e-2)
    approx(samples_mean, mean, atol=5e-2)
Example #4
def test_normal1d_logpdf():
    means = B.randn(3, 3)
    covs = B.randn(3, 3) ** 2
    x = B.randn(3, 3)
    logpdfs = normal1d_logpdf(x, covs, means)
    for i in range(3):
        for j in range(3):
            dist = Normal(means[i : i + 1, j : j + 1], covs[i : i + 1, j : j + 1])
            approx(logpdfs[i, j], dist.logpdf(x[i, j]))
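For reference, a minimal element-wise implementation consistent with what this test checks; the name normal1d_logpdf_reference is illustrative, and note that the argument order above is x, variances, means:

import lab as B

def normal1d_logpdf_reference(x, var, mean):
    # Element-wise log-density of N(mean, var) evaluated at x.
    return -0.5 * (B.log(2 * B.pi * var) + (x - mean) ** 2 / var)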
Example #5
def objective(vs, m, x_data, y_data, locs):
    """NLML objective.

    Args:
        vs (:class:`varz.Vars`): Variable container.
        m (int): Number of latent processes.
        x_data (tensor): Time stamps of the observations.
        y_data (tensor): Observations.
        locs (tensor): Spatial locations of observations.

    Returns:
        scalar: Negative log-marginal likelihood.
    """
    y_proj, _, S, noises_obs = project(vs, m, y_data, locs)
    xs, noise_obs, noises_latent = model(vs, m)

    # Add contribution of latent processes.
    lml = 0
    for i, (x, y) in enumerate(zip(xs, y_proj)):
        e_signal = GP((noise_obs / S[i] + noises_latent[i]) * Delta(),
                      graph=x.graph)
        lml += (x + e_signal)(x_data).logpdf(y)

        e_noise = GP(noise_obs / S[i] * Delta(), graph=x.graph)
        lml -= e_noise(x_data).logpdf(y)

    # Add regularisation contribution.
    lml += B.sum(Normal(Diagonal(noises_obs)).logpdf(B.transpose(y_data)))

    # Return the negative evidence, normalised by the number of data points.
    n, p = B.shape(y_data)
    return -lml / (n * p)
Example #6
    def to_normal(self):
        """Convert to normal distribution parametrised by a mean and variance.

        Returns:
            :class:`stheno.Normal`: Normal distribution parametrised by a mean
                and variance.
        """
        return Normal(self.mean, self.var)
Example #7
def laplace_approximation(f, x_init, f_eval=None):
    """Perform a Laplace approximation of a density.

    Args:
        f (function): Possibly unnormalised log-density.
        x_init (column vector): Starting point for the optimisation.
        f_eval (function): Use this log-density for the evaluation at the MAP estimate.

    Returns:
        :class:`stheno.Normal`: Laplace approximation.
    """
    x = maximum_a_posteriori(f, x_init)
    precision = -hessian(f_eval if f_eval is not None else f, x)
    return Normal(x, closest_psd(precision, inv=True))
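A hypothetical usage sketch: for a log-density that is already Gaussian, the Laplace approximation is exact, so the result should recover the original mean and variance. This assumes that the helpers maximum_a_posteriori, hessian, and closest_psd used above are in scope:

target = Normal(np.array([[1.0]]), np.array([[2.0]]))
approximation = laplace_approximation(target.logpdf, np.array([[0.0]]))
# approximation.mean should be close to 1 and approximation.var close to 2.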
Example #8
    def sample(self, state: B.RandomState, num: int = 1):
        """Sample.

        Args:
            state (random state): Random state.
            num (int): Number of samples.

        Returns:
            tuple[random state, tensor]: Random state and sample.
        """
        state, noise = Normal(self.prec).sample(state, num)
        sample = B.cholsolve(B.chol(self.prec), B.add(noise, self.lam))
        # Remove the matrix type if there is no structure. This eases working with
        # JITs, which aren't happy with matrix types.
        if not structured(sample):
            sample = B.dense(sample)
        return state, sample
Example #9
def test_normaliser():
    # Create test data.
    mat = B.randn(3, 3)
    dist = Normal(B.randn(3, 1), mat @ mat.T)
    y = dist.sample(num=10).T

    # Create normaliser.
    norm = Normaliser(y)
    y_norm = norm.normalise(y)

    # Create distribution of normalised data.
    scale = Diagonal(norm.scale[0])
    dist_norm = Normal(
        B.inv(scale) @ (dist.mean - norm.mean.T),
        B.inv(scale) @ dist.var @ B.inv(scale))

    approx(
        B.sum(dist.logpdf(y.T)),
        B.sum(dist_norm.logpdf(y_norm.T)) + norm.normalise_logdet(y),
    )
Example #10
def rand_normal(n=3):
    # Construct a random covariance matrix that is guaranteed to be positive
    # semi-definite by forming cov @ cov.T.
    cov = B.randn(n, n)
    cov = B.mm(cov, cov, tr_b=True)
    return Normal(B.randn(n, 1), cov)
Example #11
def elbo(lik, p: Normal, q: Normal, num_samples=1):
    return B.mean(lik(q.sample(num_samples))) - q.kl(p)
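This is the standard Monte Carlo estimate of the evidence lower bound, E_q[log lik] - KL(q || p). A hypothetical usage sketch with a Gaussian prior, approximate posterior, and observation log-likelihood (all names below are illustrative):

import numpy as np
from stheno import Normal

p = Normal(np.eye(2))                                 # Prior.
q = Normal(np.random.randn(2, 1), 0.5 * np.eye(2))    # Approximate posterior.
obs = Normal(np.random.randn(2, 1), 0.1 * np.eye(2))  # Likelihood term.
estimate = elbo(obs.logpdf, p, q, num_samples=100)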