Example #1
def test_ess():
    # Construct a prior and a likelihood.
    prior = Normal(np.array([[0.6, 0.3], [0.3, 0.6]]))
    lik = Normal(
        np.array([[0.2], [0.3]]),
        np.array([[1, 0.2], [0.2, 1]]),
    )

    # Perform sampling.
    sampler = ESS(lik.logpdf, prior.sample)
    num_samples = 30_000
    samples = B.concat(*sampler.sample(num=num_samples), axis=1)

    samples_mean = B.mean(samples, axis=1)[:, None]
    samples_cov = (
        B.matmul(samples - samples_mean, samples - samples_mean, tr_b=True) /
        num_samples)

    # Compute posterior statistics.
    prec_prior = B.inv(prior.var)
    prec_lik = B.inv(lik.var)
    cov = B.inv(prec_prior + prec_lik)
    mean = cov @ (prec_prior @ prior.mean + prec_lik @ lik.mean)

    approx(samples_cov, cov, atol=5e-2)
    approx(samples_mean, mean, atol=5e-2)
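For reference, the posterior statistics at the end of this test follow from the standard result for a product of two Gaussian densities: with prior $\mathcal{N}(\mu_0, \Sigma_0)$ and Gaussian likelihood $\mathcal{N}(\mu_1, \Sigma_1)$, the posterior is Gaussian with

$$\Sigma = (\Sigma_0^{-1} + \Sigma_1^{-1})^{-1}, \qquad \mu = \Sigma\,(\Sigma_0^{-1}\mu_0 + \Sigma_1^{-1}\mu_1),$$

which is exactly what the `cov` and `mean` lines compute via `B.inv`.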
Example #2
def inv(a: Woodbury):
    diag_inv = B.inv(a.diag)
    # Explicitly computing the inverse is not great numerically, but solving
    # against left or right destroys symmetry, which hinders further algebraic
    # simplifications.
    return B.subtract(
        diag_inv,
        LowRank(B.matmul(diag_inv, a.lr.left), B.matmul(diag_inv, a.lr.right),
                B.inv(B.schur(a))))
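This is the matrix inversion lemma written in terms of the structured types: reading the attributes off the code, a `Woodbury` matrix represents $A = D + L\,M\,R^\top$ (diagonal plus low-rank part), and

$$A^{-1} = D^{-1} - D^{-1} L\,S^{-1} R^\top D^{-1}, \qquad S = M^{-1} + R^\top D^{-1} L,$$

where $S$ is the Schur complement returned by `B.schur(a)` in the next example. Assuming `LowRank(left, right, middle)` represents $\mathrm{left} \cdot \mathrm{middle} \cdot \mathrm{right}^\top$, the subtracted term is exactly the low-rank correction $D^{-1} L\,S^{-1} (D^{-1} R)^\top$.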
Example #3
File: schur.py Project: wesselb/matrix
def schur(a: Woodbury):
    """Compute the Schur complement associated to a matrix. A Schur complement will need
    to make sense for the type of `a`.

    Args:
        a (matrix): Matrix to compute Schur complement of.

    Returns:
        matrix: Schur complement.
    """
    if a.schur is None:
        second = B.mm(a.lr.right, B.inv(a.diag), a.lr.left, tr_a=True)
        a.schur = B.add(B.inv(a.lr.middle), second)
    return a.schur
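As a quick numerical sanity check of the identity behind the two snippets above, here is a minimal plain-NumPy sketch. All names and arrays below are made up for illustration and are independent of the `matrix` library:

import numpy as np

# Hypothetical symmetric Woodbury-structured matrix A = D + L M L^T.
rng = np.random.default_rng(0)
n, r = 5, 2
d = np.diag(rng.uniform(1.0, 2.0, size=n))       # Diagonal part D.
left = rng.normal(size=(n, r))                   # Low-rank factor L.
middle = np.diag(rng.uniform(1.0, 2.0, size=r))  # Middle factor M.
a = d + left @ middle @ left.T

# Schur complement and inverse via the matrix inversion lemma.
d_inv = np.linalg.inv(d)
schur = np.linalg.inv(middle) + left.T @ d_inv @ left
a_inv = d_inv - d_inv @ left @ np.linalg.inv(schur) @ left.T @ d_inv

assert np.allclose(a_inv, np.linalg.inv(a))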
Example #4
File: pd_inv.py Project: wesselb/matrix
def pd_inv(a: Woodbury):
    diag_inv = B.inv(a.diag)
    # See comment in `inv`.
    return B.subtract(
        diag_inv,
        LowRank(
            B.matmul(diag_inv, a.lr.left),
            B.matmul(diag_inv, a.lr.right),
            B.pd_inv(B.pd_schur(a)),
        ),
    )
Example #5
def test_cholesky_solve_ut(dense_pd):
    chol = B.cholesky(dense_pd)

    with AssertDenseWarning(
        [
            "solving <upper-triangular> x = <diagonal>",
            "matrix-multiplying <upper-triangular> and <lower-triangular>",
        ]
    ):
        approx(
            B.cholesky_solve(B.transpose(chol), B.eye(chol)),
            B.inv(B.matmul(chol, chol, tr_a=True)),
        )
Example #6
    def _project_pattern(self, x, y, pattern):
        # Check whether all data is available.
        no_missing = all(pattern)

        if no_missing:
            # All data is available. Nothing to be done.
            u = self.u
        else:
            # Data is missing. Pick the available entries.
            y = B.take(y, pattern, axis=1)
            # Ensure that `u` remains a structured matrix.
            u = Dense(B.take(self.u, pattern))

        # Get number of data points and outputs in this part of the data.
        n = B.shape(x)[0]
        p = sum(pattern)

        # Perform projection.
        proj_y_partial = B.matmul(y, B.pinv(u), tr_b=True)
        proj_y = B.matmul(proj_y_partial, B.inv(self.s_sqrt), tr_b=True)

        # Compute projected noise.
        u_square = B.matmul(u, u, tr_a=True)
        proj_noise = (
            self.noise_obs / B.diag(self.s_sqrt) ** 2 * B.diag(B.pd_inv(u_square))
        )

        # Convert projected noise to weights.
        noises = self.model.noises
        weights = noises / (noises + proj_noise)
        proj_w = B.ones(B.dtype(weights), n, self.m) * weights[None, :]

        # Compute Frobenius norm.
        frob = B.sum(y ** 2)
        frob = frob - B.sum(proj_y_partial * B.matmul(proj_y_partial, u_square))

        # Compute regularising term.
        reg = 0.5 * (
            n * (p - self.m) * B.log(2 * B.pi * self.noise_obs)
            + frob / self.noise_obs
            + n * B.logdet(B.matmul(u, u, tr_a=True))
            + n * 2 * B.logdet(self.s_sqrt)
        )

        return x, proj_y, proj_w, reg
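Reading off the code above (the symbols here are shorthand for the attributes used in the method, not library API): with basis $U$ restricted to the available outputs, diagonal scaling $S^{1/2}$ (`self.s_sqrt`), and observation noise $\sigma^2$ (`self.noise_obs`), every data row $y$ is projected as

$$\tilde{y} = S^{-1/2} U^{+} y,$$

where $U^{+}$ is the pseudo-inverse. The projected noise of latent process $i$ is $\sigma^2\,[(U^\top U)^{-1}]_{ii} / S_{ii}$, and the weights are $\lambda_i / (\lambda_i + \text{projected noise}_i)$ with $\lambda_i$ the model's latent noises.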
Example #7
def inv(a: Kronecker):
    return Kronecker(B.inv(a.left), B.inv(a.right))
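This relies on the identity $(A \otimes B)^{-1} = A^{-1} \otimes B^{-1}$, which holds whenever both Kronecker factors are invertible. A minimal NumPy check with made-up matrices, independent of the library:

import numpy as np

a = np.array([[2.0, 1.0], [0.0, 3.0]])
b = np.array([[4.0, 0.5], [0.5, 1.0]])
assert np.allclose(
    np.linalg.inv(np.kron(a, b)),
    np.kron(np.linalg.inv(a), np.linalg.inv(b)),
)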
Example #8
def inv(a: Dense):
    return Dense(B.inv(a.mat))
Example #9
def test_pd_inv_correctness(dense_pd):
    approx(B.pd_inv(dense_pd), B.inv(dense_pd))
Example #10
File: pd_inv.py Project: wesselb/matrix
def pd_inv(a: Diagonal):
    return B.inv(a)
Example #11
def test_cholesky_solve_lt(dense_pd):
    chol = B.cholesky(dense_pd)

    with AssertDenseWarning("solving <lower-triangular> x = <diagonal>"):
        approx(B.cholesky_solve(chol, B.eye(chol)), B.inv(dense_pd))
Example #12
def solve(a: Woodbury, b: AbstractMatrix):
    # `B.inv` is optimised with the matrix inversion lemma.
    return B.matmul(B.inv(a), b)
Example #13
def solve(a: Diagonal, b: AbstractMatrix):
    return B.matmul(B.inv(a), b)