Example #1
 def __eq__(self, other):
     return B.diag(self) == B.diag(other)
Example #2
 def dim(self):
     """Dimensionality."""
     return B.shape(self.var)[0]
Example #3
 def lmatmul(self, other):
     return Normal(B.dot(B.dot(other, self.var), other, tr_b=True),
                   B.dot(other, self.mean))
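
Added note: `lmatmul` appears to build the affine transform of a Gaussian, i.e. for a matrix A and x ~ N(m, S), y = A x has mean A m and variance A S A^T. A minimal NumPy sketch of that identity (the names A, m, S are illustrative, not from the library):

import numpy as np

# Assumption: `other` acts as a plain matrix A on the left of the distribution.
A = np.random.randn(2, 3)
m = np.random.randn(3, 1)     # mean of x
S = np.eye(3)                 # variance of x
mean_y = A @ m                # corresponds to B.dot(other, self.mean)
var_y = A @ S @ A.T           # corresponds to B.dot(B.dot(other, self.var), other, tr_b=True)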
Example #4
    def __init__(self, z, e, x, y, ref=None):
        Observations.__init__(self, x, y, ref=ref)

        # Extract processes.
        p_x, x = type_parameter(self.x), self.x.get()
        z = ensure_at(z, self._ref)
        p_z, z = type_parameter(z), z.get()

        # Construct the necessary kernel matrices.
        K_zx = self.graph.kernels[p_z, p_x](z, x)
        K_z = self.graph.kernels[p_z](z)

        # Evaluating `e.kernel(x)` will yield incorrect results if `x` is a
        # `MultiInput`, because `x` then still designates the particular
        # components of `f`. Fix that by instead designating the elements of
        # `e`.
        if isinstance(x, MultiInput):
            x_n = MultiInput(*(p(xi.get())
                               for p, xi in zip(e.kernel.ps, x.get())))
        else:
            x_n = x

        # Construct the noise kernel matrix.
        K_n = e.kernel(x_n)

        # The approximation can only handle diagonal noise matrices.
        if not isinstance(K_n, Diagonal):
            raise RuntimeError('Kernel matrix of noise must be diagonal.')

        # And construct the components for the inducing point approximation.
        L_z = B.cholesky(matrix(K_z))
        A = B.eye_from(K_z) + B.qf(K_n, B.transpose(B.trisolve(L_z, K_zx)))
        y_bar = uprank(self.y) - e.mean(x_n) - self.graph.means[p_x](x)
        prod_y_bar = B.trisolve(L_z, B.qf(K_n, B.transpose(K_zx), y_bar))

        # Compute the optimal mean.
        mean = self.graph.means[p_z](z) + \
               B.qf(A, B.trisolve(L_z, K_z), prod_y_bar)

        # Compute the ELBO.
        # NOTE: The calculation of `trace_part` assumes that `K_n` is diagonal.
        #       The rest, however, is completely generic.
        trace_part = B.ratio(
            Diagonal(self.graph.kernels[p_x].elwise(x)[:, 0]) -
            Diagonal(B.qf_diag(K_z, K_zx)), K_n)
        det_part = B.logdet(2 * B.pi * K_n) + B.logdet(A)
        qf_part = B.qf(K_n, y_bar)[0, 0] - B.qf(A, prod_y_bar)[0, 0]
        elbo = -0.5 * (trace_part + det_part + qf_part)

        # Store relevant quantities.
        self.elbo = elbo
        self.A = A

        # Update observations to reflect pseudo-points.
        self.x = p_z(z)
        self.y = mean
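
Added note: hedging that this follows the standard variational inducing-point (Titsias-style) construction, the quantities above read as

    A          = I + (L_z^{-1} K_zx) K_n^{-1} (L_z^{-1} K_zx)^T,   with L_z = chol(K_z),
    trace_part = tr(K_n^{-1} (diag k(x, x) - diag(K_xz K_z^{-1} K_zx))),
    det_part   = log det(2 pi K_n) + log det(A),
    qf_part    = y_bar^T K_n^{-1} y_bar - prod_y_bar^T A^{-1} prod_y_bar,
    elbo       = -(trace_part + det_part + qf_part) / 2,

where y_bar is the observations with the noise and prior means subtracted and prod_y_bar = L_z^{-1} K_zx K_n^{-1} y_bar. The optimal pseudo-point mean is then m_z(z) + (L_z^{-1} K_z)^T A^{-1} prod_y_bar, matching the `mean` computed above.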
Example #5
 def __eq__(self, other):
     return B.all(self.scale == other.scale) and self[0] == other[0]
Example #6
def test_qf():
    # Generate some test inputs.
    b, c = np.random.randn(5, 3), np.random.randn(5, 3)

    # Generate some matrices to test.
    a = np.random.randn(5, 5)
    a = Dense(a.dot(a.T))
    d = Diagonal(B.diag(a))
    e = np.random.randn(2, 2)
    wb = d + LowRank(left=np.random.randn(5, 2),
                     middle=e.dot(e.T))

    for x in [a, d, wb]:
        allclose(B.qf(x, b), np.linalg.solve(to_np(x), b).T.dot(b))
        allclose(B.qf(x, b, b), B.qf(x, b))
        allclose(B.qf(x, b, c), np.linalg.solve(to_np(x), b).T.dot(c))
        allclose(B.qf_diag(x, b),
                 np.diag(np.linalg.solve(to_np(x), b).T.dot(b)))
        allclose(B.qf_diag(x, b, b), B.qf_diag(x, b))
        allclose(B.qf_diag(x, b, c),
                 np.diag(np.linalg.solve(to_np(x), b).T.dot(c)))

    # Test `LowRank`.
    lr = LowRank(np.random.randn(5, 3))
    with pytest.raises(RuntimeError):
        B.qf(lr, b)
    with pytest.raises(RuntimeError):
        B.qf(lr, b, c)
    with pytest.raises(RuntimeError):
        B.qf_diag(lr, b)
    with pytest.raises(RuntimeError):
        B.qf_diag(lr, b, c)
Example #7
def test_shorthands():
    a = Dense(np.random.randn(4, 4))
    allclose(a.T, B.transpose(a))
    allclose(a.__matmul__(a), B.matmul(a, a))
Example #8
 def __call__(self, x):
     return B.concat(*[self(xi) for xi in x.get()], axis=0)
Example #9
def _take_x(k: MultiOutputKernel, x: FDD, mask: B.Numeric):
    if x.p not in k.ps:
        raise ValueError(f"Process {x.p} is not part of the multi-output kernel.")
    return B.take(x, mask)
Example #10
 def elwise(self, x, y):
     if len(x.get()) != len(y.get()):
         raise ValueError('MultiOutputKernel.elwise must be called with '
                          'similarly sized MultiInputs.')
     return B.concat(*[self.elwise(xi, yi)
                       for xi, yi in zip(x.get(), y.get())], axis=0)
Example #11
def to_np(a: AbstractMatrix):
    return B.dense(a)
Example #12
 def __call__(self, x, y):
     return B.block(*[[self(xi, yi) for yi in y.get()] for xi in x.get()])
Example #13
def qf_diag(a, b, c):
    return B.diag(B.qf(a, b, c))
Example #14
def qf_diag(a, b):
    return B.qf_diag(a, b, b)
Example #15
def test_inverse_and_logdet():
    # Test `Dense`.
    a = np.random.randn(3, 3)
    a = Dense(a.dot(a.T))
    allclose(B.matmul(a, B.inverse(a)), np.eye(3))
    allclose(B.matmul(B.inverse(a), a), np.eye(3))
    allclose(B.logdet(a), np.log(np.linalg.det(to_np(a))))

    # Test `Diagonal`.
    d = Diagonal(np.array([1, 2, 3]))
    allclose(B.matmul(d, B.inverse(d)), np.eye(3))
    allclose(B.matmul(B.inverse(d), d), np.eye(3))
    allclose(B.logdet(d), np.log(np.linalg.det(to_np(d))))
    assert B.shape(B.inverse(Diagonal(np.array([1, 2]),
                                      rows=2, cols=4))) == (4, 2)

    # Test `Woodbury`.
    a = np.random.randn(3, 2)
    b = np.random.randn(2, 2) + 1e-2 * np.eye(2)
    wb = d + LowRank(left=a, middle=b.dot(b.T))
    for _ in range(4):
        allclose(B.matmul(wb, B.inverse(wb)), np.eye(3))
        allclose(B.matmul(B.inverse(wb), wb), np.eye(3))
        allclose(B.logdet(wb), np.log(np.linalg.det(to_np(wb))))
        wb = B.inverse(wb)

    # Test `LowRank`.
    with pytest.raises(RuntimeError):
        B.inverse(wb.lr)
    with pytest.raises(RuntimeError):
        B.logdet(wb.lr)
Example #16
def test_normal_1d():
    # Test broadcasting.
    d = Normal1D(1, 0)
    assert type(d.var) == UniformlyDiagonal
    assert B.shape(d.var) == (1, 1)
    assert B.shape(d.mean) == (1, 1)

    d = Normal1D(1, np.array([0, 0, 0]))
    assert type(d.var) == UniformlyDiagonal
    assert B.shape(d.var) == (3, 3)
    assert B.shape(d.mean) == (3, 1)

    d = Normal1D(np.array([1, 2, 3]), 0)
    assert type(d.var) == Diagonal
    assert B.shape(d.var) == (3, 3)
    assert B.shape(d.mean) == (3, 1)

    d = Normal1D(np.array([1, 2, 3]), np.array([0, 0, 0]))
    assert type(d.var) == Diagonal
    assert B.shape(d.var) == (3, 3)
    assert B.shape(d.mean) == (3, 1)

    d = Normal1D(1)
    assert type(d.var) == UniformlyDiagonal
    assert B.shape(d.var) == (1, 1)
    assert B.shape(d.mean) == (1, 1)

    d = Normal1D(np.array([1, 2, 3]))
    assert type(d.var) == Diagonal
    assert B.shape(d.var) == (3, 3)
    assert B.shape(d.mean) == (3, 1)

    with pytest.raises(ValueError):
        Normal1D(np.eye(3))
    with pytest.raises(ValueError):
        Normal1D(np.eye(3), 0)
    with pytest.raises(ValueError):
        Normal1D(1, np.ones((3, 1)))
    with pytest.raises(ValueError):
        Normal1D(np.array([1, 2]), np.ones((3, 1)))
Example #17
def test_lr_diff():
    # First, test correctness.
    a = np.random.randn(3, 2)
    b = np.random.randn(2, 2)
    lr1 = LowRank(left=a, right=np.random.randn(3, 2), middle=b.dot(b.T))
    a = np.random.randn(3, 2)
    b = np.random.randn(2, 2)
    lr2 = LowRank(left=a, right=np.random.randn(3, 2), middle=b.dot(b.T))

    allclose(B.lr_diff(lr1, lr1), B.zeros(3, 3))
    allclose(B.lr_diff(lr1 + lr2, lr1), lr2)
    allclose(B.lr_diff(lr1 + lr2, lr2), lr1)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1), lr1 + lr2)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr2), lr1 + lr1)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1), lr2)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr2), lr1)
    allclose(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1 + lr2), B.zeros(3, 3))

    # Second, test positive definiteness.
    lr1 = LowRank(left=lr1.left, middle=lr1.middle)
    lr2 = LowRank(left=lr2.left, middle=lr2.middle)

    B.cholesky(B.lr_diff(lr1, 0.999 * lr1))
    B.cholesky(B.lr_diff(lr1 + lr2, lr1))
    B.cholesky(B.lr_diff(lr1 + lr2, lr2))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr2))
    B.cholesky(B.lr_diff(lr1 + lr1 + lr2, lr1 + lr1 + 0.999 * lr2))
Example #18
def test_uprank():
    allclose(uprank(0), [[0]])
    allclose(uprank(np.array([0])), [[0]])
    allclose(uprank(np.array([[0]])), [[0]])
    assert type(uprank(Component('test')(0))) == Component('test')

    k = OneKernel()

    assert B.shape(k(0, 0)) == (1, 1)
    assert B.shape(k(0, np.ones(5))) == (1, 5)
    assert B.shape(k(0, np.ones((5, 2)))) == (1, 5)

    assert B.shape(k(np.ones(5), 0)) == (5, 1)
    assert B.shape(k(np.ones(5), np.ones(5))) == (5, 5)
    assert B.shape(k(np.ones(5), np.ones((5, 2)))) == (5, 5)

    assert B.shape(k(np.ones((5, 2)), 0)) == (5, 1)
    assert B.shape(k(np.ones((5, 2)), np.ones(5))) == (5, 5)
    assert B.shape(k(np.ones((5, 2)), np.ones((5, 2)))) == (5, 5)

    with pytest.raises(ValueError):
        k(0, np.ones((5, 2, 1)))
    with pytest.raises(ValueError):
        k(np.ones((5, 2, 1)))

    m = OneMean()

    assert B.shape(m(0)) == (1, 1)
    assert B.shape(m(np.ones(5))) == (5, 1)
    assert B.shape(m(np.ones((5, 2)))) == (5, 1)

    p = GP(EQ(), graph=Graph())
    x = np.linspace(0, 10, 10)

    approx(p.condition(1, 1)(1).mean, np.array([[1]]))
    approx(p.condition(x, x)(x).mean, x[:, None])
    approx(p.condition(x, x[:, None])(x).mean, x[:, None])
Example #19
 def compare(a):
     allclose(B.transpose(a), to_np(a).T)
Example #20
def test_block_matrix():
    dt = np.float64

    # Check correctness.
    rows = [[np.random.randn(4, 3), np.random.randn(4, 5)],
            [np.random.randn(6, 3), np.random.randn(6, 5)]]
    allclose(B.block_matrix(*rows), B.concat2d(*rows))

    # Check that grid is checked correctly.
    assert type(B.block_matrix([Zero(dt, 3, 7), Zero(dt, 3, 4)],
                               [Zero(dt, 4, 5), Zero(dt, 4, 6)])) == Dense
    with pytest.raises(ValueError):
        B.block_matrix([Zero(dt, 5, 5), Zero(dt, 3, 6)],
                       [Zero(dt, 2, 5), Zero(dt, 4, 6)])

    # Test zeros.
    res = B.block_matrix([Zero(dt, 3, 5), Zero(dt, 3, 6)],
                         [Zero(dt, 4, 5), Zero(dt, 4, 6)])
    assert type(res) == Zero
    allclose(res, Zero(dt, 7, 11))

    # Test ones.
    res = B.block_matrix([One(dt, 3, 5), One(dt, 3, 6)],
                         [One(dt, 4, 5), One(dt, 4, 6)])
    assert type(res) == One
    allclose(res, One(dt, 7, 11))

    # Test diagonal.
    res = B.block_matrix([Diagonal(np.array([1, 2])), Zero(dt, 2, 3)],
                         [Zero(dt, 3, 2), Diagonal(np.array([3, 4, 5]))])
    assert type(res) == Diagonal
    allclose(res, Diagonal(np.array([1, 2, 3, 4, 5])))
    # Check that all blocks on the diagonal must be diagonal or zero.
    assert type(B.block_matrix([Diagonal(np.array([1, 2])), Zero(dt, 2, 3)],
                               [Zero(dt, 3, 2), One(dt, 3)])) == Dense
    assert type(B.block_matrix([Diagonal(np.array([1, 2])), Zero(dt, 2, 3)],
                               [Zero(dt, 3, 2), Zero(dt, 3)])) == Diagonal
    # Check that all blocks on the diagonal must be square.
    assert type(B.block_matrix([Diagonal(np.array([1, 2])), Zero(dt, 2, 4)],
                               [Zero(dt, 3, 2), Zero(dt, 3, 4)])) == Dense
    # Check that all other blocks must be zero.
    assert type(B.block_matrix([Diagonal(np.array([1, 2])), One(dt, 2, 3)],
                               [Zero(dt, 3, 2),
                                Diagonal(np.array([3, 4, 5]))])) == Dense
Example #21
def test_eye():
    a = Dense(np.random.randn(5, 10))
    allclose(B.eye(a), np.eye(5, 10))
    allclose(B.eye(a.T), np.eye(10, 5))
Example #22
def test_trace():
    a = np.random.randn(10, 10)
    allclose(B.trace(Dense(a)), np.trace(a))
Example #23
 def ones(x):
     return B.ones([B.shape(x)[0], 1], dtype=B.dtype(x))
Example #24
def test_diag_len():
    assert B.diag_len(np.ones((5, 5))) == 5
    assert B.diag_len(np.ones((10, 5))) == 5
    assert B.diag_len(np.ones((5, 10))) == 5
Example #25
 def dtype(self):
     """Data type."""
     return B.dtype(self.var)
Example #26
def test_matmul():
    diag_square = Diagonal(np.array([1, 2]), 3)
    diag_tall = Diagonal(np.array([3, 4]), 5, 3)
    diag_wide = Diagonal(np.array([5, 6]), 2, 3)

    dense_square = Dense(np.random.randn(3, 3))
    dense_tall = Dense(np.random.randn(5, 3))
    dense_wide = Dense(np.random.randn(2, 3))

    lr = LowRank(left=np.random.randn(5, 2),
                 right=np.random.randn(3, 2),
                 middle=np.random.randn(2, 2))

    def compare(a, b):
        return np.allclose(to_np(B.matmul(a, b)),
                           B.matmul(to_np(a), to_np(b)))

    # Test `Dense`.
    assert compare(dense_wide, dense_tall.T), 'dense w x dense t'

    # Test `LowRank`.
    assert compare(lr, dense_tall.T), 'lr x dense t'
    assert compare(dense_wide, lr.T), 'dense w x lr'
    assert compare(lr, diag_tall.T), 'lr x diag t'
    assert compare(diag_wide, lr.T), 'diag w x lr'
    assert compare(lr, lr.T), 'lr x lr'
    assert compare(lr.T, lr), 'lr x lr (2)'

    # Test `Diagonal`.
    #   Test multiplication between diagonal matrices.
    assert compare(diag_square, diag_square.T), 'diag s x diag s'
    assert compare(diag_tall, diag_square.T), 'diag t x diag s'
    assert compare(diag_wide, diag_square.T), 'diag w x diag s'
    assert compare(diag_square, diag_tall.T), 'diag s x diag t'
    assert compare(diag_tall, diag_tall.T), 'diag t x diag t'
    assert compare(diag_wide, diag_tall.T), 'diag w x diag t'
    assert compare(diag_square, diag_wide.T), 'diag s x diag w'
    assert compare(diag_tall, diag_wide.T), 'diag t x diag w'
    assert compare(diag_wide, diag_wide.T), 'diag w x diag w'

    #   Test multiplication between diagonal and dense matrices.
    assert compare(diag_square, dense_square.T), 'diag s x dense s'
    assert compare(diag_square, dense_tall.T), 'diag s x dense t'
    assert compare(diag_square, dense_wide.T), 'diag s x dense w'
    assert compare(diag_tall, dense_square.T), 'diag t x dense s'
    assert compare(diag_tall, dense_tall.T), 'diag t x dense t'
    assert compare(diag_tall, dense_wide.T), 'diag t x dense w'
    assert compare(diag_wide, dense_square.T), 'diag w x dense s'
    assert compare(diag_wide, dense_tall.T), 'diag w x dense t'
    assert compare(diag_wide, dense_wide.T), 'diag w x dense w'

    assert compare(dense_square, diag_square.T), 'dense s x diag s'
    assert compare(dense_square, diag_tall.T), 'dense s x diag t'
    assert compare(dense_square, diag_wide.T), 'dense s x diag w'
    assert compare(dense_tall, diag_square.T), 'dense t x diag s'
    assert compare(dense_tall, diag_tall.T), 'dense t x diag t'
    assert compare(dense_tall, diag_wide.T), 'dense t x diag w'
    assert compare(dense_wide, diag_square.T), 'dense w x diag s'
    assert compare(dense_wide, diag_tall.T), 'dense w x diag t'
    assert compare(dense_wide, diag_wide.T), 'dense w x diag w'

    # Test `B.matmul` with three matrices simultaneously.
    allclose(B.matmul(dense_tall, dense_square, dense_wide, tr_c=True),
             (to_np(dense_tall)
              .dot(to_np(dense_square))
              .dot(to_np(dense_wide).T)))

    # Test `Woodbury`.
    wb = lr + diag_tall  # Diagonal + LowRank sum forms a Woodbury matrix.
    assert compare(wb, dense_square.T), 'wb x dense s'
    assert compare(dense_square, wb.T), 'dense s x wb'
    assert compare(wb, wb.T), 'wb x wb'
    assert compare(wb.T, wb), 'wb x wb (2)'
Example #27
 def m2(self):
     """Second moment."""
     return self.var + B.outer(B.squeeze(self.mean))
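
Added note: `m2` uses the identity E[x x^T] = Var(x) + E[x] E[x]^T, so the second moment is the variance plus the outer product of the mean. A tiny NumPy sketch of the same identity (values are illustrative):

import numpy as np

# Second moment of a distribution with mean `mu` and variance `S`:
mu = np.array([1.0, -2.0])
S = np.array([[2.0, 0.3], [0.3, 1.0]])
second_moment = S + np.outer(mu, mu)   # mirrors self.var + B.outer(B.squeeze(self.mean))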
Example #28
 def compare(a, b):
     return np.allclose(to_np(B.matmul(a, b)),
                        B.matmul(to_np(a), to_np(b)))
Example #29
    def __init__(self, var, mean=None):
        # Consider all various ranks of `var` and `mean` for convenient
        # broadcasting behaviour.
        if mean is not None:
            if B.rank(var) == 1:
                if B.rank(mean) == 0:
                    mean = mean * B.ones(B.dtype(var), B.shape(var)[0], 1)
                elif B.rank(mean) == 1:
                    mean = mean[:, None]
                else:
                    raise ValueError('Invalid rank {} of mean.'
                                     ''.format(B.rank(mean)))
                var = Diagonal(var)

            elif B.rank(var) == 0:
                if B.rank(mean) == 0:
                    mean = mean * B.ones(B.dtype(var), 1, 1)
                    var = UniformlyDiagonal(var, 1)
                elif B.rank(mean) == 1:
                    mean = mean[:, None]
                    var = UniformlyDiagonal(var, B.shape(mean)[0])
                else:
                    raise ValueError('Invalid rank {} of mean.'
                                     ''.format(B.rank(mean)))

            else:
                raise ValueError('Invalid rank {} of variance.'
                                 ''.format(B.rank(var)))
        else:
            if B.rank(var) == 0:
                var = UniformlyDiagonal(var, 1)
            elif B.rank(var) == 1:
                var = Diagonal(var)
            else:
                raise ValueError('Invalid rank {} of variance.'
                                 ''.format(B.rank(var)))

        Normal.__init__(self, var, mean)
Example #30
def qf(a, b, c):
    return B.matmul(b, B.inverse(a), c, tr_a=True)
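
Added note: together with Examples #13 and #14, this defines `qf(a, b, c)` as the quadratic form b^T a^{-1} c, with `qf(a, b)` the special case c = b and `qf_diag` its diagonal. A NumPy reference, matching the checks in `test_qf` above (the array values are illustrative):

import numpy as np

a = np.array([[2.0, 0.5], [0.5, 1.0]])   # symmetric positive definite
b = np.random.randn(2, 3)
c = np.random.randn(2, 3)
qf_ref = b.T @ np.linalg.solve(a, c)     # b^T a^{-1} c
qf_diag_ref = np.diag(qf_ref)            # reference for qf_diag(a, b, c)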