Example #1
def test_delta():
    k = Delta()
    x1 = np.random.randn(10, 2)
    x2 = np.random.randn(5, 2)

    # Verify that the kernel has the right properties.
    yield eq, k.stationary, True
    yield eq, k.var, 1
    yield eq, k.length_scale, 0
    yield eq, k.period, np.inf
    yield eq, str(k), 'Delta()'

    # Check equality.
    yield eq, Delta(), Delta()
    yield neq, Delta(), Delta(epsilon=k.epsilon * 10)
    yield neq, Delta(), EQ()

    # Check caching.
    yield ok, allclose(k(x1), np.eye(10)), 'same'
    yield ok, allclose(k(x1, x2), np.zeros((10, 5))), 'others'

    # Standard tests:
    for x in kernel_generator(k):
        yield x

    # Test `Unique` inputs.
    yield assert_instance, k(Unique(x1), Unique(x1.copy())), Zero
    yield assert_instance, k(Unique(x1), Unique(x1)), UniformlyDiagonal
    yield assert_instance, k(Unique(x1), x1), Zero
    yield assert_instance, k(x1, Unique(x1)), Zero

    yield assert_instance, k.elwise(Unique(x1), Unique(x1.copy())), Zero
    yield assert_instance, k.elwise(Unique(x1), Unique(x1)), One
    yield assert_instance, k.elwise(Unique(x1), x1), Zero
    yield assert_instance, k.elwise(x1, Unique(x1)), Zero
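
These snippets are excerpted from the stheno test suite and omit their import headers; note also that some examples use the older nose-style `yield` tests (#1, #3, #4, #7, #12, #15) while others are their pytest ports. A minimal header for the pytest-style examples might look like the sketch below. The exact module paths are an assumption: stheno's public API has moved between versions (the kernels, for instance, were later split out into the mlkernels package).

# A sketch of the imports the snippets assume; module layout varies by version.
import numpy as np
import lab as B  # generic array backend, used as `B` throughout
import pytest

from stheno import (
    GP, Measure, Obs, SparseObs,   # processes, measures, observations
    EQ, RQ, Exp, Linear, Delta,    # kernels
    Matern12, Matern32, Matern52,
    Unique, WeightedUnique,        # input wrappers recognised by Delta
)
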
Example #2
def test_logpdf():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    e = GP(Delta(), measure=m)
    p3 = p1 + p2

    x1 = B.linspace(0, 2, 5)
    x2 = B.linspace(1, 3, 6)
    x3 = B.linspace(2, 4, 7)
    y1, y2, y3 = m.sample(p1(x1), p2(x2), p3(x3))

    # Test the case where only one process is fed.
    approx(p1(x1).logpdf(y1), m.logpdf(p1(x1), y1))
    approx(p1(x1).logpdf(y1), m.logpdf((p1(x1), y1)))

    # Compute the logpdf with the product rule.
    d1 = m
    d2 = d1 | (p1(x1), y1)
    d3 = d2 | (p2(x2), y2)
    approx(
        d1(p1)(x1).logpdf(y1) + d2(p2)(x2).logpdf(y2) + d3(p3)(x3).logpdf(y3),
        m.logpdf((p1(x1), y1), (p2(x2), y2), (p3(x3), y3)),
    )

    # Check that `Measure.logpdf` allows `Obs` and `SparseObs`.
    obs = Obs(p3(x3), y3)
    approx(m.logpdf(obs), p3(x3).logpdf(y3))
    obs = SparseObs(p3(x3), e, p3(x3), y3)
    approx(m.logpdf(obs), (p3 + e)(x3).logpdf(y3))
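
The product-rule block above checks the chain rule for densities: conditioning the measure step by step (d2 = d1 | (p1(x1), y1), and so on) realises the factorisation

    log p(y1, y2, y3) = log p(y1) + log p(y2 | y1) + log p(y3 | y1, y2),

so the sum of the three conditional log-densities must reproduce the joint `m.logpdf`.
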
Example #3
def test_basic_arithmetic():
    k1 = EQ()
    k2 = RQ(1e-1)
    k3 = Matern12()
    k4 = Matern32()
    k5 = Matern52()
    k6 = Delta()
    k7 = Linear()
    xs1 = np.random.randn(10, 2), np.random.randn(20, 2)
    xs2 = np.random.randn(), np.random.randn()

    yield ok, allclose(k6(xs1[0]), k6(xs1[0], xs1[0])), 'dispatch'
    yield ok, allclose((k1 * k2)(*xs1), k1(*xs1) * k2(*xs1)), 'prod'
    yield ok, allclose((k1 * k2)(*xs2), k1(*xs2) * k2(*xs2)), 'prod 2'
    yield ok, allclose((k3 + k4)(*xs1), k3(*xs1) + k4(*xs1)), 'sum'
    yield ok, allclose((k3 + k4)(*xs2), k3(*xs2) + k4(*xs2)), 'sum 2'
    yield ok, allclose((5. * k5)(*xs1), 5. * k5(*xs1)), 'prod 3'
    yield ok, allclose((5. * k5)(*xs2), 5. * k5(*xs2)), 'prod 4'
    yield ok, allclose((5. + k7)(*xs1), 5. + k7(*xs1)), 'sum 3'
    yield ok, allclose((5. + k7)(*xs2), 5. + k7(*xs2)), 'sum 4'
    yield ok, allclose(k1.stretch(2.)(*xs1), k1(xs1[0] / 2.,
                                                xs1[1] / 2.)), 'stretch'
    yield ok, allclose(k1.stretch(2.)(*xs2), k1(xs2[0] / 2.,
                                                xs2[1] / 2.)), 'stretch 2'
    yield ok, allclose(
        k1.periodic(1.)(*xs1),
        k1.periodic(1.)(xs1[0], xs1[1] + 5.)), 'periodic'
    yield ok, allclose(
        k1.periodic(1.)(*xs2),
        k1.periodic(1.)(xs2[0], xs2[1] + 5.)), 'periodic 2'
Example #4
def test_terms():
    k = EQ() + EQ() * Linear() + RQ(1) * RQ(2) + Delta()
    yield eq, k.num_terms, 4
    yield eq, str(k.term(0)), 'EQ()'
    yield eq, str(k.term(1)), 'EQ() * Linear()'
    yield eq, str(k.term(2)), 'RQ(1) * RQ(2)'
    yield eq, str(k.term(3)), 'Delta()'
    yield raises, IndexError, lambda: k.term(4)
    yield raises, IndexError, lambda: EQ().term(1)
Example #5
def test_terms():
    k = EQ() + EQ() * Linear() + RQ(1) * RQ(2) + Delta()
    assert k.num_terms == 4
    assert str(k.term(0)) == 'EQ()'
    assert str(k.term(1)) == 'EQ() * Linear()'
    assert str(k.term(2)) == 'RQ(1) * RQ(2)'
    assert str(k.term(3)) == 'Delta()'
    with pytest.raises(IndexError):
        k.term(4)
    with pytest.raises(IndexError):
        EQ().term(1)
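
As a quick usage illustration of the `num_terms`/`term` API exercised in Examples #4 and #5, a sum kernel can be taken apart term by term (a sketch, reusing the kernels from the tests):

k = EQ() + EQ() * Linear() + Delta()
terms = [k.term(i) for i in range(k.num_terms)]
# terms is [EQ(), EQ() * Linear(), Delta()]; k.term(3) would raise IndexError.
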
Example #6
def test_delta_evaluations(x1, x2):
    k = Delta()
    n1 = B.shape(B.uprank(x1))[0]
    n2 = B.shape(B.uprank(x2))[0]

    # Check that identical inputs give the identity and distinct inputs give zeros.
    allclose(k(x1), B.eye(n1))
    allclose(k(x1, x2), B.zeros(n1, n2))

    # Standard tests:
    standard_kernel_tests(k)

    # Test `Unique` inputs.
    assert isinstance(k(Unique(x1), Unique(x1.copy())), Zero)
    assert isinstance(k(Unique(x1), Unique(x1)), Diagonal)
    assert isinstance(k(Unique(x1), x1), Zero)
    assert isinstance(k(x1, Unique(x1)), Zero)

    allclose(k.elwise(Unique(x1), Unique(x1.copy())), B.zeros(n1, 1))
    allclose(k.elwise(Unique(x1), Unique(x1)), B.ones(n1, 1))
    allclose(k.elwise(Unique(x1), x1), B.zeros(n1, 1))
    allclose(k.elwise(x1, Unique(x1)), B.zeros(n1, 1))
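
The `Unique` checks above hinge on object identity rather than on comparing elements: wrapping an array in `Unique` asserts that its rows are pairwise distinct, which lets `Delta` return structured matrix types directly. A minimal sketch of the semantics, following the newer API of this example:

x = B.randn(4, 2)
k = Delta()
k(Unique(x), Unique(x))         # same wrapper on both sides: identity -> Diagonal
k(Unique(x), Unique(x.copy()))  # two distinct wrappers: assumed disjoint -> Zero
k(Unique(x), x)                 # plain inputs never match unique ones -> Zero
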
Example #7
def test_factors():
    k = EQ() * Linear()
    yield eq, k.num_factors, 2
    yield eq, str(k.factor(0)), 'EQ()'
    yield eq, str(k.factor(1)), 'Linear()'
    yield raises, IndexError, lambda: k.factor(2)

    k = (EQ() + EQ()) * Delta() * (RQ(1) + Linear())
    yield eq, k.num_factors, 4
    yield eq, str(k.factor(0)), '2'
    yield eq, str(k.factor(1)), 'EQ()'
    yield eq, str(k.factor(2)), 'Delta()'
    yield eq, str(k.factor(3)), 'RQ(1) + Linear()'
    yield raises, IndexError, lambda: k.factor(4)
    yield raises, IndexError, lambda: EQ().factor(1)
Example #8
def test_case_blr():
    m = Measure()
    x = B.linspace(0, 10, 100)

    slope = GP(1, measure=m)
    intercept = GP(1, measure=m)
    f = slope * (lambda x: x) + intercept
    y = f + 1e-2 * GP(Delta(), measure=m)

    # Sample observations, true slope, and intercept.
    y_obs, true_slope, true_intercept = m.sample(y(x), slope(0), intercept(0))

    # Predict.
    post = m | (y(x), y_obs)
    approx(post(slope)(0).mean, true_slope, atol=5e-2)
    approx(post(intercept)(0).mean, true_intercept, atol=5e-2)
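
The construction above is Bayesian linear regression phrased as a GP: `GP(1, measure=m)` has a constant kernel, so it is a random constant, and the model is

    f(x) = a * x + b,    a, b ~ N(0, 1) independently,
    y(x) = f(x) + eps,   eps ~ N(0, 1e-4),

where the noise comes from the `1e-2 * GP(Delta(), measure=m)` term. Conditioning on noisy samples of the line then recovers the slope and intercept to within the 5e-2 tolerance.
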
Example #9
def test_delta():
    k = Delta()

    # Verify that the kernel has the right properties.
    assert k.stationary
    assert str(k) == 'Delta()'

    # Check equality.
    assert Delta() == Delta()
    assert Delta() != Delta(epsilon=k.epsilon * 10)
    assert Delta() != EQ()
Example #10
def test_factors():
    k = EQ() * Linear()
    assert k.num_factors == 2
    assert str(k.factor(0)) == 'EQ()'
    assert str(k.factor(1)) == 'Linear()'
    with pytest.raises(IndexError):
        k.factor(2)

    k = (EQ() + EQ()) * Delta() * (RQ(1) + Linear())
    assert k.num_factors == 4
    assert str(k.factor(0)) == '2'
    assert str(k.factor(1)) == 'EQ()'
    assert str(k.factor(2)) == 'Delta()'
    assert str(k.factor(3)) == 'RQ(1) + Linear()'
    with pytest.raises(IndexError):
        k.factor(4)
    with pytest.raises(IndexError):
        EQ().factor(1)
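
The `'2'` in `str(k.factor(0))` falls out of algebraic simplification: `EQ() + EQ()` is collected into `2 * EQ()`, and the constant is pulled out to the front of the product, leaving the four factors seen in the test:

k = (EQ() + EQ()) * Delta() * (RQ(1) + Linear())
factors = [k.factor(i) for i in range(k.num_factors)]
# factors is [2, EQ(), Delta(), RQ(1) + Linear()].
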
Example #11
def test_delta_evaluations(x1, w1, x2, w2):
    k = Delta()
    n1 = num_elements(x1)
    n2 = num_elements(x2)

    # Check that identical inputs give the identity and distinct inputs give zeros.
    approx(k(x1), B.eye(n1))
    approx(k(x1, x2), B.zeros(n1, n2))

    # Standard tests:
    standard_kernel_tests(k)

    # Test `Unique` inputs.
    assert isinstance(k(Unique(x1), Unique(x1.copy())), Zero)
    assert isinstance(k(Unique(x1), Unique(x1)), Diagonal)
    assert isinstance(k(Unique(x1), x1), Zero)
    assert isinstance(k(x1, Unique(x1)), Zero)

    approx(k.elwise(Unique(x1), Unique(x1.copy())), B.zeros(n1, 1))
    approx(k.elwise(Unique(x1), Unique(x1)), B.ones(n1, 1))
    approx(k.elwise(Unique(x1), x1), B.zeros(n1, 1))

    # Test `WeightedUnique` inputs.
    assert isinstance(k(WeightedUnique(x1, w1), WeightedUnique(x1.copy(), w1)),
                      Zero)
    assert isinstance(k(WeightedUnique(x1, w1), WeightedUnique(x1, w1)),
                      Diagonal)
    assert isinstance(k(WeightedUnique(x1, w1), x1), Zero)
    assert isinstance(k(x1, WeightedUnique(x1, w1)), Zero)

    approx(k.elwise(WeightedUnique(x1, w1), WeightedUnique(x1.copy(), w1)),
           B.zeros(n1, 1))
    approx(k.elwise(WeightedUnique(x1, w1), WeightedUnique(x1, w1)),
           B.ones(n1, 1))
    approx(k.elwise(WeightedUnique(x1, w1), x1), B.zeros(n1, 1))
    approx(k.elwise(x1, WeightedUnique(x1, w1)), B.zeros(n1, 1))
Example #12
def test_case_blr():
    model = Graph()
    x = np.linspace(0, 10, 100)

    slope = GP(1, graph=model)
    intercept = GP(1, graph=model)
    f = slope * (lambda x: x) + intercept
    y = f + 1e-2 * GP(Delta(), graph=model)

    # Sample observations, true slope, and intercept.
    y_obs, true_slope, true_intercept = \
        model.sample(y(x), slope(0), intercept(0))

    # Predict.
    post_slope, post_intercept = (slope, intercept) | Obs(y(x), y_obs)
    mean_slope, mean_intercept = post_slope(0).mean, post_intercept(0).mean

    yield le, np.abs(true_slope[0, 0] - mean_slope[0, 0]), 5e-2
    yield le, np.abs(true_intercept[0, 0] - mean_intercept[0, 0]), 5e-2
Example #13
def test_delta():
    k = Delta()

    # Verify that the kernel has the right properties.
    assert k.stationary
    assert k.var == 1
    assert k.length_scale == 0
    assert k.period == np.inf
    assert str(k) == 'Delta()'

    # Check equality.
    assert Delta() == Delta()
    assert Delta() != Delta(epsilon=k.epsilon * 10)
    assert Delta() != EQ()
Example #14
def test_basic_arithmetic():
    k1 = EQ()
    k2 = RQ(1e-1)
    k3 = Matern12()
    k4 = Matern32()
    k5 = Matern52()
    k6 = Delta()
    k7 = Linear()
    xs1 = B.randn(10, 2), B.randn(20, 2)
    xs2 = B.randn(), B.randn()

    approx(k6(xs1[0]), k6(xs1[0], xs1[0]))
    approx((k1 * k2)(*xs1), k1(*xs1) * k2(*xs1))
    approx((k1 * k2)(*xs2), k1(*xs2) * k2(*xs2))
    approx((k3 + k4)(*xs1), k3(*xs1) + k4(*xs1))
    approx((k3 + k4)(*xs2), k3(*xs2) + k4(*xs2))
    approx((5.0 * k5)(*xs1), 5.0 * k5(*xs1))
    approx((5.0 * k5)(*xs2), 5.0 * k5(*xs2))
    approx((5.0 + k7)(*xs1), 5.0 + k7(*xs1))
    approx((5.0 + k7)(*xs2), 5.0 + k7(*xs2))
    approx(k1.stretch(2.0)(*xs1), k1(xs1[0] / 2.0, xs1[1] / 2.0))
    approx(k1.stretch(2.0)(*xs2), k1(xs2[0] / 2.0, xs2[1] / 2.0))
    approx(k1.periodic(1.0)(*xs1), k1.periodic(1.0)(xs1[0], xs1[1] + 5.0))
    approx(k1.periodic(1.0)(*xs2), k1.periodic(1.0)(xs2[0], xs2[1] + 5.0))
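
The assertions in Examples #3 and #14 pin down the kernel algebra: sums, products, and scalar operations act elementwise on the evaluated matrices, and `stretch` is an input transform,

    (k1 + k2)(x, y) = k1(x, y) + k2(x, y),
    (k1 * k2)(x, y) = k1(x, y) * k2(x, y),
    k.stretch(s)(x, y) = k(x / s, y / s),

while `k.periodic(p)` is invariant to shifting either argument by any multiple of the period p (hence the `+ 5.0` shift against period `1.0`).
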
Example #15
def test_sparse_conditioning():
    model = Graph()
    f = GP(EQ().stretch(3), graph=model)
    e = GP(1e-2 * Delta(), graph=model)
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)

    y = f(x).sample()

    # Test that the noise matrix must indeed be diagonal.
    yield raises, RuntimeError, lambda: SparseObs(f(x), f, f(x), y).elbo

    # Test posterior.
    post_sparse = (f | SparseObs(f(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 1', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    post_sparse = (f | SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2))(x_new)
    post_ref = (f | ((2 * f + 2 + e)(x), 2 * y + 2))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 2', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    post_sparse = (f | SparseObs((2 * f + 2)(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 3', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test ELBO.
    e = GP(1e-2 * Delta(), graph=model)
    yield assert_allclose, \
          SparseObs(f(x), e, f(x), y).elbo, \
          (f + e)(x).logpdf(y)
    yield assert_allclose, \
          SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2).elbo, \
          (2 * f + 2 + e)(x).logpdf(2 * y + 2)
    yield assert_allclose, \
          SparseObs((2 * f + 2)(x), e, f(x), y).elbo, \
          (f + e)(x).logpdf(y)

    # Test multiple observations.
    x1 = np.linspace(0, 5, 10)
    x2 = np.linspace(10, 15, 10)
    x_new = np.linspace(6, 9, 10)
    x_ind = np.concatenate((x1, x2, x_new), axis=0)
    y1, y2 = model.sample((f + e)(x1), (f + e)(x2))

    post_sparse = (f | SparseObs(f(x_ind),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    post_ref = (f | Obs(((f + e)(x1), y1), ((f + e)(x2), y2)))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test multiple observations and multiple inducing points.
    post_sparse = (f | SparseObs((f(x1), f(x2), f(x_new)),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 4', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test multiple inducing points.
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)
    x_ind1 = x[:5]
    x_ind2 = x[5:]
    y = model.sample((f + e)(x))

    post_sparse = (f | SparseObs((f(x_ind1), f(x_ind2)), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 5', 1e-4, 1e-4
    yield assert_allclose, post_sparse.var, post_ref.var
Example #16
def test_sparse_conditioning_and_elbo():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    e = GP(Delta(), measure=m)
    p_sum = p1 + p2

    # Sample some data to condition on.
    x1 = B.linspace(0, 2, 2)
    y1 = (p1 + e)(x1).sample()
    x_sum = B.linspace(3, 5, 3)
    y_sum = (p_sum + e)(x_sum).sample()

    # Determine FDDs to check.
    x_check = B.linspace(0, 5, 5)
    fdds_check = [
        cross(p1, p2, p_sum)(x_check),
        p1(x_check),
        p2(x_check),
        p_sum(x_check),
    ]

    # Check conditioning and ELBO on one data set.
    assert_equal_measures(
        fdds_check,
        m | ((p_sum + e)(x_sum), y_sum),
        m | SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum),
        m | SparseObs((p_sum(x_sum), ), e, p_sum(x_sum), y_sum),
        m | SparseObs((p_sum(x_sum), p1(x1)), e, p_sum(x_sum), y_sum),
        m | SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum)),
        m | SparseObs((p_sum(x_sum), ), (e, p_sum(x_sum), y_sum)),
        m.condition(
            SparseObs(
                (p_sum(x_sum), p1(x1)),
                (e, p_sum(x_sum), y_sum),
            )),
    )
    approx(
        m.logpdf(Obs((p_sum + e)(x_sum), y_sum)),
        SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum)).elbo(m),
    )

    # Check conditioning and ELBO on two data sets.
    assert_equal_measures(
        fdds_check,
        m | (((p_sum + e)(x_sum), y_sum), ((p1 + e)(x1), y1)),
        m.condition(
            SparseObs((p_sum(x_sum), p1(x1)), (e, p_sum(x_sum), y_sum),
                      (e, p1(x1), y1))),
    )
    approx(
        m.logpdf(Obs(((p_sum + e)(x_sum), y_sum), ((p1 + e)(x1), y1))),
        SparseObs((p_sum(x_sum), p1(x1)), (e, p_sum(x_sum), y_sum),
                  (e, p1(x1), y1)).elbo(m),
    )

    # The following lose information, so check them separately.
    assert_equal_measures(
        fdds_check,
        m | SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum), (e, p1(x1), y1)),
        m | SparseObs((p_sum(x_sum), ), (e, p_sum(x_sum), y_sum),
                      (e, p1(x1), y1)),
    )

    # Test lazy computation.
    obs = SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum)
    for name in ["K_z", "elbo", "mu", "A"]:
        approx(
            getattr(SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum), name)(m),
            getattr(obs, name)(m),
        )

    # Test requirement that noise must be diagonal.
    with pytest.raises(RuntimeError):
        SparseObs(p_sum(x_sum), p_sum, p_sum(x_sum), y_sum).elbo(m)
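
The ELBO identities in Examples #15-#17 rely on the sparse bound being tight when the inducing inputs coincide with the observation inputs. In general

    elbo = log p(y) - KL[q || posterior] <= log p(y),

and with the inducing points equal to the data points the low-rank approximation of the prior covariance is exact, the correction vanishes, and the bound reduces to the exact log marginal likelihood, which is why `SparseObs(f(x), e, f(x), y).elbo` matches `(f + e)(x).logpdf(y)`. (This is the behaviour of Titsias-style variational sparse approximations; that `SparseObs` implements such a scheme is inferred from these tests, not stated on this page.)
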
Example #17
def test_sparse_conditioning():
    model = Graph()
    f = GP(EQ().stretch(3), graph=model)
    e = GP(1e-2 * Delta(), graph=model)
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)

    y = f(x).sample()

    # Test that the noise matrix must indeed be diagonal.
    with pytest.raises(RuntimeError):
        SparseObs(f(x), f, f(x), y).elbo

    # Test posterior.
    post_sparse = (f | SparseObs(f(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 1', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    post_sparse = (f | SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2))(x_new)
    post_ref = (f | ((2 * f + 2 + e)(x), 2 * y + 2))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 2', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    post_sparse = (f | SparseObs((2 * f + 2)(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 3', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    # Test ELBO.
    e = GP(1e-2 * Delta(), graph=model)
    allclose(SparseObs(f(x), e, f(x), y).elbo, (f + e)(x).logpdf(y))
    allclose(SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2).elbo,
             (2 * f + 2 + e)(x).logpdf(2 * y + 2))
    allclose(SparseObs((2 * f + 2)(x), e, f(x), y).elbo, (f + e)(x).logpdf(y))

    # Test multiple observations.
    x1 = np.linspace(0, 5, 10)
    x2 = np.linspace(10, 15, 10)
    x_new = np.linspace(6, 9, 10)
    x_ind = np.concatenate((x1, x2, x_new), axis=0)
    y1, y2 = model.sample((f + e)(x1), (f + e)(x2))

    post_sparse = (f | SparseObs(f(x_ind),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    post_ref = (f | Obs(((f + e)(x1), y1), ((f + e)(x2), y2)))(x_new)
    allclose(post_sparse.mean, post_ref.mean)
    allclose(post_sparse.var, post_ref.var)

    # Test multiple observations and multiple inducing points.
    post_sparse = (f | SparseObs((f(x1), f(x2), f(x_new)),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 4', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    # Test multiple inducing points.
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)
    x_ind1 = x[:5]
    x_ind2 = x[5:]
    y = model.sample((f + e)(x))

    post_sparse = (f | SparseObs((f(x_ind1), f(x_ind2)), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 5', atol=1e-4,
             rtol=1e-4)
    allclose(post_sparse.var, post_ref.var)

    # Test caching of mean.
    obs = SparseObs(f(x), e, f(x), y)
    mu = obs.mu
    allclose(mu, obs.mu)

    # Test caching of corrective kernel parameter.
    obs = SparseObs(f(x), e, f(x), y)
    A = obs.A
    allclose(A, obs.A)

    # Test caching of elbo.
    obs = SparseObs(f(x), e, f(x), y)
    elbo = obs.elbo
    allclose(elbo, obs.elbo)

    # Test that `Graph.logpdf` takes a `SparseObservations` object.
    obs = SparseObs(f(x), e, f(x), y)
    allclose(model.logpdf(obs), (f + e)(x).logpdf(y))