Example #1
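Tests approximate multiplication of GPs: two processes are sampled, the product process is conditioned on both samples, and the posterior mean is checked against the pointwise product. It also performs the reverse ("division") under a temporarily raised regularisation `B.epsilon`, and verifies that multiplying processes from different graphs raises a `RuntimeError`.

The snippets on this page are taken from a test suite and omit their imports. Below is a minimal sketch of the names they rely on, assuming an older Stheno release that still exposes `Graph` and the `graph=` keyword; the test helpers (`eq`, `le`, `raises`, `rel_err`, `abs_err`, `allclose`, `assert_allclose`) are assumptions about the suite's own local utilities, not part of Stheno's public API.

import numpy as np
import pytest

import lab as B  # Provides the `B.epsilon` jitter; assumed backend.
from stheno import EQ, Exp, RQ, Delta, GP, Graph, Obs, SparseObs, Unique

# Assumed local test utilities (module path is hypothetical):
# from util import eq, le, raises, rel_err, abs_err, allclose, assert_allclose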
def test_approximate_multiplication():
    model = Graph()

    # Construct model.
    p1 = GP(EQ(), 20, graph=model)
    p2 = GP(EQ(), 20, graph=model)
    p_prod = p1 * p2
    x = np.linspace(0, 10, 50)

    # Sample functions.
    s1, s2 = model.sample(p1(x), p2(x))

    # Infer product.
    post = p_prod.condition((p1(x), s1), (p2(x), s2))
    yield le, rel_err(post(x).mean, s1 * s2), 1e-2

    # Perform division.
    cur_epsilon = B.epsilon
    B.epsilon = 1e-8
    post = p2.condition((p1(x), s1), (p_prod(x), s1 * s2))
    yield le, rel_err(post(x).mean, s2), 1e-2
    B.epsilon = cur_epsilon

    # Check that multiplying processes from different graphs raises an error.
    model2 = Graph()
    p3 = GP(EQ(), graph=model2)
    yield raises, RuntimeError, lambda: p3 * p1
Example #2
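Tests `Graph.logpdf` for jointly observed processes: it must agree with the single-process `logpdf`, reject observations that are not given as `(process(inputs), values)` pairs, and match the product-rule factorisation of the joint density.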
def test_multi_logpdf():
    model = Graph()

    p1 = GP(EQ(), graph=model)
    p2 = GP(2 * Exp(), graph=model)
    p3 = p1 + p2

    x1 = np.linspace(0, 2, 5)
    x2 = np.linspace(1, 3, 6)
    x3 = np.linspace(2, 4, 7)

    y1, y2, y3 = model.sample(p1(x1), p2(x2), p3(x3))

    # Test the case where only one process is fed.
    yield assert_allclose, p1(x1).logpdf(y1), model.logpdf(p1(x1), y1)
    yield assert_allclose, p1(x1).logpdf(y1), model.logpdf((p1(x1), y1))

    # Test that every observation must be specified as a process evaluation.
    yield raises, ValueError, lambda: model.logpdf((x1, y1), (p2(x2), y2))
    yield raises, ValueError, lambda: model.logpdf((p1(x1), y1), (x2, y2))

    # Compute the logpdf with the product rule.
    logpdf1 = p1(x1).logpdf(y1) + \
              p2(x2).logpdf(y2) + \
              (p3 | ((p1(x1), y1), (p2(x2), y2)))(x3).logpdf(y3)
    logpdf2 = model.logpdf((p1(x1), y1), (p2(x2), y2), (p3(x3), y3))
    yield assert_allclose, logpdf1, logpdf2
Example #3
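Tests joint sampling of several processes: three deterministic GPs (zero kernel, constant means 1, 2, and 3) are sampled together, and both the sample shapes and the sample values are checked.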
def test_multi_sample():
    model = Graph()
    p1 = GP(0, 1, graph=model)
    p2 = GP(0, 2, graph=model)
    p3 = GP(0, 3, graph=model)

    x1 = np.linspace(0, 1, 10)
    x2 = np.linspace(0, 1, 20)
    x3 = np.linspace(0, 1, 30)

    s1, s2, s3 = model.sample(p1(x1), p2(x2), p3(x3))

    yield eq, s1.shape, (10, 1)
    yield eq, s2.shape, (20, 1)
    yield eq, s3.shape, (30, 1)
    yield eq, np.shape(model.sample(p1(x1))), (10, 1)

    yield le, abs_err(s1 - 1), 1e-4
    yield le, abs_err(s2 - 2), 1e-4
    yield le, abs_err(s3 - 3), 1e-4
Example #4
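The same test as Example #3, written with plain pytest `assert`s instead of nose-style `yield`s.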
def test_multi_sample():
    model = Graph()
    p1 = GP(0, 1, graph=model)
    p2 = GP(0, 2, graph=model)
    p3 = GP(0, 3, graph=model)

    x1 = np.linspace(0, 1, 10)
    x2 = np.linspace(0, 1, 20)
    x3 = np.linspace(0, 1, 30)

    s1, s2, s3 = model.sample(p1(x1), p2(x2), p3(x3))

    assert s1.shape == (10, 1)
    assert s2.shape == (20, 1)
    assert s3.shape == (30, 1)
    assert np.shape(model.sample(p1(x1))) == (10, 1)

    assert abs_err(s1 - 1) <= 1e-4
    assert abs_err(s2 - 2) <= 1e-4
    assert abs_err(s3 - 3) <= 1e-4
Example #5
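A Bayesian linear regression test case: the slope and intercept are constant-kernel GPs, noisy observations of the resulting line are sampled, and the posterior means of the slope and intercept are checked against the sampled truth.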
def test_case_blr():
    model = Graph()
    x = np.linspace(0, 10, 100)

    slope = GP(1, graph=model)
    intercept = GP(1, graph=model)
    f = slope * (lambda x: x) + intercept
    y = f + 1e-2 * GP(Delta(), graph=model)

    # Sample observations, true slope, and intercept.
    y_obs, true_slope, true_intercept = \
        model.sample(y(x), slope(0), intercept(0))

    # Predict.
    post_slope, post_intercept = (slope, intercept) | Obs(y(x), y_obs)
    mean_slope, mean_intercept = post_slope(0).mean, post_intercept(0).mean

    yield le, np.abs(true_slope[0, 0] - mean_slope[0, 0]), 5e-2
    yield le, np.abs(true_intercept[0, 0] - mean_intercept[0, 0]), 5e-2
Example #6
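Tests conditioning on observations of multiple processes: conditioning sequentially, jointly, and with the observations in reversed order must all yield the same posterior.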
def test_multi_conditioning():
    model = Graph()

    p1 = GP(EQ(), graph=model)
    p2 = GP(2 * Exp().stretch(2), graph=model)
    p3 = GP(.5 * RQ(1e-1).stretch(.5), graph=model)

    p = p1 + p2 + p3

    x1 = np.linspace(0, 2, 10)
    x2 = np.linspace(1, 3, 10)
    x3 = np.linspace(0, 3, 10)

    s1, s2 = model.sample(p1(x1), p2(x2))

    post1 = ((p | (p1(x1), s1)) | ((p2 | (p1(x1), s1))(x2), s2))(x3)
    post2 = (p | ((p1(x1), s1), (p2(x2), s2)))(x3)
    post3 = (p | ((p2(x2), s2), (p1(x1), s1)))(x3)

    yield assert_allclose, post1.mean, post2.mean, 'means 1', 1e-6, 1e-6
    yield assert_allclose, post1.mean, post3.mean, 'means 2', 1e-6, 1e-6
    yield assert_allclose, post1.var, post2.var
    yield assert_allclose, post1.var, post3.var
Example #7
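Tests sparse (inducing-point) conditioning with `SparseObs`: when the inducing points coincide with the data, the sparse posterior and the ELBO must match exact conditioning, also under affine transformations of the observed process, with multiple observation sets, and with multiple sets of inducing points.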
def test_sparse_conditioning():
    model = Graph()
    f = GP(EQ().stretch(3), graph=model)
    e = GP(1e-2 * Delta(), graph=model)
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)

    y = f(x).sample()

    # Test that the noise matrix must be diagonal.
    yield raises, RuntimeError, lambda: SparseObs(f(x), f, f(x), y).elbo

    # Test posterior.
    post_sparse = (f | SparseObs(f(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 1', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    post_sparse = (f | SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2))(x_new)
    post_ref = (f | ((2 * f + 2 + e)(x), 2 * y + 2))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 2', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    post_sparse = (f | SparseObs((2 * f + 2)(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 3', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test ELBO.
    e = GP(1e-2 * Delta(), graph=model)
    yield assert_allclose, \
          SparseObs(f(x), e, f(x), y).elbo, \
          (f + e)(x).logpdf(y)
    yield assert_allclose, \
          SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2).elbo, \
          (2 * f + 2 + e)(x).logpdf(2 * y + 2)
    yield assert_allclose, \
          SparseObs((2 * f + 2)(x), e, f(x), y).elbo, \
          (f + e)(x).logpdf(y)

    # Test multiple observations.
    x1 = np.linspace(0, 5, 10)
    x2 = np.linspace(10, 15, 10)
    x_new = np.linspace(6, 9, 10)
    x_ind = np.concatenate((x1, x2, x_new), axis=0)
    y1, y2 = model.sample((f + e)(x1), (f + e)(x2))

    post_sparse = (f | SparseObs(f(x_ind),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    post_ref = (f | Obs(((f + e)(x1), y1), ((f + e)(x2), y2)))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test multiple observations and multiple inducing points.
    post_sparse = (f | SparseObs((f(x1), f(x2), f(x_new)),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 4', 1e-6, 1e-6
    yield assert_allclose, post_sparse.var, post_ref.var

    # Test multiple inducing points.
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)
    x_ind1 = x[:5]
    x_ind2 = x[5:]
    y = model.sample((f + e)(x))

    post_sparse = (f | SparseObs((f(x_ind1), f(x_ind2)), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    yield assert_allclose, post_sparse.mean, post_ref.mean, \
          'means 5', 1e-4, 1e-4
    yield assert_allclose, post_sparse.var, post_ref.var
Example #8
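A pytest-style version of Example #7 that additionally checks caching of the posterior mean `mu`, the corrective kernel parameter `A`, and the `elbo`, and that `Graph.logpdf` accepts a `SparseObs` object.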
def test_sparse_conditioning():
    model = Graph()
    f = GP(EQ().stretch(3), graph=model)
    e = GP(1e-2 * Delta(), graph=model)
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)

    y = f(x).sample()

    # Test that the noise matrix must be diagonal.
    with pytest.raises(RuntimeError):
        SparseObs(f(x), f, f(x), y).elbo

    # Test posterior.
    post_sparse = (f | SparseObs(f(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 1', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    post_sparse = (f | SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2))(x_new)
    post_ref = (f | ((2 * f + 2 + e)(x), 2 * y + 2))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 2', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    post_sparse = (f | SparseObs((2 * f + 2)(x), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 3', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    # Test ELBO.
    e = GP(1e-2 * Delta(), graph=model)
    allclose(SparseObs(f(x), e, f(x), y).elbo, (f + e)(x).logpdf(y))
    allclose(SparseObs(f(x), e, (2 * f + 2)(x), 2 * y + 2).elbo,
             (2 * f + 2 + e)(x).logpdf(2 * y + 2))
    allclose(SparseObs((2 * f + 2)(x), e, f(x), y).elbo, (f + e)(x).logpdf(y))

    # Test multiple observations.
    x1 = np.linspace(0, 5, 10)
    x2 = np.linspace(10, 15, 10)
    x_new = np.linspace(6, 9, 10)
    x_ind = np.concatenate((x1, x2, x_new), axis=0)
    y1, y2 = model.sample((f + e)(x1), (f + e)(x2))

    post_sparse = (f | SparseObs(f(x_ind),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    post_ref = (f | Obs(((f + e)(x1), y1), ((f + e)(x2), y2)))(x_new)
    allclose(post_sparse.mean, post_ref.mean)
    allclose(post_sparse.var, post_ref.var)

    # Test multiple observations and multiple inducing points.
    post_sparse = (f | SparseObs((f(x1), f(x2), f(x_new)),
                                 (e, f(Unique(x1)), y1),
                                 (e, f(Unique(x2)), y2)))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 4', atol=1e-6,
             rtol=1e-6)
    allclose(post_sparse.var, post_ref.var)

    # Test multiple inducing points.
    x = np.linspace(0, 5, 10)
    x_new = np.linspace(6, 10, 10)
    x_ind1 = x[:5]
    x_ind2 = x[5:]
    y = model.sample((f + e)(x))

    post_sparse = (f | SparseObs((f(x_ind1), f(x_ind2)), e, f(x), y))(x_new)
    post_ref = (f | ((f + e)(x), y))(x_new)
    allclose(post_sparse.mean, post_ref.mean, desc='means 5', atol=1e-4,
             rtol=1e-4)
    allclose(post_sparse.var, post_ref.var)

    # Test caching of mean.
    obs = SparseObs(f(x), e, f(x), y)
    mu = obs.mu
    allclose(mu, obs.mu)

    # Test caching of corrective kernel parameter.
    obs = SparseObs(f(x), e, f(x), y)
    A = obs.A
    allclose(A, obs.A)

    # Test caching of elbo.
    obs = SparseObs(f(x), e, f(x), y)
    elbo = obs.elbo
    allclose(elbo, obs.elbo)

    # Test that `Graph.logpdf` accepts a `SparseObs` object.
    obs = SparseObs(f(x), e, f(x), y)
    allclose(model.logpdf(obs), (f + e)(x).logpdf(y))