Example 1
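These snippets appear to be drawn from Stheno's test suite. A plausible common preamble, assuming the usual Stheno and LAB conventions, is sketched below; the exact home of the test helpers (approx, assert_equal_measures, assert_equal_normals, generate_noise_tuple) and of internals such as MultiOutputKernel, MultiOutputMean, ZeroKernel, dimensionality, and _take_x is an assumption.

import numpy as np
import pytest
import lab as B  # LAB: the generic linear-algebra backend used throughout Stheno.

from stheno import GP, Measure, Obs, PseudoObs, cross, EQ, Exp, Delta, Linear

# Helpers like `approx` (an allclose-style comparison) and fixtures such as
# `generate_noise_tuple` are assumed to live in the test suite's own utility module.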
def test_multi_sample():
    m = Measure()
    p1 = GP(1, 0, measure=m)
    p2 = GP(2, 0, measure=m)
    p3 = GP(3, 0, measure=m)

    x1 = B.linspace(0, 1, 5)
    x2 = B.linspace(0, 1, 10)
    x3 = B.linspace(0, 1, 15)

    fdds = (p1(x1), p2(x2), p3(x3))
    s1, s2, s3 = m.sample(*fdds)

    assert B.shape(p1(x1).sample()) == s1.shape == (5, 1)
    assert B.shape(p2(x2).sample()) == s2.shape == (10, 1)
    assert B.shape(p3(x3).sample()) == s3.shape == (15, 1)

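    # With zero kernels, every sample equals its prior mean.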
    approx(s1, 1 * B.ones(5, 1))
    approx(s2, 2 * B.ones(10, 1))
    approx(s3, 3 * B.ones(15, 1))

    # Test random state.
    state, s11, s21, s31 = m.sample(B.create_random_state(np.float64, seed=0), *fdds)
    state, s12, s22, s32 = m.sample(B.create_random_state(np.float64, seed=0), *fdds)
    assert isinstance(state, B.RandomState)
    approx(s11, s12)
    approx(s21, s22)
    approx(s31, s32)
Example 2
def test_logpdf():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    p3 = p1 + p2

    x1 = B.linspace(0, 2, 5)
    x2 = B.linspace(1, 3, 6)
    x3 = B.linspace(2, 4, 7)
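    # Draw a single joint sample so that y1, y2, and y3 are mutually consistent.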
    y1, y2, y3 = m.sample(p1(x1), p2(x2), p3(x3))

    # Test case that only one process is fed.
    approx(p1(x1).logpdf(y1), m.logpdf(p1(x1), y1))
    approx(p1(x1).logpdf(y1), m.logpdf((p1(x1), y1)))

    # Compute the logpdf with the product rule.
    d1 = m
    d2 = d1 | (p1(x1), y1)
    d3 = d2 | (p2(x2), y2)
    approx(
        d1(p1)(x1).logpdf(y1) + d2(p2)(x2).logpdf(y2) + d3(p3)(x3).logpdf(y3),
        m.logpdf((p1(x1), y1), (p2(x2), y2), (p3(x3), y3)),
    )

    # Check that `Measure.logpdf` allows `Obs` and `PseudoObs`.
    obs = Obs(p3(x3), y3)
    approx(m.logpdf(obs), p3(x3).logpdf(y3))
    obs = PseudoObs(p3(x3), p3(x3, 1), y3)
    approx(m.logpdf(obs), p3(x3, 1).logpdf(y3))
Example 3
def test_default_measure():
    with Measure() as m1:
        p1 = GP(EQ())

        with Measure() as m2:
            p2 = GP(EQ())

        p3 = GP(EQ())

    p4 = GP(EQ())

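    # A GP attaches to the innermost measure active at construction time; outside
    # any `with Measure()` block it gets a fresh default measure.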
    assert p1.measure is m1
    assert p2.measure is m2
    assert p3.measure is m1
    assert p4.measure is not m1
    assert p4.measure is not m2
Example 4
def test_blr():
    m = Measure()
    x = B.linspace(0, 10, 100)

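    # Bayesian linear regression: constant-kernel GPs act as random slope and
    # intercept, and 1e-2 * GP(Delta()) adds a small amount of observation noise.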
    slope = GP(1, measure=m)
    intercept = GP(1, measure=m)
    f = slope * (lambda x: x) + intercept
    y = f + 1e-2 * GP(Delta(), measure=m)

    # Sample observations, true slope, and intercept.
    y_obs, true_slope, true_intercept = m.sample(y(x), slope(0), intercept(0))

    # Predict.
    post = m | (y(x), y_obs)
    approx(post(slope)(0).mean, true_slope, atol=5e-2)
    approx(post(intercept)(0).mean, true_intercept, atol=5e-2)
Example 5
def test_additive_model():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(EQ(), measure=m)
    p_sum = p1 + p2

    x = B.linspace(0, 5, 10)
    y1 = p1(x).sample()
    y2 = p2(x).sample()

    # First, test independence:
    assert m.kernels[p2, p1] == ZeroKernel()
    assert m.kernels[p1, p2] == ZeroKernel()

    # Now run through some test cases:
    post = (m | (p1(x), y1)) | (p2(x), y2)
    approx(post(p_sum)(x).mean, y1 + y2)

    post = (m | (p2(x), y2)) | (p1(x), y1)
    approx(post(p_sum)(x).mean, y1 + y2)

    post = (m | (p1(x), y1)) | (p_sum(x), y1 + y2)
    approx(post(p2)(x).mean, y2)

    post = (m | (p_sum(x), y1 + y2)) | (p1(x), y1)
    approx(post(p2)(x).mean, y2)

    post = (m | (p2(x), y2)) | (p_sum(x), y1 + y2)
    approx(post(p1)(x).mean, y1)

    post = (m | (p_sum(x), y1 + y2)) | (p2(x), y2)
    approx(post(p1)(x).mean, y1)
Example 6
def test_take_x():
    m = Measure()
    f1 = GP(EQ())
    f2 = GP(EQ())
    k = MultiOutputKernel(m, f1)
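    # `_take_x` should reject an FDD whose process is not part of the kernel.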
    with pytest.raises(ValueError):
        _take_x(k, f2(B.linspace(0, 1, 10)), B.randn(10) > 0)
Example 7
def test_manual_new_gp():
    m = Measure()
    p1 = GP(1, EQ(), measure=m)
    p2 = GP(2, EQ(), measure=m)
    p_sum = p1 + p2

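    # Manually register a process equal to p_sum - p2 (i.e. p1) by specifying its
    # mean, its variance, and its cross-covariances with the existing processes.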
    p1_equivalent = m.add_gp(
        m.means[p_sum] - m.means[p2],
        (m.kernels[p_sum] + m.kernels[p2] - m.kernels[p_sum, p2] -
         m.kernels[p2, p_sum]),
        lambda j: m.kernels[p_sum, j] - m.kernels[p2, j],
    )

    x = B.linspace(0, 10, 5)
    s1, s2 = m.sample(p1(x), p1_equivalent(x))
    approx(s1, s2, atol=1e-4)
Example 8
def test_conditioning(generate_noise_tuple):
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    p_sum = p1 + p2

    # Sample some data to condition on.
    x1 = B.linspace(0, 2, 3)
    n1 = generate_noise_tuple(x1)
    y1 = p1(x1, *n1).sample()
    tup1 = (p1(x1, *n1), y1)
    x_sum = B.linspace(3, 5, 3)
    n_sum = generate_noise_tuple(x_sum)
    y_sum = p_sum(x_sum, *n_sum).sample()
    tup_sum = (p_sum(x_sum, *n_sum), y_sum)

    # Determine FDDs to check.
    x_check = B.linspace(0, 5, 5)
    fdds_check = [
        cross(p1, p2, p_sum)(x_check),
        p1(x_check),
        p2(x_check),
        p_sum(x_check),
    ]

    assert_equal_measures(
        fdds_check,
        m.condition(*tup_sum),
        m.condition(tup_sum),
        m | tup_sum,
        m | (tup_sum, ),
        m | Obs(*tup_sum),
        m | Obs(tup_sum),
    )

    assert_equal_measures(
        fdds_check,
        m.condition(tup1, tup_sum),
        m | (tup1, tup_sum),
        m | Obs(tup1, tup_sum),
    )

    # Check that conditioning gives an FDD and that it is consistent.
    post = m | tup1
    assert isinstance(post(p1(x1, 0.1)), FDD)
    assert_equal_measures(post(p1(x1, 0.1)), post(p1)(x1, 0.1))
Example 9
def test_sample_correct_measure():
    m = Measure()
    p1 = GP(1, EQ(), measure=m)

    post = m | (p1(0), 1)

    # Test that `post.sample` indeed samples under `post`.
    approx(post.sample(10, p1(0)), B.ones(1, 10), atol=1e-4)
Example 10
def test_multi_sample():
    m = Measure()
    p1 = GP(1, 0, measure=m)
    p2 = GP(2, 0, measure=m)
    p3 = GP(3, 0, measure=m)

    x1 = B.linspace(0, 1, 5)
    x2 = B.linspace(0, 1, 10)
    x3 = B.linspace(0, 1, 15)

    s1, s2, s3 = m.sample(p1(x1), p2(x2), p3(x3))

    assert B.shape(p1(x1).sample()) == s1.shape == (5, 1)
    assert B.shape(p2(x2).sample()) == s2.shape == (10, 1)
    assert B.shape(p3(x3).sample()) == s3.shape == (15, 1)

    approx(s1, 1 * B.ones(5, 1))
    approx(s2, 2 * B.ones(10, 1))
    approx(s3, 3 * B.ones(15, 1))
Example 11
def test_approximate_multiplication():
    m = Measure()
    p1 = GP(20, EQ(), measure=m)
    p2 = GP(20, EQ(), measure=m)
    p_prod = p1 * p2
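    # The product of two GPs is only handled approximately, so the checks below
    # use loose relative tolerances.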

    # Sample functions.
    x = B.linspace(0, 10, 50)
    s1, s2 = m.sample(p1(x), p2(x))

    # Perform product.
    post = m | ((p1(x), s1), (p2(x), s2))
    approx(post(p_prod)(x).mean, s1 * s2, rtol=1e-2)

    # Perform division.
    cur_epsilon = B.epsilon
    B.epsilon = 1e-8
    post = m | ((p1(x), s1), (p_prod(x), s1 * s2))
    approx(post(p2)(x).mean, s2, rtol=1e-2)
    B.epsilon = cur_epsilon
Example 12
def test_conditioning_consistency():
    m = Measure()
    p = GP(EQ(), measure=m)
    e = GP(0.1 * Delta(), measure=m)
    e2 = GP(e.kernel, measure=m)

    x = B.linspace(0, 5, 10)
    y = (p + e)(x).sample()

    post1 = m | ((p + e)(x), y)
    post2 = m | (p(x, 0.1), y)

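    # Conditioning on an explicit noise process and folding the noise into the FDD
    # agree for p and for the independent noise copy e2, but not for the
    # particular noise realisation e.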
    assert_equal_measures([p(x), (p + e2)(x)], post1, post2)
    with pytest.raises(AssertionError):
        assert_equal_normals(post1((p + e)(x)), post2((p + e)(x)))
Example 13
def test_stationarity():
    m = Measure()

    p1 = GP(EQ(), measure=m)
    p2 = GP(EQ().stretch(2), measure=m)
    p3 = GP(EQ().periodic(10), measure=m)

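    # Sums and scalings of stationary processes remain stationary; adding a GP
    # with a Linear kernel makes the result non-stationary.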
    p = p1 + 2 * p2

    assert p.stationary

    p = p3 + p

    assert p.stationary

    p = p + GP(Linear(), measure=m)

    assert not p.stationary
Example 14
def test_mo_batched():
    x = B.randn(16, 10, 1)

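    # Batched inputs: 16 batches of 10 points over two outputs give samples of
    # shape (16, 20, 1) and one logpdf per batch element.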
    with Measure():
        p = cross(GP(1, 2 * EQ().stretch(0.5)), GP(2, 2 * EQ().stretch(0.5)))
    y = p(x).sample()
    logpdf = p(x, 0.1).logpdf(y)

    assert B.shape(logpdf) == (16, )
    assert B.shape(y) == (16, 20, 1)

    p = p | (p(x), y)
    y2 = p(x).sample()
    logpdf2 = p(x, 0.1).logpdf(y)

    assert B.shape(y2) == (16, 20, 1)
    assert B.shape(logpdf2) == (16, )
    assert B.all(logpdf2 > logpdf)
    approx(y, y2, atol=1e-5)
Example 15
def test_naming():
    m = Measure()

    p1 = GP(EQ(), 1, measure=m)
    p2 = GP(EQ(), 2, measure=m)

    # Test setting and getting names.
    p1.name = "name"

    assert m["name"] is p1
    assert p1.name == "name"
    assert m[p1] == "name"
    with pytest.raises(KeyError):
        m["other_name"]
    with pytest.raises(KeyError):
        m[p2]

    # Check that names can not be doubly assigned.
    def doubly_assign():
        p2.name = "name"

    with pytest.raises(RuntimeError):
        doubly_assign()

    # Move name to other GP.
    p1.name = "other_name"
    p2.name = "name"

    # Check that everything has been properly assigned.
    assert m["name"] is p2
    assert p2.name == "name"
    assert m[p2] == "name"
    assert m["other_name"] is p1
    assert p1.name == "other_name"
    assert m[p1] == "other_name"

    # Test giving a name to the constructor.
    p3 = GP(EQ(), name="yet_another_name", measure=m)
    assert m["yet_another_name"] is p3
    assert p3.name == "yet_another_name"
    assert m[p3] == "yet_another_name"
Example 16
def test_mom():
    x = B.linspace(0, 1, 10)

    prior = Measure()
    p1 = GP(lambda x: 2 * x, 1 * EQ(), measure=prior)
    p2 = GP(1, 2 * EQ().stretch(2), measure=prior)

    m = MultiOutputMean(prior, p1, p2)
    ms = prior.means

    # Check dimensionality.
    assert dimensionality(m) == 2

    # Check representation.
    assert str(m) == "MultiOutputMean(<lambda>, 1)"

    # Check computation.
    approx(m(x), B.concat(ms[p1](x), ms[p2](x), axis=0))
    approx(m(p1(x)), ms[p1](x))
    approx(m(p2(x)), ms[p2](x))
    approx(m((p2(x), p1(x))), B.concat(ms[p2](x), ms[p1](x), axis=0))
Example 17
def test_measure_groups():
    prior = Measure()
    f1 = GP(EQ(), measure=prior)
    f2 = GP(EQ(), measure=prior)

    assert f1._measures == f2._measures

    x = B.linspace(0, 5, 10)
    y = f1(x).sample()

    post = prior | (f1(x), y)

    assert f1._measures == f2._measures == [prior, post]

    # Further extend the prior.

    f_sum = f1 + f2
    assert f_sum._measures == [prior, post]

    f3 = GP(EQ(), measure=prior)
    f_sum = f1 + f3
    assert f3._measures == f_sum._measures == [prior]

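    # Processes from incompatible measure groups cannot be combined.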
    with pytest.raises(AssertionError):
        post(f1) + f3

    # Extend the posterior.

    f_sum = post(f1) + post(f2)
    assert f_sum._measures == [post]

    f3 = GP(EQ(), measure=post)
    f_sum = post(f1) + f3
    assert f3._measures == f_sum._measures == [post]

    with pytest.raises(AssertionError):
        f1 + f3
Example 18
def test_formatting():
    p = 2 * GP(1, EQ(), measure=Measure())
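    # `display` applies the formatter to the numbers in the representation: under
    # x ** 2, the mean 2 * 1 becomes 4 * 1 and the kernel 4 * EQ() becomes 16 * EQ().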
    assert str(p.display(lambda x: x ** 2)) == "GP(4 * 1, 16 * EQ())"
Example 19
def test_mok():
    x1 = B.linspace(0, 1, 10)
    x2 = B.linspace(1, 2, 5)
    x3 = B.linspace(1, 2, 10)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k = MultiOutputKernel(m, p1, p2)
    ks = m.kernels

    # Check dimensionality.
    assert dimensionality(k) == 2

    # Check representation.
    assert str(k) == "MultiOutputKernel(EQ(), 2 * (EQ() > 2))"

    # Input versus input:
    approx(
        k(x1, x2),
        B.concat2d(
            [ks[p1, p1](x1, x2), ks[p1, p2](x1, x2)],
            [ks[p2, p1](x1, x2), ks[p2, p2](x1, x2)],
        ),
    )
    approx(
        k.elwise(x1, x3),
        B.concat(ks[p1, p1].elwise(x1, x3), ks[p2, p2].elwise(x1, x3), axis=0),
    )

    # Input versus `FDD`:
    approx(k(p1(x1), x2), B.concat(ks[p1, p1](x1, x2), ks[p1, p2](x1, x2), axis=1))
    approx(k(p2(x1), x2), B.concat(ks[p2, p1](x1, x2), ks[p2, p2](x1, x2), axis=1))
    approx(k(x1, p1(x2)), B.concat(ks[p1, p1](x1, x2), ks[p2, p1](x1, x2), axis=0))
    approx(k(x1, p2(x2)), B.concat(ks[p1, p2](x1, x2), ks[p2, p2](x1, x2), axis=0))
    with pytest.raises(ValueError):
        k.elwise(x1, p2(x3))
    with pytest.raises(ValueError):
        k.elwise(p1(x1), x3)

    # `FDD` versus `FDD`:
    approx(k(p1(x1), p1(x2)), ks[p1](x1, x2))
    approx(k(p1(x1), p2(x2)), ks[p1, p2](x1, x2))
    approx(k.elwise(p1(x1), p1(x3)), ks[p1].elwise(x1, x3))
    approx(k.elwise(p1(x1), p2(x3)), ks[p1, p2].elwise(x1, x3))

    # Multiple inputs versus input:
    approx(
        k((p2(x1), p1(x2)), x1),
        B.concat2d(
            [ks[p2, p1](x1, x1), ks[p2, p2](x1, x1)],
            [ks[p1, p1](x2, x1), ks[p1, p2](x2, x1)],
        ),
    )
    approx(
        k(x1, (p2(x1), p1(x2))),
        B.concat2d(
            [ks[p1, p2](x1, x1), ks[p1, p1](x1, x2)],
            [ks[p2, p2](x1, x1), ks[p2, p1](x1, x2)],
        ),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), (p2(x1), p1(x3)))

    # Multiple inputs versus `FDD`:
    approx(
        k((p2(x1), p1(x2)), p2(x1)),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    approx(
        k(p2(x1), (p2(x1), p1(x2))),
        B.concat(ks[p2, p2](x1, x1), ks[p2, p1](x1, x2), axis=1),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), (p2(x1), p1(x3)))

    # Multiple inputs versus multiple inputs:
    approx(
        k((p2(x1), p1(x2)), (p2(x1),)),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    with pytest.raises(ValueError):
        k.elwise((p2(x1), p1(x3)), (p2(x1),))
    approx(
        k.elwise((p2(x1), p1(x3)), (p2(x1), p1(x3))),
        B.concat(ks[p2, p2].elwise(x1, x1), ks[p1, p1].elwise(x3, x3), axis=0),
    )
Example 20
def test_pseudo_conditioning_and_elbo(generate_noise_tuple):
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    p_sum = p1 + p2

    # Sample some data to condition on.
    x1 = B.linspace(0, 2, 3)
    n1 = generate_noise_tuple(x1)
    y1 = p1(x1, *n1).sample()
    tup1 = (p1(x1, *n1), y1)
    x_sum = B.linspace(3, 5, 3)
    n_sum = generate_noise_tuple(x_sum)
    y_sum = p_sum(x_sum, *n_sum).sample()
    tup_sum = (p_sum(x_sum, *n_sum), y_sum)

    # Determine FDDs to check.
    x_check = B.linspace(0, 5, 5)
    fdds_check = [
        cross(p1, p2, p_sum)(x_check),
        p1(x_check),
        p2(x_check),
        p_sum(x_check),
    ]

    # Check conditioning and ELBO on one data set.
    assert_equal_measures(
        fdds_check,
        m | tup_sum,
        m | PseudoObs(p_sum(x_sum), *tup_sum),
        m | PseudoObs((p_sum(x_sum), ), *tup_sum),
        m | PseudoObs((p_sum(x_sum), p1(x1)), *tup_sum),
        m | PseudoObs(p_sum(x_sum), tup_sum),
        m | PseudoObs((p_sum(x_sum), ), tup_sum),
        m.condition(PseudoObs((p_sum(x_sum), p1(x1)), tup_sum)),
    )
    approx(
        m.logpdf(Obs(*tup_sum)),
        PseudoObs(p_sum(x_sum), tup_sum).elbo(m),
    )

    # Check conditioning and ELBO on two data sets.
    assert_equal_measures(
        fdds_check,
        m | (tup_sum, tup1),
        m.condition(PseudoObs((p_sum(x_sum), p1(x1)), tup_sum, tup1)),
    )
    approx(
        m.logpdf(Obs(tup_sum, tup1)),
        PseudoObs((p_sum(x_sum), p1(x1)), tup_sum, tup1).elbo(m),
    )

    # The following lose information, so check them separately.
    assert_equal_measures(
        fdds_check,
        m | PseudoObs(p_sum(x_sum), tup_sum, tup1),
        m | PseudoObs((p_sum(x_sum), ), tup_sum, tup1),
    )

    # Test caching.
    for name in ["K_z", "elbo", "mu", "A"]:
        obs = PseudoObs(p_sum(x_sum), *tup_sum)
        assert getattr(obs, name)(m) is getattr(obs, name)(m)

    # Test requirement that noise must be diagonal.
    with pytest.raises(RuntimeError):
        PseudoObs(p_sum(x_sum), (p_sum(x_sum, p_sum(x_sum).var), y_sum)).elbo(m)

    # Test that noise on inducing points loses information.
    with pytest.raises(AssertionError):
        assert_equal_measures(
            fdds_check,
            m | tup_sum,
            m | PseudoObs(p_sum(x_sum, 0.1), *tup_sum),
        )