Example #1
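Tests GP.select: constructing a process that selects input dimensions, then checking that conditioning through the selected process reproduces the observations with near-zero posterior variance.

The snippets in this listing appear to come from the test suite of an older (pre-1.0) Stheno release and omit their module-level imports. A minimal sketch of the imports they assume is given below; the exact module paths are assumptions, and helpers such as abs_err, rel_err, allclose, approx, eq, le, and raises are local test utilities not shown here:

import numpy as np
import tensorflow as tf
from lab import B
from stheno import GP, EQ, Linear, Graph, TensorProductMean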
def test_selection():
    model = Graph()

    # Test construction:
    p = GP(EQ(), TensorProductMean(lambda x: x ** 2), graph=model)
    yield eq, str(p.select(1)), 'GP(EQ() : [1], <lambda> : [1])'
    yield eq, str(p.select(1, 2)), 'GP(EQ() : [1, 2], <lambda> : [1, 2])'

    # Test case:
    p = GP(EQ(), graph=model)  # 1D
    p2 = p.select(0)  # 2D

    n = 5
    x = np.linspace(0, 10, n)[:, None]
    x1 = np.concatenate((x, np.random.randn(n, 1)), axis=1)
    x2 = np.concatenate((x, np.random.randn(n, 1)), axis=1)
    y = p2(x).sample()

    post = p.condition(p2(x1), y)
    yield assert_allclose, post(x).mean, y
    yield le, abs_err(B.diag(post(x).var)), 1e-10

    post = p.condition(p2(x2), y)
    yield assert_allclose, post(x).mean, y
    yield le, abs_err(B.diag(post(x).var)), 1e-10

    post = p2.condition(p(x), y)
    yield assert_allclose, post(x1).mean, y
    yield assert_allclose, post(x2).mean, y
    yield le, abs_err(B.diag(post(x1).var)), 1e-10
    yield le, abs_err(B.diag(post(x2).var)), 1e-10
Example #2
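Tests approximate multiplication of two GPs: the product process is conditioned on samples of both factors, a factor is recovered back from the product (approximate division), and multiplying processes that live on different graphs is checked to raise a RuntimeError.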
def test_approximate_multiplication():
    model = Graph()

    # Construct model.
    p1 = GP(EQ(), 20, graph=model)
    p2 = GP(EQ(), 20, graph=model)
    p_prod = p1 * p2
    x = np.linspace(0, 10, 50)

    # Sample functions.
    s1, s2 = model.sample(p1(x), p2(x))

    # Infer product.
    post = p_prod.condition((p1(x), s1), (p2(x), s2))
    yield le, rel_err(post(x).mean, s1 * s2), 1e-2

    # Perform division.
    cur_epsilon = B.epsilon
    B.epsilon = 1e-8
    post = p2.condition((p1(x), s1), (p_prod(x), s1 * s2))
    yield le, rel_err(post(x).mean, s2), 1e-2
    B.epsilon = cur_epsilon

    # Check that processes from different graphs cannot be multiplied.
    model2 = Graph()
    p3 = GP(EQ(), graph=model2)
    yield raises, RuntimeError, lambda: p3 * p1
Example #3
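Tests GP.diff with the TensorFlow backend: conditioning the derivative process on function observations, and conditioning the function on derivative observations plus a single anchoring function value at zero.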
def test_derivative():
    # Test construction:
    p = GP(EQ(), TensorProductMean(lambda x: x ** 2), graph=Graph())
    yield eq, str(p.diff(1)), 'GP(d(1) EQ(), d(1) <lambda>)'

    # Test case:
    B.backend_to_tf()
    s = B.Session()

    model = Graph()
    x = np.linspace(0, 1, 100)[:, None]
    y = 2 * x

    p = GP(EQ(), graph=model)
    dp = p.diff()

    # Test conditioning on function.
    yield le, abs_err(s.run(dp.condition(p(x), y)(x).mean - 2)), 1e-3

    # Test conditioning on derivative.
    post = p.condition((B.cast(0., np.float64), B.cast(0., np.float64)),
                       (dp(x), y))
    yield le, abs_err(s.run(post(x).mean - x ** 2)), 1e-3

    s.close()
    B.backend_to_np()
Example #4
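Tests reflections of a GP (5 - p and -p): conditioning either process on observations of the other recovers the correspondingly reflected values.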
def test_case_reflection():
    model = Graph()
    p = GP(EQ(), graph=model)
    p2 = 5 - p

    x = np.linspace(0, 1, 10)[:, None]
    y = p(x).sample()

    yield le, abs_err(p2.condition(p(x), y)(x).mean - (5 - y)), 1e-5
    yield le, abs_err(p.condition(p2(x), 5 - y)(x).mean - y), 1e-5

    model = Graph()
    p = GP(EQ(), graph=model)
    p2 = -p

    x = np.linspace(0, 1, 10)[:, None]
    y = p(x).sample()

    yield le, abs_err(p2.condition(p(x), y)(x).mean + y), 1e-5
    yield le, abs_err(p.condition(p2(x), -y)(x).mean - y), 1e-5
Example #5
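Tests uprank and the automatic up-ranking of kernel and mean inputs: scalar, vector, and matrix inputs produce the expected output shapes, rank-3 inputs raise a ValueError, and conditioning a GP accepts scalar and rank-1 observations (pytest-style asserts).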
def test_uprank():
    allclose(uprank(0), [[0]])
    allclose(uprank(np.array([0])), [[0]])
    allclose(uprank(np.array([[0]])), [[0]])
    assert type(uprank(Component('test')(0))) == Component('test')

    k = OneKernel()

    assert B.shape(k(0, 0)) == (1, 1)
    assert B.shape(k(0, np.ones(5))) == (1, 5)
    assert B.shape(k(0, np.ones((5, 2)))) == (1, 5)

    assert B.shape(k(np.ones(5), 0)) == (5, 1)
    assert B.shape(k(np.ones(5), np.ones(5))) == (5, 5)
    assert B.shape(k(np.ones(5), np.ones((5, 2)))) == (5, 5)

    assert B.shape(k(np.ones((5, 2)), 0)) == (5, 1)
    assert B.shape(k(np.ones((5, 2)), np.ones(5))) == (5, 5)
    assert B.shape(k(np.ones((5, 2)), np.ones((5, 2)))) == (5, 5)

    with pytest.raises(ValueError):
        k(0, np.ones((5, 2, 1)))
    with pytest.raises(ValueError):
        k(np.ones((5, 2, 1)))

    m = OneMean()

    assert B.shape(m(0)) == (1, 1)
    assert B.shape(m(np.ones(5))) == (5, 1)
    assert B.shape(m(np.ones((5, 2)))) == (5, 1)

    p = GP(EQ(), graph=Graph())
    x = np.linspace(0, 10, 10)

    approx(p.condition(1, 1)(1).mean, np.array([[1]]))
    approx(p.condition(x, x)(x).mean, x[:, None])
    approx(p.condition(x, x[:, None])(x).mean, x[:, None])
Example #6
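The same uprank test as Example #5, written in nose-style generator form with yield-based assertions instead of plain asserts.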
def test_uprank():
    yield assert_allclose, uprank(0), [[0]]
    yield assert_allclose, uprank(np.array([0])), [[0]]
    yield assert_allclose, uprank(np.array([[0]])), [[0]]
    yield eq, type(uprank(Component('test')(0))), Component('test')

    k = OneKernel()

    yield eq, B.shape(k(0, 0)), (1, 1)
    yield eq, B.shape(k(0, np.ones(5))), (1, 5)
    yield eq, B.shape(k(0, np.ones((5, 2)))), (1, 5)

    yield eq, B.shape(k(np.ones(5), 0)), (5, 1)
    yield eq, B.shape(k(np.ones(5), np.ones(5))), (5, 5)
    yield eq, B.shape(k(np.ones(5), np.ones((5, 2)))), (5, 5)

    yield eq, B.shape(k(np.ones((5, 2)), 0)), (5, 1)
    yield eq, B.shape(k(np.ones((5, 2)), np.ones(5))), (5, 5)
    yield eq, B.shape(k(np.ones((5, 2)), np.ones((5, 2)))), (5, 5)

    yield raises, ValueError, lambda: k(0, np.ones((5, 2, 1)))
    yield raises, ValueError, lambda: k(np.ones((5, 2, 1)))

    m = OneMean()

    yield eq, B.shape(m(0)), (1, 1)
    yield eq, B.shape(m(np.ones(5))), (5, 1)
    yield eq, B.shape(m(np.ones((5, 2)))), (5, 1)

    p = GP(EQ(), graph=Graph())
    x = np.linspace(0, 10, 10)

    yield assert_approx_equal, p.condition(1, 1)(1).mean, np.array([[1]])
    yield assert_array_almost_equal, p.condition(x, x)(x).mean, x[:, None]
    yield assert_array_almost_equal, p.condition(x, x[:, None])(x).mean, \
          x[:, None]
Example #7
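A variant of the derivative test from Example #3, written with plain asserts and TensorFlow-typed inputs created via B.linspace(tf.float64, ...).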
def test_derivative():
    # Test construction:
    p = GP(EQ(), TensorProductMean(lambda x: x ** 2), graph=Graph())
    assert str(p.diff(1)) == 'GP(d(1) EQ(), d(1) <lambda>)'

    # Test case:
    model = Graph()
    x = B.linspace(tf.float64, 0, 1, 100)[:, None]
    y = 2 * x

    p = GP(EQ(), graph=model)
    dp = p.diff()

    # Test conditioning on function.
    assert abs_err(dp.condition(p(x), y)(x).mean, 2) <= 1e-3

    # Test conditioning on derivative.
    post = p.condition((B.cast(tf.float64, 0),
                        B.cast(tf.float64, 0)), (dp(x), y))
    assert abs_err(post(x).mean, x ** 2) <= 1e-3
Example #8
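Tests diff_approx, the approximate derivative of a GP: conditioning in both directions, with the regularisation constant B.epsilon temporarily adjusted for numerical stability.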
def test_case_approximate_derivative():
    model = Graph()
    x = np.linspace(0, 1, 100)[:, None]
    y = 2 * x

    p = GP(EQ().stretch(1.), graph=model)
    dp = p.diff_approx()

    # Test conditioning on function.
    yield le, abs_err(dp.condition(p(x), y)(x).mean - 2), 1e-3

    # Add some regularisation for this test case.
    orig_epsilon = B.epsilon
    B.epsilon = 1e-10

    # Test conditioning on derivative.
    post = p.condition((0, 0), (dp(x), y))
    yield le, abs_err(post(x).mean - x ** 2), 1e-3

    # Set regularisation back.
    B.epsilon = orig_epsilon
Example #9
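Tests GP.shift: conditioning the original process on observations of the shifted process (and vice versa) reproduces the sample at correspondingly shifted inputs, with near-zero posterior variance.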
def test_shifting():
    model = Graph()

    # Test construction:
    p = GP(Linear(), TensorProductMean(lambda x: x ** 2), graph=model)
    yield eq, str(p.shift(1)), 'GP(Linear() shift 1, <lambda> shift 1)'

    # Test case:
    p = GP(EQ(), graph=model)
    p2 = p.shift(5)

    n = 5
    x = np.linspace(0, 10, n)[:, None]
    y = p2(x).sample()

    post = p.condition(p2(x), y)
    yield assert_allclose, post(x - 5).mean, y
    yield le, abs_err(B.diag(post(x - 5).var)), 1e-10

    post = p2.condition(p(x), y)
    yield assert_allclose, post(x + 5).mean, y
    yield le, abs_err(B.diag(post(x + 5).var)), 1e-10
Example #10
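Tests GP.stretch: conditioning through the stretched process reproduces the sample at inputs scaled by the stretch factor.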
def test_stretching():
    model = Graph()

    # Test construction:
    p = GP(EQ(), TensorProductMean(lambda x: x ** 2), graph=model)
    assert str(p.stretch(1)) == 'GP(EQ() > 1, <lambda> > 1)'

    # Test case:
    p = GP(EQ(), graph=model)
    p2 = p.stretch(5)

    n = 5
    x = np.linspace(0, 10, n)[:, None]
    y = p2(x).sample()

    post = p.condition(p2(x), y)
    allclose(post(x / 5).mean, y)
    assert abs_err(B.diag(post(x / 5).var)) <= 1e-10

    post = p2.condition(p(x), y)
    allclose(post(x * 5).mean, y)
    assert abs_err(B.diag(post(x * 5).var)) <= 1e-10
Example #11
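Tests GP.transform: conditioning through an input-transformed process (here the transform x / 5) reproduces the sample at the transformed inputs.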
def test_input_transform():
    model = Graph()

    # Test construction:
    p = GP(EQ(), TensorProductMean(lambda x: x ** 2), graph=model)
    yield eq, str(p.transform(lambda x, c: x)), \
          'GP(EQ() transform <lambda>, <lambda> transform <lambda>)'

    # Test case:
    p = GP(EQ(), graph=model)
    p2 = p.transform(lambda x, B: x / 5)

    n = 5
    x = np.linspace(0, 10, n)[:, None]
    y = p2(x).sample()

    post = p.condition(p2(x), y)
    yield assert_allclose, post(x / 5).mean, y
    yield le, abs_err(B.diag(post(x / 5).var)), 1e-10

    post = p2.condition(p(x), y)
    yield assert_allclose, post(x * 5).mean, y
    yield le, abs_err(B.diag(post(x * 5).var)), 1e-10