def test_logpdf():
    # Two independent processes, a noise process, and their sum under one
    # shared measure.
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    e = GP(Delta(), measure=m)
    p3 = p1 + p2

    x1 = B.linspace(0, 2, 5)
    x2 = B.linspace(1, 3, 6)
    x3 = B.linspace(2, 4, 7)
    y1, y2, y3 = m.sample(p1(x1), p2(x2), p3(x3))

    # Test case that only one process is fed: both call signatures must agree
    # with the FDD's own `logpdf`.
    approx(p1(x1).logpdf(y1), m.logpdf(p1(x1), y1))
    approx(p1(x1).logpdf(y1), m.logpdf((p1(x1), y1)))

    # Compute the logpdf with the product rule: condition sequentially and sum
    # the conditional logpdfs.
    d1 = m
    d2 = d1 | (p1(x1), y1)
    d3 = d2 | (p2(x2), y2)
    approx(
        d1(p1)(x1).logpdf(y1) + d2(p2)(x2).logpdf(y2) + d3(p3)(x3).logpdf(y3),
        m.logpdf((p1(x1), y1), (p2(x2), y2), (p3(x3), y3)),
    )

    # Check that `Measure.logpdf` allows `Obs` and `SparseObs`.
    obs = Obs(p3(x3), y3)
    approx(m.logpdf(obs), p3(x3).logpdf(y3))
    obs = SparseObs(p3(x3), e, p3(x3), y3)
    approx(m.logpdf(obs), (p3 + e)(x3).logpdf(y3))
def test_case_blr():
    """Bayesian linear regression: conditioning on noisy observations of a
    linear function must recover the sampled slope and intercept."""
    prior = Measure()
    xs = B.linspace(0, 10, 100)

    # Model: y = slope * x + intercept + small white noise.
    slope = GP(1, measure=prior)
    intercept = GP(1, measure=prior)
    f = slope * (lambda x: x) + intercept
    y = f + 1e-2 * GP(Delta(), measure=prior)

    # Sample observations, true slope, and intercept jointly.
    y_obs, true_slope, true_intercept = prior.sample(y(xs), slope(0), intercept(0))

    # Condition on the observations and predict both coefficients.
    posterior = prior | (y(xs), y_obs)
    approx(posterior(slope)(0).mean, true_slope, atol=5e-2)
    approx(posterior(intercept)(0).mean, true_intercept, atol=5e-2)
def test_case_additive_model():
    """Conditioning an additive model on any two of its components, in either
    order, must recover the remaining one."""
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(EQ(), measure=m)
    p_sum = p1 + p2

    x = B.linspace(0, 5, 10)
    y1 = p1(x).sample()
    y2 = p2(x).sample()

    # First, test independence of the two components.
    assert m.kernels[p2, p1] == ZeroKernel()
    assert m.kernels[p1, p2] == ZeroKernel()

    # Each case: (first observation, second observation, queried process,
    # expected posterior mean).
    cases = [
        ((p1(x), y1), (p2(x), y2), p_sum, y1 + y2),
        ((p2(x), y2), (p1(x), y1), p_sum, y1 + y2),
        ((p1(x), y1), (p_sum(x), y1 + y2), p2, y2),
        ((p_sum(x), y1 + y2), (p1(x), y1), p2, y2),
        ((p2(x), y2), (p_sum(x), y1 + y2), p1, y1),
        ((p_sum(x), y1 + y2), (p2(x), y2), p1, y1),
    ]
    for obs_first, obs_second, process, expected in cases:
        posterior = (m | obs_first) | obs_second
        approx(posterior(process)(x).mean, expected)
def test_multi_sample():
    """Jointly sampling several FDDs gives one sample per FDD with the right
    shape; with zero kernels, each sample equals its constant mean."""
    m = Measure()
    p1 = GP(1, 0, measure=m)
    p2 = GP(2, 0, measure=m)
    p3 = GP(3, 0, measure=m)

    x1 = B.linspace(0, 1, 5)
    x2 = B.linspace(0, 1, 10)
    x3 = B.linspace(0, 1, 15)

    samples = m.sample(p1(x1), p2(x2), p3(x3))

    specs = [(p1, x1, 1, 5), (p2, x2, 2, 10), (p3, x3, 3, 15)]
    for (process, x, mean, n), sample in zip(specs, samples):
        # Joint samples have the same shape as single-FDD samples.
        assert B.shape(process(x).sample()) == sample.shape == (n, 1)
        # Zero kernel: the sample is exactly the constant mean.
        approx(sample, mean * B.ones(n, 1))
def test_approximate_multiplication():
    """Check that conditioning approximately recovers products and quotients
    of two processes."""
    m = Measure()
    p1 = GP(20, EQ(), measure=m)
    p2 = GP(20, EQ(), measure=m)
    p_prod = p1 * p2

    # Sample functions.
    x = B.linspace(0, 10, 50)
    s1, s2 = m.sample(p1(x), p2(x))

    # Perform product.
    post = m | ((p1(x), s1), (p2(x), s2))
    approx(post(p_prod)(x).mean, s1 * s2, rtol=1e-2)

    # Perform division. Temporarily lower the regularisation epsilon.
    # Fix: restore `B.epsilon` in a `finally` block so that a failing
    # assertion cannot leak the modified global into other tests.
    cur_epsilon = B.epsilon
    B.epsilon = 1e-8
    try:
        post = m | ((p1(x), s1), (p_prod(x), s1 * s2))
        approx(post(p2)(x).mean, s2, rtol=1e-2)
    finally:
        B.epsilon = cur_epsilon
def test_conditioning():
    # Two processes and their sum under one measure.
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    p_sum = p1 + p2

    # Sample some data to condition on.
    x1 = B.linspace(0, 2, 2)
    y1 = p1(x1).sample()
    x_sum = B.linspace(3, 5, 3)
    y_sum = p_sum(x_sum).sample()

    # Determine FDDs to check.
    x_check = B.linspace(0, 5, 5)
    fdds_check = [
        cross(p1, p2, p_sum)(x_check),
        p1(x_check),
        p2(x_check),
        p_sum(x_check),
    ]

    # Every way of conditioning on a single data set must yield the same
    # posterior measure.
    assert_equal_measures(
        fdds_check,
        m.condition(p_sum(x_sum), y_sum),
        m.condition((p_sum(x_sum), y_sum)),
        m | (p_sum(x_sum), y_sum),
        m | ((p_sum(x_sum), y_sum),),
        m | Obs(p_sum(x_sum), y_sum),
        m | Obs((p_sum(x_sum), y_sum)),
    )

    # Every way of conditioning on two data sets must yield the same
    # posterior measure.
    assert_equal_measures(
        fdds_check,
        m.condition((p1(x1), y1), (p_sum(x_sum), y_sum)),
        m | ((p1(x1), y1), (p_sum(x_sum), y_sum)),
        m | Obs((p1(x1), y1), (p_sum(x_sum), y_sum)),
    )
def test_momean(x):
    # `x` is a pytest fixture supplying test inputs — presumably an array of
    # input locations; confirm against the fixture definition.
    prior = Measure()
    p1 = GP(lambda x: 2 * x, 1 * EQ(), measure=prior)
    p2 = GP(1, 2 * EQ().stretch(2), measure=prior)

    m = MultiOutputMean(prior, p1, p2)
    ms = prior.means

    # Check representation.
    assert str(m) == "MultiOutputMean(<lambda>, 1)"

    # Check computation: plain inputs stack both means, an `FDD` selects one
    # process's mean, and `MultiInput` stacks in the given order.
    approx(m(x), B.concat(ms[p1](x), ms[p2](x), axis=0))
    approx(m(p1(x)), ms[p1](x))
    approx(m(p2(x)), ms[p2](x))
    approx(m(MultiInput(p2(x), p1(x))), B.concat(ms[p2](x), ms[p1](x), axis=0))
def test_stationarity():
    """Sums and scalings of stationary processes stay stationary; adding a
    linear component breaks stationarity."""
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(EQ().stretch(2), measure=m)
    p3 = GP(EQ().periodic(10), measure=m)

    combined = p1 + 2 * p2
    assert combined.stationary

    combined = p3 + combined
    assert combined.stationary

    # A linear kernel is not stationary, so the sum no longer is.
    combined = combined + GP(Linear(), measure=m)
    assert not combined.stationary
def test_naming():
    """Names of GPs can be set, queried, and moved, but never doubly
    assigned."""
    m = Measure()
    p1 = GP(EQ(), 1, measure=m)
    p2 = GP(EQ(), 2, measure=m)

    # Test setting and getting names.
    p1.name = "name"
    assert m["name"] is p1
    assert p1.name == "name"
    assert m[p1] == "name"
    with pytest.raises(KeyError):
        m["other_name"]
    with pytest.raises(KeyError):
        m[p2]

    # A name already in use cannot be assigned to another GP.
    with pytest.raises(RuntimeError):
        p2.name = "name"

    # Move the name to the other GP.
    p1.name = "other_name"
    p2.name = "name"

    # Check that everything has been properly reassigned.
    for process, expected_name in [(p2, "name"), (p1, "other_name")]:
        assert m[expected_name] is process
        assert process.name == expected_name
        assert m[process] == expected_name

    # Test giving a name to the constructor.
    p3 = GP(EQ(), name="yet_another_name", measure=m)
    assert m["yet_another_name"] is p3
    assert p3.name == "yet_another_name"
    assert m[p3] == "yet_another_name"
def test_measure_groups():
    prior = Measure()
    f1 = GP(EQ(), measure=prior)
    f2 = GP(EQ(), measure=prior)

    # Both processes belong to the same measure group.
    assert f1._measures == f2._measures

    x = B.linspace(0, 5, 10)
    y = f1(x).sample()

    post = prior | (f1(x), y)

    # Conditioning registers the posterior with the existing processes.
    assert f1._measures == f2._measures == [prior, post]

    # Further extend the prior.
    f_sum = f1 + f2
    assert f_sum._measures == [prior, post]

    # A process created after conditioning only belongs to the prior.
    f3 = GP(EQ(), measure=prior)
    f_sum = f1 + f3
    assert f3._measures == f_sum._measures == [prior]

    # Combining processes from incompatible measure groups must fail.
    with pytest.raises(AssertionError):
        post(f1) + f3

    # Extend the posterior.
    f_sum = post(f1) + post(f2)
    assert f_sum._measures == [post]

    f3 = GP(EQ(), measure=post)
    f_sum = post(f1) + f3
    assert f3._measures == f_sum._measures == [post]

    # `f1` lives in `[prior, post]` but the new `f3` only in `[post]`.
    with pytest.raises(AssertionError):
        f1 + f3
def test_mokernel(x1, x2, x3):
    # `x1`, `x2`, and `x3` are pytest fixtures supplying input locations —
    # confirm their shapes against the fixture definitions.
    m = Measure()
    p1 = GP(1 * EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k = MultiOutputKernel(m, p1, p2)
    ks = m.kernels

    # Check representation.
    assert str(k) == "MultiOutputKernel(EQ(), 2 * (EQ() > 2))"

    # Input versus input: plain inputs expand to the full block matrix over
    # all processes; `elwise` stacks the per-process diagonals.
    approx(
        k(x1, x2),
        B.concat2d(
            [ks[p1, p1](x1, x2), ks[p1, p2](x1, x2)],
            [ks[p2, p1](x1, x2), ks[p2, p2](x1, x2)],
        ),
    )
    approx(
        k.elwise(x1, x3),
        B.concat(ks[p1, p1].elwise(x1, x3), ks[p2, p2].elwise(x1, x3), axis=0),
    )

    # Input versus `FDD`: the `FDD` side selects one process; `elwise` rejects
    # the mixed case.
    approx(k(p1(x1), x2), B.concat(ks[p1, p1](x1, x2), ks[p1, p2](x1, x2), axis=1))
    approx(k(p2(x1), x2), B.concat(ks[p2, p1](x1, x2), ks[p2, p2](x1, x2), axis=1))
    approx(k(x1, p1(x2)), B.concat(ks[p1, p1](x1, x2), ks[p2, p1](x1, x2), axis=0))
    approx(k(x1, p2(x2)), B.concat(ks[p1, p2](x1, x2), ks[p2, p2](x1, x2), axis=0))
    with pytest.raises(ValueError):
        k.elwise(x1, p2(x3))
    with pytest.raises(ValueError):
        k.elwise(p1(x1), x3)

    # `FDD` versus `FDD`:
    approx(k(p1(x1), p1(x2)), ks[p1](x1, x2))
    approx(k(p1(x1), p2(x2)), ks[p1, p2](x1, x2))
    approx(k.elwise(p1(x1), p1(x3)), ks[p1].elwise(x1, x3))
    approx(k.elwise(p1(x1), p2(x3)), ks[p1, p2].elwise(x1, x3))

    # `MultiInput` versus input:
    approx(
        k(MultiInput(p2(x1), p1(x2)), x1),
        B.concat2d(
            [ks[p2, p1](x1, x1), ks[p2, p2](x1, x1)],
            [ks[p1, p1](x2, x1), ks[p1, p2](x2, x1)],
        ),
    )
    approx(
        k(x1, MultiInput(p2(x1), p1(x2))),
        B.concat2d(
            [ks[p1, p2](x1, x1), ks[p1, p1](x1, x2)],
            [ks[p2, p2](x1, x1), ks[p2, p1](x1, x2)],
        ),
    )
    with pytest.raises(ValueError):
        k.elwise(MultiInput(p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), MultiInput(p2(x1), p1(x3)))

    # `MultiInput` versus `FDD`:
    approx(
        k(MultiInput(p2(x1), p1(x2)), p2(x1)),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    approx(
        k(p2(x1), MultiInput(p2(x1), p1(x2))),
        B.concat(ks[p2, p2](x1, x1), ks[p2, p1](x1, x2), axis=1),
    )
    with pytest.raises(ValueError):
        k.elwise(MultiInput(p2(x1), p1(x3)), p2(x1))
    with pytest.raises(ValueError):
        k.elwise(p2(x1), MultiInput(p2(x1), p1(x3)))

    # `MultiInput` versus `MultiInput`: `elwise` requires both sides to have
    # matching structure.
    approx(
        k(MultiInput(p2(x1), p1(x2)), MultiInput(p2(x1))),
        B.concat(ks[p2, p2](x1, x1), ks[p1, p2](x2, x1), axis=0),
    )
    with pytest.raises(ValueError):
        k.elwise(MultiInput(p2(x1), p1(x3)), MultiInput(p2(x1)))
    approx(
        k.elwise(MultiInput(p2(x1), p1(x3)), MultiInput(p2(x1), p1(x3))),
        B.concat(ks[p2, p2].elwise(x1, x1), ks[p1, p1].elwise(x3, x3), axis=0),
    )
def test_formatting():
    """`display` runs the supplied formatter (here squaring) over the numeric
    constants in the process's mean and kernel representation."""
    scaled = 2 * GP(1, EQ(), measure=Measure())
    shown = scaled.display(lambda x: x**2)
    assert str(shown) == "GP(4 * 1, 16 * EQ())"
def test_sparse_conditioning_and_elbo():
    # Two processes, their sum, and an explicit noise process.
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Exp(), measure=m)
    e = GP(Delta(), measure=m)
    p_sum = p1 + p2

    # Sample some data to condition on.
    x1 = B.linspace(0, 2, 2)
    y1 = (p1 + e)(x1).sample()
    x_sum = B.linspace(3, 5, 3)
    y_sum = (p_sum + e)(x_sum).sample()

    # Determine FDDs to check.
    x_check = B.linspace(0, 5, 5)
    fdds_check = [
        cross(p1, p2, p_sum)(x_check),
        p1(x_check),
        p2(x_check),
        p_sum(x_check),
    ]

    # Check conditioning and ELBO on one data set: all `SparseObs` call forms
    # must agree with exact conditioning, since the inducing points coincide
    # with the observation inputs.
    assert_equal_measures(
        fdds_check,
        m | ((p_sum + e)(x_sum), y_sum),
        m | SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum),
        m | SparseObs((p_sum(x_sum),), e, p_sum(x_sum), y_sum),
        m | SparseObs((p_sum(x_sum), p1(x1)), e, p_sum(x_sum), y_sum),
        m | SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum)),
        m | SparseObs((p_sum(x_sum),), (e, p_sum(x_sum), y_sum)),
        m.condition(
            SparseObs(
                (p_sum(x_sum), p1(x1)),
                (e, p_sum(x_sum), y_sum),
            )
        ),
    )
    # The ELBO must equal the exact logpdf in this case.
    approx(
        m.logpdf(Obs((p_sum + e)(x_sum), y_sum)),
        SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum)).elbo(m),
    )

    # Check conditioning and ELBO on two data sets.
    assert_equal_measures(
        fdds_check,
        m | (((p_sum + e)(x_sum), y_sum), ((p1 + e)(x1), y1)),
        m.condition(
            SparseObs(
                (p_sum(x_sum), p1(x1)),
                (e, p_sum(x_sum), y_sum),
                (e, p1(x1), y1),
            )
        ),
    )
    approx(
        m.logpdf(Obs(((p_sum + e)(x_sum), y_sum), ((p1 + e)(x1), y1))),
        SparseObs(
            (p_sum(x_sum), p1(x1)),
            (e, p_sum(x_sum), y_sum),
            (e, p1(x1), y1),
        ).elbo(m),
    )

    # The following lose information, so check them separately.
    assert_equal_measures(
        fdds_check,
        m | SparseObs(p_sum(x_sum), (e, p_sum(x_sum), y_sum), (e, p1(x1), y1)),
        m | SparseObs((p_sum(x_sum),), (e, p_sum(x_sum), y_sum), (e, p1(x1), y1)),
    )

    # Test lazy computation: cached quantities of one instance must agree
    # with freshly constructed ones.
    obs = SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum)
    for name in ["K_z", "elbo", "mu", "A"]:
        approx(
            getattr(SparseObs(p_sum(x_sum), e, p_sum(x_sum), y_sum), name)(m),
            getattr(obs, name)(m),
        )

    # Test requirement that noise must be diagonal: a non-`Delta` noise
    # process is rejected.
    with pytest.raises(RuntimeError):
        SparseObs(p_sum(x_sum), p_sum, p_sum(x_sum), y_sum).elbo(m)