def test_logpdf(x, w):
    prior = Measure()
    f1, noise1 = GP(EQ(), measure=prior), 2e-1
    f2, noise2 = GP(Linear(), measure=prior), 1e-1
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))

    # Generate some data.
    y = gpar.sample(x, w, latent=True)

    # Compute logpdf.
    x1 = x
    x2 = B.concat(x, y[:, 0:1], axis=1)
    logpdf1 = f1(x1, noise1 / w[:, 0]).logpdf(y[:, 0])
    logpdf2 = f2(x2, noise2 / w[:, 1]).logpdf(y[:, 1])

    # Test computation of GPAR.
    assert gpar.logpdf(x, y, w) == logpdf1 + logpdf2
    assert gpar.logpdf(x, y, w, only_last_layer=True) == logpdf2

    # Test resuming computation.
    x_partial, x_ind_partial = gpar.logpdf(x, y, w, return_inputs=True, outputs=[0])
    assert gpar.logpdf(x_partial, y, w, x_ind=x_ind_partial, outputs=[1]) == logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    all_different(
        gpar.logpdf(x, y, w, sample_missing=True),
        gpar.logpdf(x, y, w, sample_missing=True),
    )

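# The fixture-style tests in this file (`test_logpdf(x, w)` above and the
# `test_logpdf(x, w)` / `test_sample(x, w)` variants below) assume pytest fixtures
# `x` and `w` plus small assertion helpers `all_different` and `approx`. The
# definitions below are only a minimal sketch of what those could look like; the
# shapes, dtypes, tolerances, and helper signatures are assumptions made for
# illustration, not the project's actual conftest.
import numpy as np
import pytest


@pytest.fixture()
def x():
    # Ten one-dimensional inputs of shape (n, 1), matching how the tests use `x`.
    return np.linspace(0, 2, 10)[:, None]


@pytest.fixture()
def w():
    # Strictly positive per-point weights with one column per output/layer.
    return 1 + np.random.rand(10, 2)


def all_different(a, b):
    # Assert that two (stochastic) results are nowhere numerically identical.
    assert np.all(np.abs(np.asarray(a) - np.asarray(b)) > 1e-10)


def approx(a, b, atol=1e-8):
    # Assert elementwise closeness up to an absolute tolerance.
    np.testing.assert_allclose(np.asarray(a), np.asarray(b), atol=atol)
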
def test_logpdf():
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(2e-1 * Delta(), graph=graph)
    f2, e2 = GP(Linear(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Sample some data from GPAR.
    x = B.linspace(0, 2, 10, dtype=torch.float64)[:, None]
    y = gpar.sample(x, latent=True)

    # Compute logpdf.
    logpdf1 = (f1 + e1)(x).logpdf(y[:, 0])
    logpdf2 = (f2 + e2)(B.concat([x, y[:, 0:1]], axis=1)).logpdf(y[:, 1])

    # Test computation of GPAR.
    yield eq, gpar.logpdf(x, y), logpdf1 + logpdf2
    yield eq, gpar.logpdf(x, y, only_last_layer=True), logpdf2

    # Test resuming computation.
    x_int, x_ind_int = gpar.logpdf(x, y, return_inputs=True, outputs=[0])
    yield eq, gpar.logpdf(x_int, y, x_ind=x_ind_int, outputs=[1]), logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    yield ge, B.abs(
        gpar.logpdf(x, y, sample_missing=True)
        - gpar.logpdf(x, y, sample_missing=True)
    ).numpy(), 1e-3

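# The yield-style tests in this file (`test_logpdf()` above and `test_sample()`
# further below) follow the old nose convention of yielding a comparison callable
# together with its arguments. A minimal sketch of the helpers they assume; the
# names `eq` and `ge` are assumptions for illustration, and the yielded
# `approx(a, b, digits)` is assumed to behave like
# `np.testing.assert_array_almost_equal(a, b, decimal=digits)`.
def eq(a, b):
    # Exact equality check.
    assert a == b


def ge(a, b):
    # Greater-than-or-equal check.
    assert a >= b
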
def test_logpdf(x, w):
    prior = Measure()
    f1, e1 = GP(EQ(), measure=prior), GP(2e-1 * Delta(), measure=prior)
    f2, e2 = GP(Linear(), measure=prior), GP(1e-1 * Delta(), measure=prior)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Generate some data.
    y = gpar.sample(x, w, latent=True)

    # Compute logpdf.
    x1 = WeightedUnique(x, w[:, 0])
    x2 = WeightedUnique(B.concat(x, y[:, 0:1], axis=1), w[:, 1])
    logpdf1 = (f1 + e1)(x1).logpdf(y[:, 0])
    logpdf2 = (f2 + e2)(x2).logpdf(y[:, 1])

    # Test computation of GPAR.
    assert gpar.logpdf(x, y, w) == logpdf1 + logpdf2
    assert gpar.logpdf(x, y, w, only_last_layer=True) == logpdf2

    # Test resuming computation.
    x_partial, x_ind_partial = gpar.logpdf(x, y, w, return_inputs=True, outputs=[0])
    assert gpar.logpdf(x_partial, y, w, x_ind=x_ind_partial, outputs=[1]) == logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    all_different(
        gpar.logpdf(x, y, w, sample_missing=True),
        gpar.logpdf(x, y, w, sample_missing=True),
    )

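# Both `test_logpdf` variants above reduce the first layer's term to a zero-mean
# multivariate normal logpdf whose observation noise is scaled per point by 1 / w,
# either via `noise / w` directly or via `WeightedUnique` inputs. The sketch below
# spells that density out in plain numpy; the EQ kernel form exp(-0.5 * (x - x')**2)
# and the example values are assumptions made for illustration.
import numpy as np


def weighted_gp_logpdf(x, y, w, noise, variance=1.0):
    # Covariance of the noisy observations: k(x, x') + diag(noise / w).
    d = x[:, None] - x[None, :]
    cov = variance * np.exp(-0.5 * d ** 2) + np.diag(noise / w)
    # Zero-mean multivariate normal log-density via a Cholesky factorisation.
    n = len(y)
    chol = np.linalg.cholesky(cov)
    alpha = np.linalg.solve(chol, y)
    return -0.5 * (alpha @ alpha + n * np.log(2 * np.pi)) - np.sum(np.log(np.diag(chol)))


# Example: the first layer's contribution under some assumed values.
x_demo = np.linspace(0, 2, 5)
logpdf1_demo = weighted_gp_logpdf(x_demo, np.random.randn(5), 1 + np.random.rand(5), noise=2e-1)
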
def test_sample():
    graph = Graph()
    x = array([1, 2, 3])[:, None]

    # Test that it produces random samples. Not sure how to test for correctness.
    f1, e1 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    yield ge, B.sum(B.abs(gpar.sample(x) - gpar.sample(x))), 1e-3
    yield ge, B.sum(
        B.abs(gpar.sample(x, latent=True) - gpar.sample(x, latent=True))
    ), 1e-3

    # Test that posterior latent samples are around the data that is conditioned on.
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    y = gpar.sample(x, latent=True)
    gpar = gpar | (x, y)
    yield approx, gpar.sample(x), y, 3
    yield approx, gpar.sample(x, latent=True), y, 3

def test_sample(x, w):
    prior = Measure()

    # Test that it produces random samples.
    f1, noise1 = GP(EQ(), measure=prior), 1e-1
    f2, noise2 = GP(EQ(), measure=prior), 2e-1
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))
    all_different(gpar.sample(x, w), gpar.sample(x, w))
    all_different(gpar.sample(x, w, latent=True), gpar.sample(x, w, latent=True))

    # Test that posterior latent samples are around the data that is conditioned on.
    prior = Measure()
    f1, noise1 = GP(EQ(), measure=prior), 1e-10
    f2, noise2 = GP(EQ(), measure=prior), 2e-10
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))
    y = gpar.sample(x, w, latent=True)
    gpar = gpar | (x, y, w)
    approx(gpar.sample(x, w), y, atol=1e-3)
    approx(gpar.sample(x, w, latent=True), y, atol=1e-3)

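# The posterior checks in both `test_sample` variants rely on a standard GP fact:
# as the observation noise sigma^2 -> 0, the posterior mean K (K + sigma^2 I)^{-1} y
# at the training inputs tends to y itself, so latent posterior samples concentrate
# around the conditioned data. A plain-numpy illustration of that limit; the kernel
# form and example values are assumptions made for illustration.
import numpy as np


def posterior_mean_at_training_inputs(x, y, noise, variance=1.0):
    # EQ-style kernel matrix at the training inputs.
    d = x[:, None] - x[None, :]
    k = variance * np.exp(-0.5 * d ** 2)
    # GP posterior mean at the same inputs: K (K + noise * I)^{-1} y.
    return k @ np.linalg.solve(k + noise * np.eye(len(x)), y)


x_demo = np.linspace(0, 2, 5)
y_demo = np.random.randn(5)
# With tiny noise the posterior mean reproduces the conditioned data almost exactly.
assert np.allclose(posterior_mean_at_training_inputs(x_demo, y_demo, 1e-10), y_demo, atol=1e-3)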