Example #1
def test_conditioning(x, w):
    prior = Measure()
    f1, e1 = GP(EQ(), measure=prior), GP(1e-10 * Delta(), measure=prior)
    f2, e2 = GP(EQ(), measure=prior), GP(2e-10 * Delta(), measure=prior)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Generate some data.
    y = B.concat((f1 + e1)(x).sample(), (f2 + e2)(x).sample(), axis=1)

    # Extract posterior processes.
    gpar = gpar | (x, y, w)
    f1_post, e1_post = gpar.layers[0]()
    f2_post, e2_post = gpar.layers[1]()

    # Test independence of noises.
    assert f1_post.measure.kernels[f1_post, e1_post] == ZeroKernel()
    assert f2_post.measure.kernels[f2_post, e2_post] == ZeroKernel()

    # Test form of noises.
    assert e1.mean == e1_post.mean
    assert e1.kernel == e1_post.kernel
    assert e2.mean == e2_post.mean
    assert e2.kernel == e2_post.kernel

    # Test posteriors.
    approx(f1_post.mean(x), y[:, 0:1], atol=1e-3)
    approx(f2_post.mean(B.concat(x, y[:, 0:1], axis=1)), y[:, 1:2], atol=1e-3)
Example #2
File: test_model.py Project: Zhanghq8/gpar
def test_sample():
    graph = Graph()
    x = array([1, 2, 3])[:, None]

    # Test that it produces random samples. Not sure how to test for
    # correctness.
    f1, e1 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    yield ge, B.sum(B.abs(gpar.sample(x) - gpar.sample(x))), 1e-3
    yield ge, \
          B.sum(B.abs(gpar.sample(x, latent=True) -
                      gpar.sample(x, latent=True))), \
          1e-3

    # Test that posterior latent samples are around the data that is
    # conditioned on.
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))
    y = gpar.sample(x, latent=True)
    gpar = gpar | (x, y)
    yield approx, gpar.sample(x), y, 3
    yield approx, gpar.sample(x, latent=True), y, 3
Example #3
File: test_model.py Project: Zhanghq8/gpar
def test_conditioning():
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(2e-8 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    x = array([[1], [2], [3]])
    y = array([[4, 5], [6, 7], [8, 9]])
    gpar = gpar | (x, y)

    # Extract posterior processes.
    f1_post, e1_post = gpar.layers[0]()
    f2_post, e2_post = gpar.layers[1]()

    # Test independence of noises.
    yield eq, graph.kernels[f1_post, e1_post], ZeroKernel()
    yield eq, graph.kernels[f2_post, e2_post], ZeroKernel()

    # Test form of noises.
    yield eq, e1.mean, e1_post.mean
    yield eq, e1.kernel, e1_post.kernel
    yield eq, e2.mean, e2_post.mean
    yield eq, e2.kernel, e2_post.kernel

    # Test posteriors.
    yield approx, f1_post.mean(x), y[:, 0:1]
    yield approx, f2_post.mean(B.concat([x, y[:, 0:1]], axis=1)), y[:, 1:2]
Example #4
def test_conditioning():
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(1e-8 * Delta(), graph=graph)
    f2, e2 = GP(EQ(), graph=graph), GP(2e-8 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    x = tensor([[1], [2], [3]])
    y = tensor([[4, 5],
                [6, 7],
                [8, 9]])
    gpar = gpar | (x, y)

    # Extract posterior processes.
    f1_post, e1_post = gpar.layers[0]()
    f2_post, e2_post = gpar.layers[1]()

    # Test independence of noises.
    assert graph.kernels[f1_post, e1_post] == ZeroKernel()
    assert graph.kernels[f2_post, e2_post] == ZeroKernel()

    # Test form of noises.
    assert e1.mean == e1_post.mean
    assert e1.kernel == e1_post.kernel
    assert e2.mean == e2_post.mean
    assert e2.kernel == e2_post.kernel

    # Test posteriors.
    approx(f1_post.mean(x), y[:, 0:1])
    approx(f2_post.mean(B.concat(x, y[:, 0:1], axis=1)), y[:, 1:2])
Example #5
File: lmm.py Project: wesselb/wbml
def test_lmm_missing_data():
    # Setup model.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    H = np.random.randn(3, 2)

    # Construct model.
    lmm = LMMPP(kernels, noise_obs, noises_latent, H)

    # Construct data.
    x = np.linspace(0, 3, 5)
    y = lmm.sample(x, latent=False)

    # Throw away random data points and check that the logpdf computes.
    y2 = y.copy()
    y2[0, 0] = np.nan
    y2[2, 2] = np.nan
    y2[4, 1] = np.nan
    assert not np.isnan(lmm.logpdf(x, y2))

    # Throw away an entire time point and check correctness.
    y2 = y.copy()
    y2[1, :] = np.nan
    approx(lmm.logpdf(x[[0, 2, 3, 4]], y[[0, 2, 3, 4]]), lmm.logpdf(x, y2))

    # Check LML after conditioning.
    lmm = lmm.condition(x, y2)
    approx(lmm.logpdf(x[[0, 2, 3, 4]], y[[0, 2, 3, 4]]), lmm.logpdf(x, y2))
Example #6
File: test_ilmm.py Project: wesselb/oilmm
def construct_ilmm():
    # Setup model.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    h = Dense(B.randn(3, 2))

    def construct_ilmm(noise_amplification=1):
        noise_obs = noise_amplification
        noises_latent = np.array([0.1, 0.2]) * noise_amplification
        return ILMMPP(kernels, h, noise_obs, noises_latent)

    return construct_ilmm
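A note on usage: this fixture returns a constructor rather than a finished model, so a test can instantiate the ILMM at several noise levels. Below is a minimal pytest-style sketch, not taken from the project itself; the test name and data are hypothetical, and only methods that appear elsewhere on this page (`sample`, `logpdf`) are used.

def test_noise_amplification(construct_ilmm):
    # Build the same model at two noise levels via the fixture.
    ilmm = construct_ilmm(noise_amplification=1)
    ilmm_noisy = construct_ilmm(noise_amplification=100)

    # Data sampled from the low-noise model should typically score a higher
    # logpdf under that model than under the amplified-noise one.
    x = B.linspace(0, 3, 5)
    y = ilmm.sample(x, latent=False)
    assert ilmm.logpdf(x, y) > ilmm_noisy.logpdf(x, y)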
Example #7
def test_infer_size():
    x = B.linspace(0, 2, 5)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)
    k = MultiOutputKernel(m, p1, p2)

    assert infer_size(k, x) == 10
    assert infer_size(k, p1(x)) == 5
    assert infer_size(k, (x, p1(x))) == 15
Example #8
def construct_oilmm():
    # Setup model.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    u, s_sqrt = B.svd(B.randn(3, 2))[:2]
    u = Dense(u)
    s_sqrt = Diagonal(s_sqrt)

    def construct_oilmm(noise_amplification=1):
        noise_obs = noise_amplification
        noises_latent = np.array([0.1, 0.2]) * noise_amplification
        return OILMM(kernels, u, s_sqrt, noise_obs, noises_latent)

    return construct_oilmm
Example #9
def test_combine():
    x1 = B.linspace(0, 2, 10)
    x2 = B.linspace(2, 4, 10)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(Matern12(), measure=m)
    y1 = p1(x1).sample()
    y2 = p2(x2).sample()

    # Check the one-argument case.
    assert_equal_normals(combine(p1(x1, 1)), p1(x1, 1))
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)))
    assert_equal_normals(fdd_combined, p1(x1, 1))
    approx(y_combined, y1)

    # Check the two-argument case.
    fdd_combined = combine(p1(x1, 1), p2(x2, 2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    fdd_combined, y_combined = combine((p1(x1, 1), B.squeeze(y1)),
                                       (p2(x2, 2), y2))
    assert_equal_normals(
        fdd_combined,
        Normal(B.block_diag(p1(x1, 1).var,
                            p2(x2, 2).var)),
    )
    approx(y_combined, B.concat(y1, y2, axis=0))
Example #10
File: test_model.py Project: Zhanghq8/gpar
def test_logpdf():
    graph = Graph()
    f1, e1 = GP(EQ(), graph=graph), GP(2e-1 * Delta(), graph=graph)
    f2, e2 = GP(Linear(), graph=graph), GP(1e-1 * Delta(), graph=graph)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Sample some data from GPAR.
    x = B.linspace(0, 2, 10, dtype=torch.float64)[:, None]
    y = gpar.sample(x, latent=True)

    # Compute logpdf.
    logpdf1 = (f1 + e1)(x).logpdf(y[:, 0])
    logpdf2 = (f2 + e2)(B.concat([x, y[:, 0:1]], axis=1)).logpdf(y[:, 1])

    # Test computation of GPAR.
    yield eq, gpar.logpdf(x, y), logpdf1 + logpdf2
    yield eq, gpar.logpdf(x, y, only_last_layer=True), logpdf2

    # Test resuming computation.
    x_int, x_ind_int = gpar.logpdf(x, y, return_inputs=True, outputs=[0])
    yield eq, gpar.logpdf(x_int, y, x_ind=x_ind_int, outputs=[1]), logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    yield ge, \
          B.abs(gpar.logpdf(x, y, sample_missing=True) -
                gpar.logpdf(x, y, sample_missing=True)).numpy(), \
          1e-3
Example #11
File: eeg_igp.py Project: wesselb/oilmm
    def construct_model(vs):
        kernels = [
            vs.pos(1, name=f"{i}/var") *
            EQ().stretch(vs.pos(0.02, name=f"{i}/scale")) for i in range(p)
        ]
        noises = vs.pos(1e-2 * B.ones(p), name="noises")
        return IGP(kernels, noises)
Example #12
def test_infer_size():
    x = B.linspace(0, 2, 5)

    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)
    k = MultiOutputKernel(m, p1, p2)

    assert infer_size(k, x) == 10
    assert infer_size(k, p1(x)) == 5
    assert infer_size(k, (x, p1(x))) == 15

    # Check that the dimensionality must be inferrable.
    assert infer_size(EQ(), x) == 5
    with pytest.raises(RuntimeError):
        infer_size(ADK(EQ()), x)
Example #13
def test_logpdf(x, w):
    prior = Measure()
    f1, noise1 = GP(EQ(), measure=prior), 2e-1
    f2, noise2 = GP(Linear(), measure=prior), 1e-1
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))

    # Generate some data.
    y = gpar.sample(x, w, latent=True)

    # Compute logpdf.
    x1 = x
    x2 = B.concat(x, y[:, 0:1], axis=1)
    logpdf1 = f1(x1, noise1 / w[:, 0]).logpdf(y[:, 0])
    logpdf2 = f2(x2, noise2 / w[:, 1]).logpdf(y[:, 1])

    # Test computation of GPAR.
    assert gpar.logpdf(x, y, w) == logpdf1 + logpdf2
    assert gpar.logpdf(x, y, w, only_last_layer=True) == logpdf2

    # Test resuming computation.
    x_partial, x_ind_partial = gpar.logpdf(x, y, w, return_inputs=True, outputs=[0])
    assert gpar.logpdf(x_partial, y, w, x_ind=x_ind_partial, outputs=[1]) == logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    all_different(
        gpar.logpdf(x, y, w, sample_missing=True),
        gpar.logpdf(x, y, w, sample_missing=True),
    )
Example #14
def test_obs(x):
    prior = Measure()
    f = GP(EQ(), measure=prior)
    noise = 0.1

    # Generate some data.
    w = B.rand(B.shape(x)[0]) + 1e-2
    y = f(x, 0.1).sample()

    # Set some observations to be missing.
    y_missing = y.copy()
    y_missing[::2] = np.nan

    # Check dense case.
    gpar = GPAR()
    obs = gpar._obs(x, None, y_missing, w, f, noise)
    assert isinstance(obs, Obs)
    approx(
        prior.logpdf(obs),
        f(x[1::2], noise / w[1::2]).logpdf(y[1::2]),
        atol=1e-6,
    )

    # Check sparse case.
    gpar = GPAR(x_ind=x)
    obs = gpar._obs(x, x, y_missing, w, f, noise)
    assert isinstance(obs, SparseObs)
    approx(
        prior.logpdf(obs),
        f(x[1::2], noise / w[1::2]).logpdf(y[1::2]),
        atol=1e-6,
    )
Example #15
def test_logpdf(x, w):
    prior = Measure()
    f1, e1 = GP(EQ(), measure=prior), GP(2e-1 * Delta(), measure=prior)
    f2, e2 = GP(Linear(), measure=prior), GP(1e-1 * Delta(), measure=prior)
    gpar = GPAR().add_layer(lambda: (f1, e1)).add_layer(lambda: (f2, e2))

    # Generate some data.
    y = gpar.sample(x, w, latent=True)

    # Compute logpdf.
    x1 = WeightedUnique(x, w[:, 0])
    x2 = WeightedUnique(B.concat(x, y[:, 0:1], axis=1), w[:, 1])
    logpdf1 = (f1 + e1)(x1).logpdf(y[:, 0])
    logpdf2 = (f2 + e2)(x2).logpdf(y[:, 1])

    # Test computation of GPAR.
    assert gpar.logpdf(x, y, w) == logpdf1 + logpdf2
    assert gpar.logpdf(x, y, w, only_last_layer=True) == logpdf2

    # Test resuming computation.
    x_partial, x_ind_partial = gpar.logpdf(x,
                                           y,
                                           w,
                                           return_inputs=True,
                                           outputs=[0])
    assert gpar.logpdf(x_partial, y, w, x_ind=x_ind_partial,
                       outputs=[1]) == logpdf2

    # Test that sampling missing gives a stochastic estimate.
    y[1, 0] = np.nan
    all_different(
        gpar.logpdf(x, y, w, sample_missing=True),
        gpar.logpdf(x, y, w, sample_missing=True),
    )
Example #16
def test_logpdf_missing_data():
    # Setup model.
    m = 3
    noise = 1e-2
    latent_noises = 2e-2 * B.ones(m)
    kernels = [0.5 * EQ().stretch(0.75) for _ in range(m)]
    x = B.linspace(0, 10, 20)

    # Concatenate two orthogonal matrices, to make the missing data
    # approximation exact.
    u1 = B.svd(B.randn(m, m))[0]
    u2 = B.svd(B.randn(m, m))[0]
    u = Dense(B.concat(u1, u2, axis=0) / B.sqrt(2))

    s_sqrt = Diagonal(B.rand(m))

    # Construct a reference model.
    oilmm_pp = ILMMPP(kernels, u @ s_sqrt, noise, latent_noises)

    # Sample to generate test data.
    y = oilmm_pp.sample(x, latent=False)

    # Throw away data, but retain orthogonality.
    y[5:10, 3:] = np.nan
    y[10:, :3] = np.nan

    # Construct OILMM to test.
    oilmm = OILMM(kernels, u, s_sqrt, noise, latent_noises)

    # Check that evidence is still exact.
    approx(oilmm_pp.logpdf(x, y), oilmm.logpdf(x, y), atol=1e-7)
Example #17
File: lmm.py Project: wesselb/wbml
def test_compare_lmm_olmm():
    # Setup models.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    U, S, _ = B.svd(B.randn(3, 2))
    H = np.dot(U, np.diag(S)**0.5)

    # Construct models.
    lmm = LMMPP(kernels, noise_obs, noises_latent, H)
    olmm = OLMM(kernels, noise_obs, noises_latent, H)

    # Construct data.
    x = np.linspace(0, 3, 5)
    y = lmm.sample(x, latent=False)
    x2 = np.linspace(4, 7, 5)
    y2 = lmm.sample(x2, latent=False)

    # Check LML before conditioning.
    approx(lmm.logpdf(x, y), olmm.logpdf(x, y))
    approx(lmm.logpdf(x2, y2), olmm.logpdf(x2, y2))

    # Check LML after conditioning.
    lmm = lmm.condition(x, y)
    olmm = olmm.condition(x, y)
    # Note: `lmm.logpdf(x, y)` will not equal `olmm.logpdf(x, y)` due to
    # assumptions in the OLMM, so the following is not tested.
    # approx(lmm.logpdf(x, y), olmm.logpdf(x, y))
    approx(lmm.logpdf(x2, y2), olmm.logpdf(x2, y2))

    # Predict.
    preds_pp, means_pp, vars_pp = lmm.marginals(x2)
    preds, means, vars = olmm.marginals(x2)

    # Check predictions per time point.
    for i in range(5):
        approx(means_pp[i], means[i])
        approx(vars_pp[i], vars[i])

    # Check predictions per output.
    for i in range(3):
        approx(preds_pp[i][0], preds[i][0])
        approx(preds_pp[i][1], preds[i][1])
        approx(preds_pp[i][2], preds[i][2])
Example #18
def test_sample(x, w):
    prior = Measure()

    # Test that it produces random samples.
    f1, noise1 = GP(EQ(), measure=prior), 1e-1
    f2, noise2 = GP(EQ(), measure=prior), 2e-1
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))
    all_different(gpar.sample(x, w), gpar.sample(x, w))
    all_different(gpar.sample(x, w, latent=True), gpar.sample(x, w, latent=True))

    # Test that posterior latent samples are around the data that is conditioned on.
    prior = Measure()
    f1, noise1 = GP(EQ(), measure=prior), 1e-10
    f2, noise2 = GP(EQ(), measure=prior), 2e-10
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))
    y = gpar.sample(x, w, latent=True)
    gpar = gpar | (x, y, w)
    approx(gpar.sample(x, w), y, atol=1e-3)
    approx(gpar.sample(x, w, latent=True), y, atol=1e-3)
Example #19
File: eeg_ilmm.py Project: wesselb/oilmm
    def construct_model(vs):
        # Parametrise different kernels.
        kernels = [
            vs.pos(1, name=f"{i}/var") * EQ().stretch(vs.pos(0.02, name=f"{i}/scale"))
            for i in range(m)
        ]
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        noise = vs.pos(1e-2, name="noise")
        h = Dense(vs.get(shape=(p, m), name="h"))

        return ILMMPP(kernels, h, noise, latent_noises)
Example #20
    def construct_model(vs):
        kernels = [
            vs.pos(1, name=f"{i}/var") *
            EQ().stretch(vs.pos(0.02, name=f"{i}/scale")) for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        u = Dense(vs.orth(shape=(p, m), name="u"))
        s_sqrt = Diagonal(vs.pos(shape=(m, ), name="s_sqrt"))

        return OILMM(kernels, u, s_sqrt, noise, latent_noises)
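The `construct_model(vs)` builders in Examples #19 and #20 follow the varz pattern: every parameter is read from the variable container `vs`, so the model is rebuilt from scratch on each optimisation step. A minimal sketch of how such a builder is typically fitted, assuming the `Vars` container and `minimise_l_bfgs_b` optimiser from the varz package (`x_data` and `y_data` are hypothetical training data):

import torch
from varz import Vars
from varz.torch import minimise_l_bfgs_b

vs = Vars(torch.float64)

def objective(vs):
    # Rebuild the model from the current parameters and return the
    # negative log-marginal-likelihood of the training data.
    model = construct_model(vs)
    return -model.logpdf(x_data, y_data)

minimise_l_bfgs_b(objective, vs)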
Example #21
def test_dimensionality():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k1 = MultiOutputKernel(m, p1, p2)
    k2 = MultiOutputKernel(m, p1, p1)

    assert dimensionality(EQ()) == 1
    assert dimensionality(k1) == 2

    # Test the unpacking of `Wrapped`s and `Join`s.
    assert dimensionality(k1 + k2) == 2
    assert dimensionality(k1 * k2) == 2
    assert dimensionality(k1.periodic(1)) == 2
    assert dimensionality(k1.stretch(2)) == 2

    # Test consistency check.
    with pytest.raises(RuntimeError):
        dimensionality(k1 + EQ())
Example #22
    def make_latent_process(i):
        # Long-term trend:
        variance = vs.bnd(0.9, name=f'{i}/long_term/var')
        scale = vs.bnd(2 * 30, name=f'{i}/long_term/scale')
        kernel = variance * EQ().stretch(scale)

        # Short-term trend:
        variance = vs.bnd(0.1, name=f'{i}/short_term/var')
        scale = vs.bnd(20, name=f'{i}/short_term/scale')
        kernel += variance * Matern12().stretch(scale)

        return GP(kernel, graph=g)
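For reference, the kernel assembled above is just the sum of the two components; with the initial parameter values substituted and the `vs.bnd` parametrisation dropped, it is equivalent to the following sketch:

kernel = 0.9 * EQ().stretch(60) + 0.1 * Matern12().stretch(20)

That is, each latent process combines a slowly varying EQ trend with a rougher Matern12 component.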
Example #23
def test_conditioning(x, w):
    prior = Measure()
    f1, noise1 = GP(EQ(), measure=prior), 1e-10
    f2, noise2 = GP(EQ(), measure=prior), 2e-10
    gpar = GPAR().add_layer(lambda: (f1, noise1)).add_layer(lambda: (f2, noise2))

    # Generate some data.
    y = B.concat(f1(x, noise1).sample(), f2(x, noise2).sample(), axis=1)

    # Extract posterior processes.
    gpar = gpar | (x, y, w)
    f1_post, noise1_post = gpar.layers[0]()
    f2_post, noise2_post = gpar.layers[1]()

    # Test noises.
    assert noise1 == noise1_post
    assert noise2 == noise2_post

    # Test posteriors.
    approx(f1_post.mean(x), y[:, 0:1], atol=1e-3)
    approx(f2_post.mean(B.concat(x, y[:, 0:1], axis=1)), y[:, 1:2], atol=1e-3)
Example #24
    def construct_model_ilmm_equivalent(vs):
        kernels = [
            vs.pos(1, name=f"{i}/var") *
            EQ().stretch(vs.pos(0.02, name=f"{i}/scale")) for i in range(m)
        ]
        noise = vs.pos(1e-2, name="noise")
        latent_noises = vs.pos(1e-2 * B.ones(m), name="latent_noises")
        u = vs.orth(shape=(p, m), name="u")
        s_sqrt = vs.pos(shape=(m, ), name="s_sqrt")
        h = Dense(u * s_sqrt[None, :])

        return ILMMPP(kernels, h, noise, latent_noises)
Example #25
def test_compare_ilmm():
    # Setup models.
    kernels = [EQ(), 2 * EQ().stretch(1.5)]
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    u, s_sqrt = B.svd(B.randn(3, 2))[:2]
    u = Dense(u)
    s_sqrt = Diagonal(s_sqrt)

    # Construct models.
    ilmm = ILMMPP(kernels, u @ s_sqrt, noise_obs, noises_latent)
    oilmm = OILMM(kernels, u, s_sqrt, noise_obs, noises_latent)

    # Construct data.
    x = B.linspace(0, 3, 5)
    y = ilmm.sample(x, latent=False)
    x2 = B.linspace(4, 7, 5)
    y2 = ilmm.sample(x2, latent=False)

    # Check LML before conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    ilmm = ilmm.condition(x, y)
    oilmm = oilmm.condition(x, y)

    # Check LML after conditioning.
    approx(ilmm.logpdf(x, y), oilmm.logpdf(x, y))
    approx(ilmm.logpdf(x2, y2), oilmm.logpdf(x2, y2))

    # Predict.
    means_pp, lowers_pp, uppers_pp = ilmm.predict(x2)
    means, lowers, uppers = oilmm.predict(x2)

    # Check predictions.
    approx(means_pp, means)
    approx(lowers_pp, lowers)
    approx(uppers_pp, uppers)
Example #26
File: lmm.py Project: wesselb/wbml
def test_lmm_olmm_sample():
    # Setup models.
    kernels = [EQ()] * 2
    noise_obs = 0.1
    noises_latent = np.array([0.1, 0.2])
    H = B.randn(3, 2)

    # Construct models.
    lmm = LMMPP(kernels, noise_obs, noises_latent, H)
    olmm = OLMM(kernels, noise_obs, noises_latent, H)

    # Wrap.
    lmm.fs = TrackedIterator(lmm.fs)
    lmm.ys = TrackedIterator(lmm.ys)
    olmm.xs = TrackedIterator(olmm.xs)
    olmm.xs_noisy = TrackedIterator(olmm.xs_noisy)

    # Test latent samples.
    x = B.randn(10)
    assert isinstance(lmm.sample(x, latent=True), B.NPNumeric)
    assert lmm.fs.used, "lmm.fs was not used."
    assert not lmm.ys.used, "lmm.ys was used."

    assert isinstance(olmm.sample(x, latent=True), B.NPNumeric)
    assert olmm.xs.used, "olmm.xs was not used."
    assert not olmm.xs_noisy.used, "olmm.xs_noisy was used."

    # Test observed samples.
    TrackedIterator.reset()

    assert isinstance(lmm.sample(x, latent=False), B.NPNumeric)
    assert not lmm.fs.used, "lmm.fs was used."
    assert lmm.ys.used, "lmm.ys was not used."

    assert isinstance(olmm.sample(x, latent=False), B.NPNumeric)
    assert not olmm.xs.used, "olmm.xs was used."
    assert olmm.xs_noisy.used, "olmm.xs_noisy was not used."
Example #27
File: test_model.py Project: Zhanghq8/gpar
def test_obs():
    graph = Graph()
    f = GP(EQ(), graph=graph)
    e = GP(1e-8 * Delta(), graph=graph)

    # Check that it produces the correct observations.
    x = B.linspace(0, 0.1, 10, dtype=torch.float64)
    y = f(x).sample()

    # Set some observations to be missing.
    y_missing = y.clone()
    y_missing[::2] = np.nan

    # Check dense case.
    gpar = GPAR()
    obs = gpar._obs(x, None, y_missing, f, e)
    yield eq, type(obs), Obs
    yield approx, y, (f | obs).mean(x)

    # Check sparse case.
    gpar = GPAR(x_ind=x)
    obs = gpar._obs(x, x, y_missing, f, e)
    yield eq, type(obs), SparseObs
    yield approx, y, (f | obs).mean(x)
Example #28
def test_update_inputs():
    prior = Measure()
    f = GP(EQ(), measure=prior)

    x = np.array([[1], [2], [3]])
    y = np.array([[4], [5], [6]], dtype=float)
    res = B.concat(x, y, axis=1)
    x_ind = np.array([[6], [7]])
    res_ind = np.array([[6, 0], [7, 0]])

    # Check vanilla case.
    gpar = GPAR(x_ind=x_ind)
    approx(gpar._update_inputs(x, x_ind, y, f, None), (res, res_ind))

    # Check imputation with prior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[1, 1] = 0
    approx(gpar._update_inputs(x, x_ind, this_y, f, None), (this_res, res_ind))

    # Check replacing with prior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[0, 1] = 0
    this_res[1, 1] = np.nan
    this_res[2, 1] = 0
    approx(gpar._update_inputs(x, x_ind, this_y, f, None), (this_res, res_ind))

    # Check imputation and replacing with prior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.copy()
    this_res[:, 1] = 0
    approx(gpar._update_inputs(x, x_ind, y, f, None), (this_res, res_ind))

    # Construct observations and update result for inducing points.
    obs = Obs(f(np.array([1, 2, 3, 6, 7])), np.array([9, 10, 11, 12, 13]))
    res_ind = np.array([[6, 12], [7, 13]])

    # Check imputation with posterior.
    gpar = GPAR(impute=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[1, 1] = 10
    approx(gpar._update_inputs(x, x_ind, this_y, f, obs), (this_res, res_ind))

    # Check replacing with posterior.
    gpar = GPAR(replace=True, x_ind=x_ind)
    this_y = y.copy()
    this_y[1] = np.nan
    this_res = res.copy()
    this_res[0, 1] = 9
    this_res[1, 1] = np.nan
    this_res[2, 1] = 11
    approx(gpar._update_inputs(x, x_ind, this_y, f, obs), (this_res, res_ind))

    # Check imputation and replacing with posterior.
    gpar = GPAR(impute=True, replace=True, x_ind=x_ind)
    this_res = res.copy()
    this_res[0, 1] = 9
    this_res[1, 1] = 10
    this_res[2, 1] = 11
    approx(gpar._update_inputs(x, x_ind, y, f, obs), (this_res, res_ind))
Example #29
def test_dimensionality():
    m = Measure()
    p1 = GP(EQ(), measure=m)
    p2 = GP(2 * EQ().stretch(2), measure=m)

    k1 = MultiOutputKernel(m, p1, p2)
    k2 = MultiOutputKernel(m, p1, p1)

    assert dimensionality(EQ()) == 1
    assert dimensionality(k1) == 2

    # Test the unpacking of `Wrapped`s and `Join`s.
    assert dimensionality(k1 + k2) == 2
    assert dimensionality(k1 * k2) == 2
    assert dimensionality(k1.periodic(1)) == 2
    assert dimensionality(k1.stretch(2)) == 2

    # Check that dimensionalities must line up.
    with pytest.raises(RuntimeError):
        dimensionality(k1 + EQ())

    # Check `PosteriorKernel`.
    assert dimensionality(PosteriorKernel(EQ(), EQ(), EQ(), None, 0)) == 1
    assert dimensionality(PosteriorKernel(k1, k2, k2, None, 0)) == 2
    assert dimensionality(PosteriorKernel(k1, k2, ADK(EQ()), None, 0)) == 2
    with pytest.raises(RuntimeError):
        assert dimensionality(PosteriorKernel(k1, k2, EQ(), None, 0)) == 2

    # Check `SubspaceKernel`.
    assert dimensionality(SubspaceKernel(EQ(), EQ(), None, 0)) == 1
    assert dimensionality(SubspaceKernel(k1, k2, None, 0)) == 2
    assert dimensionality(SubspaceKernel(k1, ADK(EQ()), None, 0)) == 2
    with pytest.raises(RuntimeError):
        assert dimensionality(SubspaceKernel(k1, EQ(), None, 0)) == 2
Example #30
File: eq.py Project: wesselb/gpcm
import lab as B
from stheno import EQ, GP, Delta, Measure

from experiments.experiment import run, setup

args, wd = setup("eq")

# Setup experiment.
n = 881  # Add last one for `linspace`.
noise = 0.1
t = B.linspace(-44, 44, n)
t_plot = B.linspace(-44, 44, 500)

# Setup true model and GPCM models.
kernel = EQ()
window = 2
scale = 1
n_u = 40
n_z = 88

# Sample data.
m = Measure()
gp_f = GP(kernel, measure=m)
gp_y = gp_f + GP(noise * Delta(), measure=m)
truth, y = map(B.flatten, m.sample(gp_f(t_plot), gp_y(t)))

# Remove region [-8.8, 8.8].
inds = ~((t >= -8.8) & (t <= 8.8))
t = t[inds]
y = y[inds]