Example #1
def test_inference_deepGP():
    # Layer 1: exact GP regression with no observed targets (y=None); its
    # model() output serves as the input of the next layer.
    gp1 = GPRegression(
        X, None,
        RBF(input_dim=3,
            variance=torch.tensor(3.),
            lengthscale=torch.tensor(2.)))
    Z, _ = gp1.model()
    # Layer 2: sparse variational GP on the first layer's output, trained
    # against the 2-D targets y2D with a Gaussian likelihood.
    gp2 = VariationalSparseGP(Z, y2D, Matern32(input_dim=3), Z.clone(),
                              Gaussian(torch.tensor(1e-6)))

    class DeepGP(torch.nn.Module):
        # Composes both layers into a single model/guide pair for training.
        def __init__(self, gp1, gp2):
            super(DeepGP, self).__init__()
            self.gp1 = gp1
            self.gp2 = gp2

        def model(self):
            # Chain the layers: layer 1's output becomes layer 2's input data.
            Z, _ = self.gp1.model()
            self.gp2.set_data(Z, y2D)
            self.gp2.model()

        def guide(self):
            self.gp1.guide()
            self.gp2.guide()

    deepgp = DeepGP(gp1, gp2)
    train(deepgp, num_steps=1)  # a single optimization step suffices for this smoke test
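
The snippet relies on module-level fixtures (X, y2D) and a train helper defined elsewhere in the test file. A minimal sketch of that setup, assuming train is pyro.contrib.gp.util.train and using illustrative shapes (the original test uses its own values):

import torch
from pyro.contrib.gp.kernels import RBF, Matern32
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import GPRegression, VariationalSparseGP
from pyro.contrib.gp.util import train  # gradient-based training helper

# Illustrative fixtures; shapes only need to be consistent with input_dim=3.
X = torch.randn(5, 3)     # 5 inputs with 3 features each
y2D = torch.randn(2, 5)   # two output dimensions over the 5 inputs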
Example #2
def test_inference_deepGP():
    # The same two-layer construction, written against an older API that names
    # the GP modules and drives training with Pyro's SVI machinery directly.
    gp1 = GPRegression(X, None, kernel, name="GPR1")
    Z, _ = gp1.model()
    gp2 = VariationalSparseGP(Z,
                              y2D,
                              Matern32(input_dim=3),
                              Z.clone(),
                              likelihood,
                              name="GPR2")

    def model():
        Z, _ = gp1.model()
        gp2.set_data(Z, y2D)
        gp2.model()

    def guide():
        gp1.guide()
        gp2.guide()

    svi = SVI(model, guide, optim.Adam({}), Trace_ELBO())
    svi.step()  # one ELBO optimization step as a smoke test
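
Here kernel and likelihood are module-level fixtures, and inference runs through pyro.infer.SVI rather than a training helper. A sketch of the imports and stand-in fixtures this variant assumes (values are illustrative):

import torch
import pyro.optim as optim
from pyro.infer import SVI, Trace_ELBO
from pyro.contrib.gp.kernels import RBF, Matern32
from pyro.contrib.gp.likelihoods import Gaussian
from pyro.contrib.gp.models import GPRegression, VariationalSparseGP

kernel = RBF(input_dim=3)                  # stand-in for the module-level fixture
likelihood = Gaussian(torch.tensor(1e-6))  # stand-in for the module-level fixture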
Example #3
def test_active_dims_disjoint_ok():
    # Kernels acting on disjoint input columns ([0, 1] vs. [2]) can be summed;
    # constructing the combination must not raise.
    k1 = Matern52(2, variance, lengthscale[0], active_dims=[0, 1])
    k2 = Matern32(1, variance, lengthscale[0], active_dims=[2])
    Sum(k1, k2)
Example #4
def test_active_dims_overlap_ok():
    # Overlapping active_dims ([0, 1] vs. [1, 2]) are also allowed.
    k1 = Matern52(2, variance, lengthscale[0], active_dims=[0, 1])
    k2 = Matern32(2, variance, lengthscale[0], active_dims=[1, 2])
    Sum(k1, k2)
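
Both tests only check that the combined kernel can be constructed. A short sketch of what active_dims does when the kernel is evaluated, with assumed values for the variance and lengthscale fixtures used above:

import torch
from pyro.contrib.gp.kernels import Matern32, Matern52, Sum

variance = torch.tensor(3.0)                 # assumed fixture value
lengthscale = torch.tensor([2.0, 1.0, 2.0])  # assumed fixture value

k1 = Matern52(2, variance, lengthscale[0], active_dims=[0, 1])  # sees columns 0 and 1
k2 = Matern32(1, variance, lengthscale[0], active_dims=[2])     # sees column 2 only
k = Sum(k1, k2)  # K(X, Z) = K1(X[:, [0, 1]], Z[:, [0, 1]]) + K2(X[:, [2]], Z[:, [2]])

X = torch.randn(4, 3)
print(k(X).shape)  # torch.Size([4, 4]); each sub-kernel slices out its own columns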
Example #5
# Shared inputs for the kernel test cases below.
lengthscale = torch.tensor([2.0, 1.0, 2.0])
X = torch.tensor([[1.0, 0.0, 1.0], [2.0, 1.0, 3.0]])
Z = torch.tensor([[4.0, 5.0, 6.0], [3.0, 1.0, 7.0], [3.0, 1.0, 2.0]])

# Each case pairs a kernel with the expected sum of its covariance matrix K(X, Z).
TEST_CASES = [
    T(Constant(3, variance), X=X, Z=Z, K_sum=18),
    T(
        Brownian(1, variance),
        # Brownian only works on 1-D input
        X=X[:, 0],
        Z=Z[:, 0],
        K_sum=27),
    T(Cosine(3, variance, lengthscale), X=X, Z=Z, K_sum=-0.193233),
    T(Linear(3, variance), X=X, Z=Z, K_sum=291),
    T(Exponential(3, variance, lengthscale), X=X, Z=Z, K_sum=2.685679),
    T(Matern32(3, variance, lengthscale), X=X, Z=Z, K_sum=3.229314),
    T(Matern52(3, variance, lengthscale), X=X, Z=Z, K_sum=3.391847),
    T(Periodic(3, variance, lengthscale, period=torch.ones(1)),
      X=X,
      Z=Z,
      K_sum=18),
    T(Polynomial(3, variance, degree=2), X=X, Z=Z, K_sum=7017),
    T(RationalQuadratic(3, variance, lengthscale, scale_mixture=torch.ones(1)),
      X=X,
      Z=Z,
      K_sum=5.684670),
    T(RBF(3, variance, lengthscale), X=X, Z=Z, K_sum=3.681117),
    T(WhiteNoise(3, variance, lengthscale), X=X, Z=Z, K_sum=0),
    T(WhiteNoise(3, variance, lengthscale), X=X, Z=None, K_sum=6),
    T(
        Coregionalize(3, components=torch.eye(3, 3)),
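
Entries like these are typically consumed by a parametrized test that evaluates each kernel on (X, Z) and compares the summed covariance matrix with K_sum. A minimal sketch, assuming T is a simple container exposing kernel, X, Z, and K_sum fields (the field and test names here are illustrative):

import pytest
import torch

@pytest.mark.parametrize("case", TEST_CASES)
def test_kernel_forward(case):
    K = case.kernel(case.X, case.Z)  # Z=None means the kernel is evaluated as K(X, X)
    assert torch.isclose(K.sum(), torch.tensor(float(case.K_sum)), atol=1e-5)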