    def test_convert_from_projection_kernel(self):
        # After one training step, converting the RP model to an additive model
        # (plus its projection) should reproduce the original model's predictive
        # mean on new data.
        Ws = [torch.eye(2, 2) for _ in range(3)]
        bs = [torch.zeros(2) for _ in range(3)]
        kernel = PolynomialProjectionKernel(3,
                                            2,
                                            2,
                                            RBFKernel,
                                            Ws,
                                            bs,
                                            learn_proj=False,
                                            weighted=True)
        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        model = ExactGPModel(more_fake_data, more_fake_target, likelihood,
                             kernel)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)

        optimizer_ = torch.optim.Adam(model.parameters(), lr=0.01)
        optimizer_.zero_grad()
        output = model(more_fake_data)
        loss = -mll(output, more_fake_target)
        loss.backward()
        optimizer_.step()

        add_model, projection = convert_rp_model_to_additive_model(model, True)
        z = projection(even_more_fake_data)
        add_model.eval()
        predictions = add_model(z)
        model.eval()
        expected_predictions = model(even_more_fake_data)
        self.assertListEqual(predictions.mean.tolist(),
                             expected_predictions.mean.tolist())

    def test_fit_learn_proj(self):
        # With learn_proj=True, training should move the projection weights away
        # from their initial values, while the unweighted output scales stay
        # fixed at 1 / J.
        kernel = PolynomialProjectionKernel(self.J,
                                            self.k,
                                            self.d,
                                            self.base_kernel,
                                            self.Ws,
                                            self.bs,
                                            activation=None,
                                            learn_proj=True,
                                            weighted=False)
        likelihood = gpytorch.likelihoods.GaussianLikelihood()
        model = ExactGPModel(real_data, real_target, likelihood, kernel)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
        self.assertAlmostEqual(kernel.kernel.kernels[0].outputscale.item(),
                               1 / self.J)
        self.assertAlmostEqual(kernel.projection_module.weight[0, 0].item(), 1)
        self.assertAlmostEqual(kernel.projection_module.weight[0, 1].item(), 0)
        old_val = float(kernel.projection_module.weight[0, 0].item())
        model.train()

        optimizer_ = torch.optim.Adam(model.parameters(), lr=0.01)
        for _ in range(10):
            optimizer_.zero_grad()
            output = model(real_data)
            loss = -mll(output, real_target)
            loss.backward()
            optimizer_.step()
        self.assertNotEqual(
            kernel.kernel.kernels[0].base_kernel.kernels[0].raw_lengthscale, 0)
        self.assertAlmostEqual(kernel.kernel.kernels[0].outputscale.item(),
                               1 / self.J)
        self.assertNotEqual(kernel.projection_module.weight[0, 0].item(),
                            old_val)
        self.assertNotAlmostEqual(kernel.projection_module.weight[0, 1].item(),
                                  0)

# Example 3
def create_gam_model(data, y):
    # GAM-style GP: one RBF kernel per input dimension, combined additively.
    n, d = data.shape
    kernel = ScaleKernel(
        create_strictly_additive_kernel(d, False, 'RBF',
                                        memory_efficient=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model
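
# ExactGPModel itself is not shown in these snippets. The factories pass it
# (train_x, train_y, likelihood, kernel), which matches the standard gpytorch
# exact-GP wrapper. A minimal sketch of that wrapper, assuming gpytorch is
# imported and a constant mean is used (both assumptions, not taken from the
# source):
class ExactGPModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood, kernel):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = kernel

    def forward(self, x):
        # Return the GP at x as a multivariate normal.
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)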

# Example 4
def create_rp_model(data, y, proj_ratio=1):
    # RP GP: Matern-5/2 base kernels on round(proj_ratio * d) one-dimensional
    # projections of the inputs.
    n, d = data.shape
    kernel = ScaleKernel(
        RPPolyKernel(round(proj_ratio * d),
                     1,
                     d,
                     MaternKernel,
                     nu=2.5,
                     weighted=True,
                     space_proj=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model

# Example 5
def create_dpa_gp_ard_model(data, y, J):
    # Additive RP kernel with J fixed projections drawn on the sphere, ARD
    # lengthscales, and input prescaling.
    n, d = data.shape
    kernel = ScaleKernel(
        create_additive_rp_kernel(d,
                                  J,
                                  learn_proj=False,
                                  kernel_type='RBF',
                                  space_proj=True,
                                  prescale=True,
                                  batch_kernel=False,
                                  ard=True,
                                  proj_dist='sphere',
                                  mem_efficient=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model

# Example 6
def create_poly_rp_model(data, y, J, k):
    # Polynomial RP kernel: J weighted k-dimensional projections with RBF bases.
    n, d = data.shape
    kernel = ScaleKernel(
        RPPolyKernel(J, k, d, RBFKernel, weighted=True, space_proj=True))
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model

# Example 7
def create_bl_model(data, y):
    # Single scaled Matern kernel on the raw inputs (no projection).
    kernel = ScaleKernel(MaternKernel())
    model = ExactGPModel(data, y, GaussianLikelihood(), kernel)
    return model
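
# Minimal usage sketch for the factory helpers above. The synthetic data, the
# helper name _fit_example, and the training loop are illustrative assumptions,
# not taken from the source; torch and gpytorch are assumed to be imported as
# in the snippets above.
def _fit_example():
    torch.manual_seed(0)
    x = torch.randn(50, 3)          # toy inputs
    y = torch.sin(x).sum(dim=1)     # toy targets

    model = create_bl_model(x, y)
    model.train()
    model.likelihood.train()

    # Standard exact-GP training loop: maximize the marginal log likelihood.
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    for _ in range(50):
        optimizer.zero_grad()
        loss = -mll(model(x), y)
        loss.backward()
        optimizer.step()

    # Posterior predictions on new inputs.
    model.eval()
    model.likelihood.eval()
    with torch.no_grad(), gpytorch.settings.fast_pred_var():
        preds = model.likelihood(model(torch.randn(10, 3)))
    return preds.mean, preds.variance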

    def test_gradients(self):
        # Gradient flow through ScaledProjectionKernel: by default an optimizer
        # step updates only the outer ARD lengthscales, leaving the inner base
        # kernel lengthscale and the projection weights untouched; with
        # learn_proj=True the projection weights are updated as well.
        x = torch.tensor([[1., 2., 3.], [1.1, 2.2, 3.3]])
        y = torch.sin(x).sum(dim=1)
        kbase = RBFKernel()
        kbase.initialize(lengthscale=torch.tensor([1.]))
        base_kernel = AdditiveStructureKernel(kbase, 3)
        proj_module = torch.nn.Linear(3, 3, bias=False)
        proj_module.weight.data = torch.eye(3, dtype=torch.float)
        proj_kernel = ScaledProjectionKernel(proj_module,
                                             base_kernel,
                                             prescale=True,
                                             ard_num_dims=3)
        proj_kernel.initialize(lengthscale=torch.tensor([1., 2., 3.]))

        model = ExactGPModel(x, y, gpytorch.likelihoods.GaussianLikelihood(),
                             proj_kernel)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
        optimizer_ = torch.optim.Adam(model.parameters(), lr=0.1)
        optimizer_.zero_grad()

        pred = model(x)
        loss = -mll(pred, y)
        loss.backward()

        optimizer_.step()

        np.testing.assert_allclose(
            proj_kernel.base_kernel.base_kernel.lengthscale.detach().numpy(),
            torch.tensor([[1.]]).numpy())
        np.testing.assert_allclose(
            proj_kernel.projection_module.weight.detach().numpy(),
            torch.eye(3, dtype=torch.float).numpy())
        self.assertFalse(
            np.allclose(proj_kernel.lengthscale.detach().numpy(),
                        torch.tensor([1., 2., 3.]).numpy()))

        proj_module = torch.nn.Linear(3, 3, bias=False)
        proj_module.weight.data = torch.eye(3, dtype=torch.float)
        proj_kernel2 = ScaledProjectionKernel(proj_module,
                                              base_kernel,
                                              prescale=True,
                                              ard_num_dims=3,
                                              learn_proj=True)

        proj_kernel2.initialize(lengthscale=torch.tensor([1., 2., 3.]))

        model = ExactGPModel(x, y, gpytorch.likelihoods.GaussianLikelihood(),
                             proj_kernel2)
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(model.likelihood, model)
        optimizer_ = torch.optim.Adam(model.parameters(), lr=0.1)
        optimizer_.zero_grad()

        pred = model(x)
        loss = -mll(pred, y)
        loss.backward()

        optimizer_.step()

        np.testing.assert_allclose(
            proj_kernel2.base_kernel.base_kernel.lengthscale.detach().numpy(),
            torch.tensor([[1.]]).numpy())
        self.assertFalse(
            np.allclose(proj_kernel2.projection_module.weight.detach().numpy(),
                        torch.eye(3, dtype=torch.float).numpy()))
        self.assertFalse(
            np.allclose(proj_kernel2.lengthscale.detach().numpy(),
                        torch.tensor([1., 2., 3.]).numpy()))