Example #1
    def test_ard(self):
        a = torch.tensor([[[1, 2], [2, 4]]], dtype=torch.float).repeat(2, 1, 1)
        b = torch.tensor([[[1, 3], [0, 4]]], dtype=torch.float).repeat(2, 1, 1)
        lengthscales = torch.tensor([1, 2], dtype=torch.float).view(1, 1, 2)

        base_kernel = RBFKernel(ard_num_dims=2)
        base_kernel.initialize(lengthscale=lengthscales)
        kernel = ScaleKernel(base_kernel)
        kernel.initialize(outputscale=torch.tensor([3], dtype=torch.float))
        kernel.eval()

        scaled_a = a.div(lengthscales)
        scaled_b = b.div(lengthscales)
        actual = (scaled_a.unsqueeze(-2) - scaled_b.unsqueeze(-3)).pow(2).sum(dim=-1).mul_(-0.5).exp()
        actual.mul_(3)
        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # Diag
        res = kernel(a, b).diag()
        actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims
        actual = scaled_a.transpose(-1, -2).unsqueeze(-1) - scaled_b.transpose(-1, -2).unsqueeze(-2)
        actual = actual.pow(2).mul_(-0.5).exp().view(4, 2, 2)
        actual.mul_(3)
        res = kernel(a, b, batch_dims=(0, 2)).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims and diag
        res = kernel(a, b, batch_dims=(0, 2)).diag()
        actual = torch.cat([actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
        self.assertLess(torch.norm(res - actual), 1e-5)
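
For reference, ScaleKernel simply multiplies the base kernel's covariance by outputscale, which is why the expected matrix above is scaled by 3. A minimal sketch of that identity (illustrative values, assuming a recent gpytorch):

import torch
from gpytorch.kernels import RBFKernel, ScaleKernel

base = RBFKernel()
scaled = ScaleKernel(base)
scaled.initialize(outputscale=3.0)
scaled.eval()

x = torch.randn(5, 2)
# ScaleKernel(x1, x2) == outputscale * base_kernel(x1, x2)
assert torch.allclose(
    scaled(x, x).evaluate(),
    3.0 * base(x, x).evaluate(),
    atol=1e-6,
)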
Example #2
    def test_inherit_active_dims(self):
        lengthscales = torch.tensor([1, 1], dtype=torch.float)
        base_kernel = RBFKernel(active_dims=(1, 2), ard_num_dims=2)
        base_kernel.initialize(lengthscale=lengthscales)
        kernel = ScaleKernel(base_kernel)
        kernel.initialize(outputscale=torch.tensor([3], dtype=torch.float))
        kernel.eval()
        self.assertTrue(
            torch.all(kernel.active_dims == base_kernel.active_dims))
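
active_dims restricts which input columns a kernel sees, and the test above confirms the ScaleKernel wrapper reports its base kernel's active_dims. A sketch of what the restriction means (default lengthscales, assuming gpytorch):

import torch
from gpytorch.kernels import RBFKernel

restricted = RBFKernel(active_dims=(1, 2), ard_num_dims=2)
plain = RBFKernel(ard_num_dims=2)  # same default lengthscale
x = torch.randn(5, 4)

# The restricted kernel only sees columns 1 and 2 of x:
assert torch.allclose(
    restricted(x, x).evaluate(),
    plain(x[:, 1:3], x[:, 1:3]).evaluate(),
    atol=1e-6,
)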
Example #3
def create_multi_full_kernel(d, J, init_mixin_range=(1.0, 1.0), **kwargs):
    """Helper to create a sum of full kernels with the options in **kwargs."""
    outputscales = _sample_from_range(J, init_mixin_range)
    total = sum(outputscales)
    outputscales = [o / total for o in outputscales]

    subkernels = []
    for j in range(J):
        new_kernel = ScaleKernel(create_full_kernel(d, **kwargs))
        new_kernel.initialize(outputscale=outputscales[j])
        subkernels.append(new_kernel)
    return gpytorch.kernels.AdditiveKernel(*subkernels)
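
The helper normalizes the sampled outputscales to sum to one, and AdditiveKernel evaluates to the sum of its scaled sub-kernels, so the mixture's total output variance stays bounded. A quick check of that additive identity (sketch, assuming gpytorch):

import torch
import gpytorch
from gpytorch.kernels import RBFKernel, ScaleKernel

k1 = ScaleKernel(RBFKernel())
k1.initialize(outputscale=0.25)
k2 = ScaleKernel(RBFKernel())
k2.initialize(outputscale=0.75)
additive = gpytorch.kernels.AdditiveKernel(k1, k2)

x = torch.randn(4, 1)
assert torch.allclose(
    additive(x, x).evaluate(),
    k1(x, x).evaluate() + k2(x, x).evaluate(),
    atol=1e-6,
)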
Example #4
    def make_kernel(active_dim=None):
        kernel = _map_to_kernel(True, kernel_type, keops, active_dims=active_dim)

        if hasattr(kernel, 'period_length'):
            kernel.initialize(period_length=torch.tensor([1.]))
        else:
            kernel.initialize(lengthscale=torch.tensor([1.]))
        kernel = ScaleKernel(kernel)
        kernel.initialize(outputscale=torch.tensor([1/J]))
        if ski:
            kernel = gpytorch.kernels.GridInterpolationKernel(kernel, **ski_options)
        return kernel
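
The hasattr branch dispatches on kernel type: periodic kernels expose a period_length parameter, while stationary kernels expose lengthscale. For example:

from gpytorch.kernels import PeriodicKernel, RBFKernel

print(hasattr(PeriodicKernel(), 'period_length'))  # True
print(hasattr(RBFKernel(), 'period_length'))       # False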
Example #5
    def test_forward(self):
        gam_kernel = MemoryEfficientGamKernel()
        x = torch.tensor([[1., 2., 3.], [1.1, 2.2, 3.3]])
        K = gam_kernel(x, x).evaluate()

        k = ScaleKernel(RBFKernel())
        k.initialize(outputscale=1.)
        as_kernel = AdditiveStructureKernel(k, 2)
        K2 = as_kernel(x, x).evaluate()

        np.testing.assert_allclose(K.detach().numpy(),
                                   K2.detach().numpy(),
                                   atol=1e-6)
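
AdditiveStructureKernel(k, d) applies the wrapped kernel to each input dimension separately and sums the results, which is the GAM structure this test compares against. A manual check of that behavior (sketch, assuming gpytorch):

import torch
from gpytorch.kernels import AdditiveStructureKernel, RBFKernel, ScaleKernel

k = ScaleKernel(RBFKernel())
k.initialize(outputscale=1.)
as_kernel = AdditiveStructureKernel(k, num_dims=3)

x = torch.randn(4, 3)
manual = sum(k(x[:, i:i + 1], x[:, i:i + 1]).evaluate() for i in range(3))
assert torch.allclose(as_kernel(x, x).evaluate(), manual, atol=1e-6)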
Example #6
    def test_forward_batch_mode(self):
        a = torch.Tensor([4, 2, 8]).view(1, 3, 1).repeat(4, 1, 1)
        b = torch.Tensor([0, 2]).view(1, 2, 1).repeat(4, 1, 1)
        lengthscale = 2

        base_kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
        kernel = ScaleKernel(base_kernel, batch_size=4)
        kernel.initialize(log_outputscale=torch.Tensor([1, 2, 3, 4]).log())
        kernel.eval()

        base_actual = torch.Tensor([[16, 4], [4, 0], [64, 36]]).mul_(-0.5).div_(lengthscale ** 2).exp()
        actual = base_actual.unsqueeze(0).mul(torch.Tensor([1, 2, 3, 4]).view(4, 1, 1))
        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)
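
log_lengthscale, log_outputscale, and batch_size are parameter spellings from pre-1.0 gpytorch; Example #10 below uses the newer batch_shape/outputscale form of the same batched construction. A minimal sketch in the newer API:

import torch
from gpytorch.kernels import RBFKernel, ScaleKernel

kernel = ScaleKernel(RBFKernel(batch_shape=torch.Size([4])),
                     batch_shape=torch.Size([4]))
kernel.initialize(outputscale=torch.tensor([1., 2., 3., 4.]))
kernel.eval()

a = torch.randn(4, 3, 1)
print(kernel(a, a).evaluate().shape)  # torch.Size([4, 3, 3])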
Example #7
    def test_forward(self):
        a = torch.Tensor([4, 2, 8]).view(3, 1)
        b = torch.Tensor([0, 2]).view(2, 1)
        lengthscale = 2

        base_kernel = RBFKernel().initialize(log_lengthscale=math.log(lengthscale))
        kernel = ScaleKernel(base_kernel)
        kernel.initialize(log_outputscale=torch.Tensor([3]).log())
        kernel.eval()

        actual = torch.Tensor([[16, 4], [4, 0], [64, 36]]).mul_(-0.5).div_(lengthscale ** 2).exp()
        actual = actual * 3
        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)
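
The expected matrix here is just the closed form k(a_i, b_j) = outputscale * exp(-(a_i - b_j)^2 / (2 * lengthscale^2)), recomputed by hand below (sketch):

import torch

a = torch.tensor([4., 2., 8.]).view(3, 1)
b = torch.tensor([0., 2.]).view(2, 1)
lengthscale, outputscale = 2., 3.

sq_dist = (a - b.t()).pow(2)  # [[16, 4], [4, 0], [64, 36]]
expected = outputscale * torch.exp(-0.5 * sq_dist / lengthscale ** 2)
print(expected)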
Example #8
    def test_degree1(self):
        AddK = NewtonGirardAdditiveKernel(RBFKernel(ard_num_dims=3), 3, 1)
        self.assertEqual(AddK.base_kernel.lengthscale.numel(), 3)
        self.assertEqual(AddK.outputscale.numel(), 1)

        testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
        add_k_val = AddK(testvals, testvals).evaluate()

        manual_k = ScaleKernel(AdditiveKernel(RBFKernel(active_dims=0),
                                              RBFKernel(active_dims=1),
                                              RBFKernel(active_dims=2)))
        manual_k.initialize(outputscale=1.)
        manual_add_k_val = manual_k(testvals, testvals).evaluate()

        # np.testing.assert_allclose(add_k_val.detach().numpy(), manual_add_k_val.detach().numpy(), atol=1e-5)
        self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
Example #9
    def test_ard(self):
        base_k = RBFKernel(ard_num_dims=3)
        base_k.initialize(lengthscale=[1., 2., 3.])
        AddK = NewtonGirardAdditiveKernel(base_k, 3, max_degree=1)

        testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
        add_k_val = AddK(testvals, testvals).evaluate()

        ks = []
        for i in range(3):
            k = RBFKernel(active_dims=i)
            k.initialize(lengthscale=i + 1)
            ks.append(k)
        manual_k = ScaleKernel(AdditiveKernel(*ks))
        manual_k.initialize(outputscale=1.)
        manual_add_k_val = manual_k(testvals, testvals).evaluate()

        # np.testing.assert_allclose(add_k_val.detach().numpy(), manual_add_k_val.detach().numpy(), atol=1e-5)
        self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
Example #10
    def test_ard_batch(self):
        a = torch.tensor([[[1, 2, 3], [2, 4, 0]], [[-1, 1, 2], [2, 1, 4]]],
                         dtype=torch.float)
        b = torch.tensor([[[1, 3, 1]], [[2, -1, 0]]],
                         dtype=torch.float).repeat(1, 2, 1)
        lengthscales = torch.tensor([[[1, 2, 1]]], dtype=torch.float)

        base_kernel = RBFKernel(batch_shape=torch.Size([2]), ard_num_dims=3)
        base_kernel.initialize(lengthscale=lengthscales)
        kernel = ScaleKernel(base_kernel, batch_shape=torch.Size([2]))
        kernel.initialize(outputscale=torch.tensor([1, 2], dtype=torch.float))
        kernel.eval()

        scaled_a = a.div(lengthscales)
        scaled_b = b.div(lengthscales)
        actual = (scaled_a.unsqueeze(-2) -
                  scaled_b.unsqueeze(-3)).pow(2).sum(dim=-1).mul_(-0.5).exp()
        actual[1].mul_(2)
        res = kernel(a, b).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # diag
        res = kernel(a, b).diag()
        actual = torch.cat(
            [actual[i].diag().unsqueeze(0) for i in range(actual.size(0))])
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims
        double_batch_a = scaled_a.transpose(-1, -2)
        double_batch_b = scaled_b.transpose(-1, -2)
        actual = double_batch_a.unsqueeze(-1) - double_batch_b.unsqueeze(-2)
        actual = actual.pow(2).mul_(-0.5).exp()
        actual[1, :, :, :].mul_(2)
        res = kernel(a, b, last_dim_is_batch=True).evaluate()
        self.assertLess(torch.norm(res - actual), 1e-5)

        # batch_dims and diag
        res = kernel(a, b, last_dim_is_batch=True).diag()
        actual = actual.diagonal(dim1=-2, dim2=-1)
        self.assertLess(torch.norm(res - actual), 1e-5)
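
last_dim_is_batch=True (which replaced the older batch_dims=(0, 2) argument seen in Example #1) folds the feature dimension into the batch, returning one covariance matrix per input dimension. A shape-only sketch:

import torch
from gpytorch.kernels import RBFKernel

kernel = RBFKernel()
kernel.eval()

x = torch.randn(5, 3)
print(kernel(x, x, last_dim_is_batch=True).evaluate().shape)
# torch.Size([3, 5, 5]) -- one 5 x 5 covariance per feature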
Example #11
    def test_degree3(self):
        # just make sure it doesn't break here.
        AddK = NewtonGirardAdditiveKernel(RBFKernel(ard_num_dims=3), 3, 3)
        self.assertEqual(AddK.base_kernel.lengthscale.numel(), 3)
        self.assertEqual(AddK.outputscale.numel(), 3)

        testvals = torch.tensor([[1, 2, 3], [7, 5, 2]], dtype=torch.float)
        add_k_val = AddK(testvals, testvals).evaluate()

        manual_k1 = ScaleKernel(
            AdditiveKernel(RBFKernel(active_dims=0), RBFKernel(active_dims=1),
                           RBFKernel(active_dims=2)))
        manual_k1.initialize(outputscale=1 / 3)
        manual_k2 = ScaleKernel(
            AdditiveKernel(RBFKernel(active_dims=[0, 1]),
                           RBFKernel(active_dims=[1, 2]),
                           RBFKernel(active_dims=[0, 2])))
        manual_k2.initialize(outputscale=1 / 3)

        manual_k3 = ScaleKernel(AdditiveKernel(RBFKernel()))
        manual_k3.initialize(outputscale=1 / 3)
        manual_k = AdditiveKernel(manual_k1, manual_k2, manual_k3)
        manual_add_k_val = manual_k(testvals, testvals).evaluate()
        # np.testing.assert_allclose(add_k_val.detach().numpy(), manual_add_k_val.detach().numpy(), atol=1e-5)
        self.assertTrue(torch.allclose(add_k_val, manual_add_k_val, atol=1e-5))
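
With d = 3 inputs and max_degree = 3, the Newton-Girard decomposition contains C(3,1) = 3 first-order, C(3,2) = 3 second-order, and C(3,3) = 1 third-order terms, exactly the groups the three manual ScaleKernels above enumerate (each degree initialized to outputscale 1/3):

from math import comb

# Interaction terms per degree for d = 3 input dimensions:
print([comb(3, k) for k in (1, 2, 3)])  # [3, 3, 1]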
Example #12
    def test_initialize_outputscale_batch(self):
        kernel = ScaleKernel(RBFKernel(), batch_size=2)
        ls_init = torch.tensor([3.14, 4.13])
        kernel.initialize(outputscale=ls_init)
        actual_value = ls_init.view_as(kernel.outputscale)
        self.assertLess(torch.norm(kernel.outputscale - actual_value), 1e-5)
Example #13
    def test_initialize_outputscale(self):
        kernel = ScaleKernel(RBFKernel())
        kernel.initialize(outputscale=3.14)
        actual_value = torch.tensor(3.14).view_as(kernel.outputscale)
        self.assertLess(torch.norm(kernel.outputscale - actual_value), 1e-5)
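
initialize(outputscale=...) sets the constrained value; internally ScaleKernel stores a raw_outputscale parameter mapped through a positivity constraint, and the tests reshape the target with view_as to match the stored shape. A sketch:

import torch
from gpytorch.kernels import RBFKernel, ScaleKernel

kernel = ScaleKernel(RBFKernel())
kernel.initialize(outputscale=3.14)
print(kernel.outputscale)      # tensor(3.1400, grad_fn=...)
print(kernel.raw_outputscale)  # unconstrained value behind the constraint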