Example #1
 def __init__(self, train_x, train_y, likelihood, lengthscale_constraint,
              outputscale_constraint, ard_dims):
     super(GP, self).__init__(train_x, train_y, likelihood)
     self.ard_dims = ard_dims
     self.mean_module = ConstantMeanGrad()
     base_kernel = RBFKernelGrad(
         lengthscale_constraint=lengthscale_constraint,
         ard_num_dims=ard_dims)
     self.covar_module = ScaleKernel(
         base_kernel, outputscale_constraint=outputscale_constraint)
Example #2
 def __init__(self, dim):
     # squeeze output dim before passing train_Y to ExactGP
     # super().__init__(train_X, train_Y.squeeze(-1), GaussianLikelihood())
     # super().__init__(train_X, train_Y, MultitaskGaussianLikelihood(num_tasks=1+train_X.shape[-1]))
     self.likelihood = MultitaskGaussianLikelihood(num_tasks=1 + dim)
     self.mean_module = ConstantMeanGrad()
     base_kernel = RBFKernelGrad(ard_num_dims=dim)
     self.covar_module = ScaleKernel(base_kernel=base_kernel)
     # self.to(train_X)  # make sure we're on the right device/dtype
     self.dim = dim
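Examples 1 and 2 only show constructors. For reference, a minimal self-contained sketch of a derivative-aware GP built from the same pieces (ConstantMeanGrad, RBFKernelGrad wrapped in ScaleKernel, MultitaskGaussianLikelihood) could look like the following; the class name GPWithDerivatives, the forward method, and the toy training data are illustrative assumptions, not part of the original examples.

 import torch
 import gpytorch
 from gpytorch.distributions import MultitaskMultivariateNormal
 from gpytorch.kernels import RBFKernelGrad, ScaleKernel
 from gpytorch.likelihoods import MultitaskGaussianLikelihood
 from gpytorch.means import ConstantMeanGrad

 class GPWithDerivatives(gpytorch.models.ExactGP):
     # Hypothetical model combining the components used in Examples 1 and 2.
     def __init__(self, train_x, train_y, likelihood):
         super().__init__(train_x, train_y, likelihood)
         self.mean_module = ConstantMeanGrad()
         self.covar_module = ScaleKernel(RBFKernelGrad(ard_num_dims=train_x.shape[-1]))

     def forward(self, x):
         mean_x = self.mean_module(x)    # n x (d + 1): value mean plus gradient means
         covar_x = self.covar_module(x)  # n*(d + 1) x n*(d + 1)
         return MultitaskMultivariateNormal(mean_x, covar_x)

 # train_y stacks function values and gradients: column 0 is f(x), columns 1..d are df/dx_i.
 d = 2
 train_x = torch.rand(10, d)
 train_y = torch.randn(10, d + 1)
 likelihood = MultitaskGaussianLikelihood(num_tasks=d + 1)
 model = GPWithDerivatives(train_x, train_y, likelihood)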
Example #3
    def test_kernel(self, cuda=False):
        a = torch.tensor([[[1, 2], [2, 4]]], dtype=torch.float)
        b = torch.tensor([[[1, 3], [0, 4]]], dtype=torch.float)

        actual = torch.tensor([
            [0.35321, 0, -0.73517, 0.0054977, 0.011443, -0.022886],
            [0, 0.73517, 0, -0.011443, -0.012374, 0.047633],
            [0.73517, 0, -0.79499, 0.022886, 0.047633, -0.083824],
            [0.12476, 0.25967, 0.25967, 0.015565, 0.064793, 0],
            [-0.25967, -0.2808, -0.54047, -0.064793, -0.23732, 0],
            [-0.25967, -0.54047, -0.2808, 0, 0, 0.032396],
        ])

        kernel = RBFKernelGrad()

        if cuda:
            a = a.cuda()
            b = b.cuda()
            actual = actual.cuda()
            kernel = kernel.cuda()

        res = kernel(a, b).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)
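A note for orientation (not part of the test above): for n x d inputs evaluated against m x d inputs, RBFKernelGrad returns an n*(d+1) x m*(d+1) matrix of values, gradients, and cross-derivatives, which is why the two 2-point, 2-dimensional inputs in this test produce a 6 x 6 expected matrix. A quick shape check, with sizes chosen purely for illustration:

 import torch
 from gpytorch.kernels import RBFKernelGrad

 k = RBFKernelGrad()
 x1 = torch.rand(5, 3)  # n = 5 points in d = 3 dimensions
 x2 = torch.rand(4, 3)  # m = 4 points
 print(k(x1, x2).evaluate().shape)  # torch.Size([20, 16]), i.e. 5*(3+1) x 4*(3+1)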
Example #4
    def test_kernel_batch(self):
        a = torch.tensor([[[1, 2, 3], [2, 4, 0]], [[-1, 1, 2], [2, 1, 4]]],
                         dtype=torch.float)
        b = torch.tensor([[[1, 3, 1]], [[2, -1, 0]]],
                         dtype=torch.float).repeat(1, 2, 1)

        kernel = RBFKernelGrad()
        res = kernel(a, b).evaluate()

        # Compute each batch separately
        actual = torch.zeros(2, 8, 8)
        actual[0, :, :] = kernel(a[0, :, :].squeeze(),
                                 b[0, :, :].squeeze()).evaluate()
        actual[1, :, :] = kernel(a[1, :, :].squeeze(),
                                 b[1, :, :].squeeze()).evaluate()

        self.assertLess(torch.norm(res - actual), 1e-5)
Example #5
 def test_initialize_lengthscale_batch(self):
     kernel = RBFKernelGrad(batch_size=2)
     ls_init = torch.tensor([3.14, 4.13])
     kernel.initialize(lengthscale=ls_init)
     actual_value = ls_init.view_as(kernel.lengthscale)
     self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
Example #6
 def test_initialize_lengthscale(self):
     kernel = RBFKernelGrad()
     kernel.initialize(lengthscale=3.14)
     actual_value = torch.tensor(3.14).view_as(kernel.lengthscale)
     self.assertLess(torch.norm(kernel.lengthscale - actual_value), 1e-5)
Example #7
 def __init__(self, train_x, train_y, likelihood):
     super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMeanGrad()
 self.base_kernel = RBFKernelGrad(ard_num_dims=3)  # fixed keyword: the argument is ard_num_dims
     self.covar_module = ScaleKernel(self.base_kernel)
Example #8
 def create_kernel_no_ard(self, **kwargs):
     return RBFKernelGrad(**kwargs)
Example #9
 def create_kernel_ard(self, num_dims, **kwargs):
     return RBFKernelGrad(ard_num_dims=num_dims, **kwargs)
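The two factory helpers above differ only in whether ARD is used. As a rough illustration of what that changes in practice (shapes assume the default empty batch shape):

 from gpytorch.kernels import RBFKernelGrad

 k_iso = RBFKernelGrad()                # one lengthscale shared across all input dimensions
 print(k_iso.lengthscale.shape)         # torch.Size([1, 1])

 k_ard = RBFKernelGrad(ard_num_dims=3)  # one lengthscale per input dimension
 print(k_ard.lengthscale.shape)         # torch.Size([1, 3])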