def test_computes_diag_eval(self):
     a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
     variances = torch.randn(3)
     kernel = WhiteNoiseKernel(variances=variances)
     kernel.eval()
     actual = torch.diag(variances)
     res = kernel(a).evaluate()
     self.assertLess(torch.norm(res - actual), 1e-5)
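These snippets read like methods of a unittest.TestCase; a minimal sketch of the harness they appear to assume (the class name is an assumption, and the import path is the one used by older GPyTorch releases that still ship WhiteNoiseKernel):

import unittest

import torch
from gpytorch.kernels import WhiteNoiseKernel  # available in older GPyTorch releases

class TestWhiteNoiseKernel(unittest.TestCase):
    # the test_* methods from the surrounding examples would live here
    pass

if __name__ == "__main__":
    unittest.main()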
Example 2
 def test_computes_diag_eval_batch(self):
     a = torch.tensor([[4, 2, 8], [4, 2, 8]], dtype=torch.float).view(2, 3, 1)
     variances = torch.randn(2, 3, 1)
     kernel = WhiteNoiseKernel(variances=variances)
     kernel.eval()
     actual = torch.cat((torch.diag(variances[0].squeeze(-1)).unsqueeze(0),
                         torch.diag(variances[1].squeeze(-1)).unsqueeze(0)))
     res = kernel(a).evaluate()
     self.assertLess(torch.norm(res - actual), 1e-5)
Example 3
 def test_computes_zero_eval(self):
     a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
     b = torch.tensor([3, 7], dtype=torch.float).view(2, 1)
     variances = torch.randn(3)
     kernel = WhiteNoiseKernel(variances=variances)
     kernel.eval()
     actual_one = torch.zeros(3, 2)
     actual_two = torch.zeros(2, 3)
     res_one = kernel(a, b).evaluate()
     res_two = kernel(b, a).evaluate()
     self.assertLess(torch.norm(res_one - actual_one), 1e-5)
     self.assertLess(torch.norm(res_two - actual_two), 1e-5)
Example 4
 def test_computes_diag_train(self):
     a = torch.tensor([4, 2, 8], dtype=torch.float).view(3, 1)
     variances = torch.randn(3)
     kernel = WhiteNoiseKernel(variances=variances)
     actual = torch.diag(variances)
     res = kernel(a).evaluate()
     self.assertLess(torch.norm(res - actual), 1e-5)
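Taken together, the tests above pin down the kernel's contract: a diagonal of the stored variances whenever a single input of matching size is given, in train or eval mode, and an all-zero cross-covariance for distinct inputs in eval mode. A compact sketch of that behavior, with illustrative values that are not taken from the tests:

variances = torch.tensor([0.1, 0.2, 0.3])
kernel = WhiteNoiseKernel(variances=variances)
x = torch.randn(3, 1)
y = torch.randn(2, 1)

kernel.train()
print(kernel(x).evaluate())      # == torch.diag(variances)
kernel.eval()
print(kernel(x).evaluate())      # still diag(variances) for a matching-size input
print(kernel(x, y).evaluate())   # zeros(3, 2): no cross-covariance between distinct points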
Example 5
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1e-5, 1e-5))
     self.base_covar_module = ScaleKernel(RBFKernel(lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1)))
     self.grid_covar_module = GridInterpolationKernel(self.base_covar_module, grid_size=50, num_dims=1)
     self.noise_covar_module = WhiteNoiseKernel(variances=torch.ones(100) * 0.001)
     self.covar_module = self.grid_covar_module + self.noise_covar_module
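The constructors in these model examples only assemble the modules; an exact GP model also needs a forward method, which is not shown here. A minimal sketch of the usual pattern, assuming gpytorch is imported and the installed version exposes gpytorch.distributions.MultivariateNormal:

 def forward(self, x):
     mean_x = self.mean_module(x)
     covar_x = self.covar_module(x)
     return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)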
Example 6
 def __init__(self, train_inputs, train_targets, likelihood):
     super(ExactGPModel, self).__init__(train_inputs, train_targets,
                                        likelihood)
     self.mean_module = ConstantMean(constant_bounds=(-1, 1))
     self.rbf_covar_module = RBFKernel(log_lengthscale_bounds=(-3, 3))
     self.noise_covar_module = WhiteNoiseKernel(variances=torch.ones(11) * 0.001)
     self.covar_module = self.rbf_covar_module + self.noise_covar_module
Example 7
 def __init__(self, train_inputs, train_targets, likelihood):
     super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
     self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-1, 1))
     self.rbf_covar_module = RBFKernel(
         log_lengthscale_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1, log_transform=True)
     )
     self.noise_covar_module = WhiteNoiseKernel(variances=torch.ones(11) * 0.001)
     self.covar_module = ScaleKernel(self.rbf_covar_module + self.noise_covar_module)
Example 8
 def __init__(self, train_x, train_y, likelihood):
     super(GPRegressionModel, self).__init__(train_x, train_y, likelihood)
     self.mean_module = ConstantMean(constant_bounds=[-1e-5, 1e-5])
     self.base_covar_module = RBFKernel(log_lengthscale_bounds=(-5, 6))
     self.grid_covar_module = GridInterpolationKernel(
         self.base_covar_module, grid_size=50, grid_bounds=[(0, 1)])
     self.noise_covar_module = WhiteNoiseKernel(variances=torch.ones(100) * 0.001)
     self.covar_module = self.grid_covar_module + self.noise_covar_module
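A hedged usage sketch for a model like the GPRegressionModel above: the fixed-size variances vector ties the white-noise term to the number of training points (100 here), and fitting follows the standard exact-GP loop with GaussianLikelihood and ExactMarginalLogLikelihood. Data, variable names, and hyperparameters are assumptions for illustration:

train_x = torch.linspace(0, 1, 100).unsqueeze(-1)
train_y = torch.sin(6.28 * train_x).squeeze(-1) + 0.05 * torch.randn(100)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
model = GPRegressionModel(train_x, train_y, likelihood)

model.train()
likelihood.train()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for _ in range(50):
    optimizer.zero_grad()
    output = model(train_x)
    loss = -mll(output, train_y)
    loss.backward()
    optimizer.step()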
Example 9
    def __init__(self,
                 train_x,
                 train_y,
                 likelihood,
                 var=None,
                 latent=None,
                 kernel_params=None,
                 latent_params=None):
        super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
        if latent_params is None:
            latent_params = {'input_dim': train_x.size(-1)}
        self._set_latent_function(latent, latent_params)

        self.mean_module = ZeroMean()
        ard_num_dims = (self.latent_func.embed_dim
                        if self.latent_func.embed_dim is not None
                        else train_x.size(-1))

        kernel = kernel_params['type'] if kernel_params is not None else 'rbf'
        if kernel is None or kernel == 'rbf':
            self.kernel_covar_module = ScaleKernel(
                RBFKernel(ard_num_dims=ard_num_dims))
        elif kernel == 'matern':
            self.kernel_covar_module = ScaleKernel(
                MaternKernel(nu=1.5, ard_num_dims=ard_num_dims))
            # without scale kernel: very poor performance
            # matern 0.5, 1.5 and 2.5 all have similar performance
        elif kernel == 'spectral_mixture':
            self.kernel_covar_module = SpectralMixtureKernel(
                num_mixtures=kernel_params['n_mixtures'],
                ard_num_dims=train_x.size(-1))
            self.kernel_covar_module.initialize_from_data(train_x, train_y)
        else:
            raise NotImplementedError

        # set covariance module
        if var is not None:
            self.noise_covar_module = WhiteNoiseKernel(variances=var)
            self.covar_module = self.kernel_covar_module + self.noise_covar_module
        else:
            self.covar_module = self.kernel_covar_module
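A brief usage note on the var switch in this last constructor: when var is supplied, a fixed per-point noise term is added to the chosen base kernel through WhiteNoiseKernel; otherwise covar_module is the base kernel alone. A sketch of both cases, reusing train_x and train_y from the previous sketch; the latent function's handling of latent=None is an assumption, since _set_latent_function is not shown here:

likelihood = gpytorch.likelihoods.GaussianLikelihood()
# additive fixed noise: covar_module becomes kernel_covar_module + WhiteNoiseKernel
model = ExactGPModel(train_x, train_y, likelihood,
                     var=torch.full((train_x.size(0),), 1e-3))
# omitting var leaves covar_module as the scaled RBF/Matern/spectral-mixture kernel alone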