# imports assumed by this snippet (lazy-tensor API from older GPyTorch releases)
import torch

from gpytorch.distributions import MultivariateNormal
from gpytorch.lazy import DiagLazyTensor
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood
from gpytorch.likelihoods.noise_models import FixedGaussianNoise


def test_fixed_noise_gaussian_likelihood(self, cuda=False):
    device = torch.device("cuda") if cuda else torch.device("cpu")
    for dtype in (torch.float, torch.double):
        noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
        lkhd = FixedNoiseGaussianLikelihood(noise=noise)
        # test basics
        self.assertIsInstance(lkhd.noise_covar, FixedGaussianNoise)
        self.assertTrue(torch.equal(noise, lkhd.noise))
        new_noise = 0.1 + torch.rand(4, device=device, dtype=dtype)
        lkhd.noise = new_noise
        self.assertTrue(torch.equal(lkhd.noise, new_noise))
        # test __call__
        mean = torch.zeros(4, device=device, dtype=dtype)
        covar = DiagLazyTensor(torch.ones(4, device=device, dtype=dtype))
        mvn = MultivariateNormal(mean, covar)
        out = lkhd(mvn)
        self.assertTrue(torch.allclose(out.variance, 1 + new_noise))
        # a shape mismatch between the input and the stored noise should warn
        mean = torch.zeros(5, device=device, dtype=dtype)
        covar = DiagLazyTensor(torch.ones(5, device=device, dtype=dtype))
        mvn = MultivariateNormal(mean, covar)
        with self.assertWarns(UserWarning):
            lkhd(mvn)
        # test __call__ w/ observation noise passed at call time
        obs_noise = 0.1 + torch.rand(5, device=device, dtype=dtype)
        out = lkhd(mvn, noise=obs_noise)
        self.assertTrue(torch.allclose(out.variance, 1 + obs_noise))
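For context, this method is written against unittest.TestCase (note the self parameter and the assert* helpers). A minimal harness it could run under, assuming the common pattern of a CUDA variant that re-invokes the CPU test; the class name here is illustrative, not from the original:

import unittest
import torch

class TestFixedNoiseGaussianLikelihood(unittest.TestCase):
    # ... test_fixed_noise_gaussian_likelihood from above goes here ...

    def test_fixed_noise_gaussian_likelihood_cuda(self):
        # repeat the same checks on the GPU when one is available
        if torch.cuda.is_available():
            self.test_fixed_noise_gaussian_likelihood(cuda=True)

if __name__ == "__main__":
    unittest.main()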
Example 2
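Example 2 relies on two helpers, make_data and GPRegressionModel, that are defined elsewhere in the test module, plus the usual imports. A plausible minimal sketch of both follows; the sine data, grid size, and kernel choice are assumptions, the only hard constraints being 100 training points (to match the torch.ones(100) noise vector) and a grid-interpolation (KISS-GP) covariance, as the test name suggests:

import math

import torch
from torch import optim

import gpytorch
from gpytorch.likelihoods import FixedNoiseGaussianLikelihood


def make_data():
    # 100 one-dimensional training points and a held-out test grid
    train_x = torch.linspace(0, 1, 100)
    train_y = torch.sin(train_x * (2 * math.pi))
    test_x = torch.linspace(0, 1, 51)
    test_y = torch.sin(test_x * (2 * math.pi))
    return train_x, train_y, test_x, test_y


class GPRegressionModel(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        base_covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
        # KISS-GP: interpolate the base kernel onto a fixed grid for fast MVMs
        self.covar_module = gpytorch.kernels.GridInterpolationKernel(
            base_covar_module, grid_size=50, num_dims=1
        )

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)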
    def test_kissgp_gp_fast_pred_var(self):
        with gpytorch.settings.fast_pred_var(), gpytorch.settings.debug(False):
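            # fast_pred_var turns on cached (LOVE-style) predictive variances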
            train_x, train_y, test_x, test_y = make_data()
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) * 0.001)
            gp_model = GPRegressionModel(train_x, train_y, likelihood)
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(
                list(gp_model.parameters()) + list(likelihood.parameters()), lr=0.1
            )
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.step()

            for param in gp_model.parameters():
                self.assertIsNotNone(param.grad)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertIsNotNone(param.grad)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()
            # Prime the fast predictive variance cache with one eval-mode call
            test_function_predictions = likelihood(gp_model(train_x))

            # Now bump up the likelihood to something huge
            # This will make it easy to calculate the variance
            likelihood.noise = torch.ones(100) * 3.0
            test_function_predictions = likelihood(gp_model(train_x))

            noise = likelihood.noise
            var_diff = (test_function_predictions.variance - noise).abs()
            self.assertLess(torch.max(var_diff / noise), 0.05)
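            # Why this works: with noise = 3.0 dominating the latent function
            # variance at the training inputs, the cached fast_pred_var path
            # should return a predictive variance within ~5% of the noise.
            # Hypothetical follow-up at the held-out inputs (not in the
            # original, which leaves test_x and test_y unused), passing
            # call-time noise just as Example 1 does with lkhd(mvn, noise=...):
            with torch.no_grad():
                test_preds = likelihood(
                    gp_model(test_x), noise=torch.full_like(test_x, 3.0)
                )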