Example No. 1
    def test_posterior_with_exact_computations_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                with gpytorch.settings.fast_computations(
                        covar_root_decomposition=False, log_prob=False):
                    self.test_posterior_latent_gp_and_likelihood_with_optimization(
                        cuda=True)
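These snippets come from GPyTorch's test suite, where least_used_cuda_device (exported from gpytorch.test.utils) is a context manager that runs the enclosed block on whichever visible CUDA device currently has the least memory in use. A minimal sketch of such a helper, assuming torch.cuda.memory_allocated as the utilization metric (the real implementation may differ), could look like:

    import contextlib

    import torch


    @contextlib.contextmanager
    def least_used_cuda_device():
        # Measure allocated memory on each visible CUDA device ...
        allocated = [torch.cuda.memory_allocated(d)
                     for d in range(torch.cuda.device_count())]
        # ... and make the least-loaded one the default device for the block.
        with torch.cuda.device(allocated.index(min(allocated))):
            yield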
Example No. 2
    def test_sgpr_mean_abs_error_cuda(self):
        # Suppress numerical warnings
        warnings.simplefilter("ignore", NumericalWarning)

        if not torch.cuda.is_available():
            return

        with least_used_cuda_device():
            self.test_sgpr_mean_abs_error(cuda=True)
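Returning early when CUDA is unavailable makes these tests pass silently. A variant using unittest's skip machinery (an alternative pattern, not what this suite does) would report the test as skipped instead:

    import unittest

    @unittest.skipUnless(torch.cuda.is_available(), "CUDA is not available")
    def test_sgpr_mean_abs_error_cuda(self):
        # Suppress numerical warnings, then run the CPU test body on the GPU
        warnings.simplefilter("ignore", NumericalWarning)
        with least_used_cuda_device():
            self.test_sgpr_mean_abs_error(cuda=True)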
Example No. 3
    def test_sgpr_mean_abs_error_cuda(self):
        # Suppress numerical warnings
        warnings.simplefilter("ignore", NumericalWarning)

        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = GaussianLikelihood().cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(25):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in gp_model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Test the model
            gp_model.eval()
            likelihood.eval()
            test_preds = likelihood(gp_model(test_x)).mean
            mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

            self.assertLess(mean_abs_error.squeeze().item(), 0.02)

            # Test variances
            test_vars = likelihood(gp_model(test_x)).variance
            self.assertAllClose(
                test_vars,
                likelihood(gp_model(test_x)).covariance_matrix.diagonal(
                    dim1=-1, dim2=-2))
            self.assertGreater(test_vars.min().item() + 0.05,
                               likelihood.noise.item())
            self.assertLess(
                test_vars.max().item() - 0.05,
                likelihood.noise.item() +
                gp_model.covar_module.base_kernel.outputscale.item())
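GPRegressionModel and make_data are helpers defined elsewhere in the test module. Because the test reads covar_module.base_kernel.outputscale, the covariance module is presumably an InducingPointKernel wrapping a ScaleKernel (the SGPR approximation); a hypothetical sketch consistent with that access pattern:

    class GPRegressionModel(gpytorch.models.ExactGP):
        # Hypothetical reconstruction of the suite's SGPR model (assumes 1-D inputs).
        def __init__(self, train_x, train_y, likelihood):
            super().__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            base_covar = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
            # The InducingPointKernel's base_kernel is the ScaleKernel above,
            # which is what covar_module.base_kernel.outputscale reaches into.
            self.covar_module = gpytorch.kernels.InducingPointKernel(
                base_covar, inducing_points=train_x[:25].view(-1, 1).clone(),
                likelihood=likelihood)

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)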
Example No. 4
    def test_regression_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y = train_data(cuda=True)
            likelihood = GaussianLikelihood().cuda()
            inducing_points = torch.linspace(0, 1,
                                             25).unsqueeze(-1).repeat(2, 1, 1)
            model = SVGPRegressionModel(inducing_points).cuda()
            mll = gpytorch.mlls.VariationalELBO(likelihood,
                                                model,
                                                num_data=train_y.size(-1))

            # Find optimal model hyperparameters
            model.train()
            likelihood.train()
            optimizer = optim.Adam([{
                "params": model.parameters()
            }, {
                "params": likelihood.parameters()
            }],
                                   lr=0.01)
            for _ in range(150):
                optimizer.zero_grad()
                output = model(train_x)
                loss = -mll(output, train_y)
                loss = loss.sum()
                loss.backward()
                optimizer.step()

            for param in model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Set back to eval mode
            model.eval()
            likelihood.eval()
            test_preds = likelihood(model(train_x)).mean.squeeze()
            mean_abs_error = torch.mean(
                torch.abs(train_y[0, :] - test_preds[0, :]) / 2)
            mean_abs_error2 = torch.mean(
                torch.abs(train_y[1, :] - test_preds[1, :]) / 2)
            self.assertLess(mean_abs_error.item(), 1e-1)
            self.assertLess(mean_abs_error2.item(), 1e-1)
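SVGPRegressionModel is likewise defined elsewhere; given the batched inducing points of shape (2, 25, 1) built above, a plausible sketch under GPyTorch's variational API (an assumption, not the suite's exact definition):

    class SVGPRegressionModel(gpytorch.models.ApproximateGP):
        # Hypothetical reconstruction of the suite's batched SVGP model.
        def __init__(self, inducing_points):
            batch_shape = inducing_points.shape[:-2]
            variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
                inducing_points.size(-2), batch_shape=batch_shape)
            variational_strategy = gpytorch.variational.VariationalStrategy(
                self, inducing_points, variational_distribution,
                learn_inducing_locations=True)
            super().__init__(variational_strategy)
            self.mean_module = gpytorch.means.ConstantMean(batch_shape=batch_shape)
            self.covar_module = gpytorch.kernels.ScaleKernel(
                gpytorch.kernels.RBFKernel(batch_shape=batch_shape),
                batch_shape=batch_shape)

        def forward(self, x):
            mean_x = self.mean_module(x)
            covar_x = self.covar_module(x)
            return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)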
Example No. 5
    def test_kissgp_gp_mean_abs_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) *
                                                      0.001).cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(list(gp_model.parameters()) +
                                   list(likelihood.parameters()),
                                   lr=0.1)
            optimizer.n_iter = 0
            with gpytorch.settings.debug(False):
                for _ in range(25):
                    optimizer.zero_grad()
                    output = gp_model(train_x)
                    loss = -mll(output, train_y)
                    loss.backward()
                    optimizer.n_iter += 1
                    optimizer.step()

                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)
                for param in likelihood.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)

                # Test the model
                gp_model.eval()
                likelihood.eval()
                test_preds = likelihood(gp_model(test_x)).mean
                mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
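make_data is another suite-level helper; the fixed noise vector torch.ones(100) * 0.001 implies it returns 100 training points. A hypothetical stand-in (the sine target and sizes are illustrative, not the suite's actual data):

    import math

    def make_data(cuda=False):
        # Hypothetical stand-in: 100 sine observations plus a test grid.
        train_x = torch.linspace(0, 1, 100)
        train_y = torch.sin(train_x * (2 * math.pi))
        test_x = torch.linspace(0, 1, 51)
        test_y = torch.sin(test_x * (2 * math.pi))
        if cuda:
            train_x, train_y = train_x.cuda(), train_y.cuda()
            test_x, test_y = test_x.cuda(), test_y.cuda()
        return train_x, train_y, test_x, test_y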
Example No. 6
    def test_gp_posterior_mean_skip_variances_slow_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, test_x, train_y, _ = self._get_data(cuda=True)
            likelihood = GaussianLikelihood()
            gp_model = ExactGPModel(train_x, train_y, likelihood)

            gp_model.cuda()
            likelihood.cuda()

            # Compute posterior distribution
            gp_model.eval()
            likelihood.eval()

            with gpytorch.settings.fast_pred_var(False):
                with gpytorch.settings.skip_posterior_variances(True):
                    mean_skip_var = gp_model(test_x).mean
                mean = gp_model(test_x).mean
                likelihood_mean = likelihood(gp_model(test_x)).mean
            self.assertTrue(torch.allclose(mean_skip_var, mean))
            self.assertTrue(torch.allclose(mean_skip_var, likelihood_mean))
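ExactGPModel is presumably the standard constant-mean, scaled-RBF exact GP used throughout GPyTorch's tests; a minimal sketch under that assumption:

    class ExactGPModel(gpytorch.models.ExactGP):
        # Assumed stand-in for the suite's ExactGPModel definition.
        def __init__(self, train_x, train_y, likelihood):
            super().__init__(train_x, train_y, likelihood)
            self.mean_module = gpytorch.means.ConstantMean()
            self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

        def forward(self, x):
            return gpytorch.distributions.MultivariateNormal(
                self.mean_module(x), self.covar_module(x))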
Example No. 7
    def test_lkj_covariance_prior_batch_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_lkj_covariance_prior_batch_log_prob(cuda=True)
Example No. 8
    def test_smoothed_box_prior_batch_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_smoothed_box_prior_batch_log_prob(cuda=True)
Example No. 9
    def test_grid_gp_mean_abs_error_2d_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_grid_gp_mean_abs_error(cuda=True, num_dim=2)
Example No. 10
    def test_multivariate_normal_prior_log_prob_log_transform_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_multivariate_normal_prior_log_prob_log_transform(
                    cuda=True)
Example No. 11
    def test_regression_error_skip_logdet_forward_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_regression_error(skip_logdet_forward=True, cuda=True)
Example No. 12
    def test_gauss_hermite_quadrature_1D_normal_batch_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_gauss_hermite_quadrature_1D_normal_batch(cuda=True)
Example No. 13
    def test_prior_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_prior(cuda=True)
Example No. 14
    def test_fantasy_updates_batch_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_fantasy_updates_batch(cuda=True)
Example No. 15
    def test_lkj_cholesky_factor_prior_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_lkj_cholesky_factor_prior_log_prob(cuda=True)
Example No. 16
    def test_simple_model_list_gp_regression_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_simple_model_list_gp_regression(cuda=True)
Example No. 17
    def test_psd_safe_cholesky_pd_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_psd_safe_cholesky_pd(cuda=True)
Example No. 18
    def test_normal_prior_log_prob_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_normal_prior_log_prob(cuda=True)
Example No. 19
    def test_kl_divergence_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_kl_divergence(cuda=True)
Example No. 20
    def test_multivariate_normal_non_lazy_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_multivariate_normal_non_lazy(cuda=True)
Example No. 21
    def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_posterior_latent_gp_and_likelihood_with_optimization(
                    cuda=True)
Example No. 22
    def test_multivariate_normal_batch_correlated_samples_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_multivariate_normal_batch_correlated_samples(
                    cuda=True)
Example No. 23
    def test_posterior_latent_gp_and_likelihood_fast_pred_var_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_posterior_latent_gp_and_likelihood_fast_pred_var(
                    cuda=True)
Example No. 24
    def test_from_independent_mvns_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_from_independent_mvns(cuda=True)
Example No. 25
    def test_regression_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            return self.test_regression_error(cuda=True)
Example No. 26
    def test_multitask_multivariate_normal_exceptions_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_multitask_multivariate_normal_exceptions(cuda=True)
Example No. 27
    def test_multitask_gp_mean_abs_error_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                self.test_multitask_gp_mean_abs_error(cuda=True)
Example No. 28
    def test_half_cauchy_prior_log_prob_log_transform_cuda(self):
        if torch.cuda.is_available():
            with least_used_cuda_device():
                return self.test_half_cauchy_prior_log_prob_log_transform(
                    cuda=True)