コード例 #1
0
    def test_classification_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y = train_data(cuda=True)
            likelihood = BernoulliLikelihood().cuda()
            model = GPClassificationModel(train_x).cuda()
            mll = gpytorch.mlls.VariationalMarginalLogLikelihood(likelihood, model, num_data=len(train_y))

            # Find optimal model hyperparameters
            model.train()
            optimizer = optim.Adam(model.parameters(), lr=0.1)
            optimizer.n_iter = 0
            for _ in range(75):
                optimizer.zero_grad()
                output = model(train_x)
                loss = -mll(output, train_y)
                loss.backward()
                optimizer.n_iter += 1
                optimizer.step()

            for param in model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            optimizer.step()

            # Set back to eval mode
            model.eval()
            test_preds = likelihood(model(train_x)).mean.round()
            mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
            self.assertLess(mean_abs_error.item(), 1e-5)
コード例 #2
0
 def test_posterior_with_exact_computations_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             with gpytorch.settings.fast_computations(
                     covar_root_decomposition=False, log_prob=False):
                 self.test_posterior_latent_gp_and_likelihood_with_optimization(
                     cuda=True)
コード例 #3
0
    def test_regression_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y = train_data(cuda=True)
            likelihood = GaussianLikelihood().cuda()
            inducing_points = torch.linspace(0, 1,
                                             25).unsqueeze(-1).repeat(2, 1, 1)
            model = SVGPRegressionModel(inducing_points).cuda()
            mll = gpytorch.mlls.VariationalELBO(likelihood,
                                                model,
                                                num_data=train_y.size(-1))

            # Find optimal model hyperparameters
            model.train()
            likelihood.train()
            optimizer = optim.Adam([{
                "params": model.parameters()
            }, {
                "params": likelihood.parameters()
            }],
                                   lr=0.01)
            for _ in range(150):
                optimizer.zero_grad()
                output = model(train_x)
                loss = -mll(output, train_y)
                loss = loss.sum()
                loss.backward()
                optimizer.step()

            for param in model.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)
            for param in likelihood.parameters():
                self.assertTrue(param.grad is not None)
                self.assertGreater(param.grad.norm().item(), 0)

            # Set back to eval mode
            model.eval()
            likelihood.eval()
            test_preds = likelihood(model(train_x)).mean.squeeze()
            mean_abs_error = torch.mean(
                torch.abs(train_y[0, :] - test_preds[0, :]) / 2)
            mean_abs_error2 = torch.mean(
                torch.abs(train_y[1, :] - test_preds[1, :]) / 2)
            self.assertLess(mean_abs_error.item(), 1e-1)
            self.assertLess(mean_abs_error2.item(), 1e-1)
コード例 #4
0
    def test_kissgp_gp_mean_abs_error_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, train_y, test_x, test_y = make_data(cuda=True)
            likelihood = FixedNoiseGaussianLikelihood(torch.ones(100) *
                                                      0.001).cuda()
            gp_model = GPRegressionModel(train_x, train_y, likelihood).cuda()
            mll = gpytorch.mlls.ExactMarginalLogLikelihood(
                likelihood, gp_model)

            # Optimize the model
            gp_model.train()
            likelihood.train()

            optimizer = optim.Adam(list(gp_model.parameters()) +
                                   list(likelihood.parameters()),
                                   lr=0.1)
            optimizer.n_iter = 0
            with gpytorch.settings.debug(False):
                for _ in range(25):
                    optimizer.zero_grad()
                    output = gp_model(train_x)
                    loss = -mll(output, train_y)
                    loss.backward()
                    optimizer.n_iter += 1
                    optimizer.step()

                for param in gp_model.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)
                for param in likelihood.parameters():
                    self.assertTrue(param.grad is not None)
                    self.assertGreater(param.grad.norm().item(), 0)

                # Test the model
                gp_model.eval()
                likelihood.eval()
                test_preds = likelihood(gp_model(test_x)).mean
                mean_abs_error = torch.mean(torch.abs(test_y - test_preds))

            self.assertLess(mean_abs_error.squeeze().item(), 0.02)
コード例 #5
0
    def test_gp_posterior_mean_skip_variances_slow_cuda(self):
        if not torch.cuda.is_available():
            return
        with least_used_cuda_device():
            train_x, test_x, train_y, _ = self._get_data(cuda=True)
            likelihood = GaussianLikelihood()
            gp_model = ExactGPModel(train_x, train_y, likelihood)

            gp_model.cuda()
            likelihood.cuda()

            # Compute posterior distribution
            gp_model.eval()
            likelihood.eval()

            with gpytorch.settings.fast_pred_var(False):
                with gpytorch.settings.skip_posterior_variances(True):
                    mean_skip_var = gp_model(test_x).mean
                mean = gp_model(test_x).mean
                likelihood_mean = likelihood(gp_model(test_x)).mean
            self.assertTrue(torch.allclose(mean_skip_var, mean))
            self.assertTrue(torch.allclose(mean_skip_var, likelihood_mean))
コード例 #6
0
File: test_quadrature.py  Project: xz6014/gpytorch
 def test_gauss_hermite_quadrature_1D_normal_batch_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_gauss_hermite_quadrature_1D_normal_nonbatch(
                 cuda=True)
コード例 #7
0
 def test_multitask_gp_mean_abs_error_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_multitask_gp_mean_abs_error(cuda=True)
コード例 #8
0
 def test_simple_model_list_gp_regression_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_simple_model_list_gp_regression(cuda=True)
コード例 #9
0
 def test_regression_error_skip_logdet_forward_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_regression_error(skip_logdet_forward=True, cuda=True)
コード例 #10
0
 def test_lkj_covariance_prior_batch_log_prob_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_lkj_covariance_prior_batch_log_prob(cuda=True)
コード例 #11
0
 def test_posterior_latent_gp_and_likelihood_with_optimization_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_posterior_latent_gp_and_likelihood_with_optimization(
                 cuda=True)
コード例 #12
0
 def test_kl_divergence_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_kl_divergence(cuda=True)
コード例 #13
0
 def test_multivariate_normal_batch_non_lazy_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_multivariate_normal_batch_non_lazy(cuda=True)
コード例 #14
0
 def test_gamma_prior_log_prob_log_transform_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             return self.test_gamma_prior_log_prob_log_transform(cuda=True)
コード例 #15
0
 def test_regression_error_cuda(self):
     if not torch.cuda.is_available():
         return
     with least_used_cuda_device():
         return self.test_regression_error(cuda=True)
コード例 #16
0
 def test_psd_safe_cholesky_psd_cuda(self, cuda=False):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_psd_safe_cholesky_psd(cuda=True)
コード例 #17
0
 def test_smoothed_box_prior_log_prob_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             return self.test_smoothed_box_prior_log_prob(cuda=True)
コード例 #18
0
 def test_multivariate_normal_batch_correlated_sampels_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_multivariate_normal_batch_correlated_sampels(
                 cuda=True)
コード例 #19
0
 def test_normal_prior_batch_log_prob_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             return self.test_normal_prior_batch_log_prob(cuda=True)
コード例 #20
0
 def test_posterior_latent_gp_and_likelihood_fast_pred_var_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_posterior_latent_gp_and_likelihood_fast_pred_var(
                 cuda=True)
コード例 #21
0
 def test_fantasy_updates_batch_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_fantasy_updates_batch(cuda=True)
コード例 #22
0
 def test_grid_gp_mean_abs_error_2d_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_grid_gp_mean_abs_error(cuda=True, num_dim=2)
コード例 #23
0
 def test_from_independent_mvns_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_from_independent_mvns(cuda=True)
コード例 #24
0
 def test_lkj_cholesky_factor_prior_log_prob_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_lkj_cholesky_factor_prior_log_prob(cuda=True)
コード例 #25
0
 def test_prior_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_prior(cuda=True)
コード例 #26
0
 def test_multitask_multivariate_normal_cuda(self):
     if torch.cuda.is_available():
         with least_used_cuda_device():
             self.test_multitask_multivariate_normal(cuda=True)