Example #1
    def test_pairwise_gp(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # test training
            # regular training
            mll = PairwiseLaplaceMarginalLogLikelihood(model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 2}, max_retries=1)
            # prior training
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            with self.assertRaises(RuntimeError):
                prior_m(train_X)
            # forward in training mode with non-training data
            custom_m = PairwiseGP(**model_kwargs)
            other_X = torch.rand(batch_shape + torch.Size([3, X_dim]),
                                 **tkwargs)
            other_comp = train_comp.clone()
            with self.assertRaises(RuntimeError):
                custom_m(other_X)
            custom_mll = PairwiseLaplaceMarginalLogLikelihood(custom_m).to(
                **tkwargs)
            post = custom_m(train_X)
            with self.assertRaises(RuntimeError):
                custom_mll(post, other_comp)

            # setting jitter = 0 with a singular covar will raise an error
            sing_train_X = torch.ones(batch_shape + torch.Size([10, X_dim]),
                                      **tkwargs)
            with self.assertRaises(RuntimeError):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=RuntimeWarning)
                    custom_m = PairwiseGP(sing_train_X, train_comp, jitter=0)
                    custom_m.posterior(sing_train_X)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, RBFKernel)
            self.assertIsInstance(model.covar_module.lengthscale_prior,
                                  GammaPrior)
            self.assertEqual(model.num_outputs, 1)

            # test custom noise prior
            custom_noise_prior = GammaPrior(concentration=2.0, rate=1.0)
            custom_noise_module = HomoskedasticNoise(
                noise_prior=custom_noise_prior)
            custom_m = PairwiseGP(**model_kwargs,
                                  noise_module=custom_noise_module)
            self.assertEqual(custom_m.noise_module.noise_prior.concentration,
                             torch.tensor(2.0))
            self.assertEqual(custom_m.noise_module.noise_prior.rate,
                             torch.tensor(1.0))
            # test custom models
            custom_m = PairwiseGP(**model_kwargs, covar_module=LinearKernel())
            self.assertIsInstance(custom_m.covar_module, LinearKernel)
            # std_noise setter
            custom_m.std_noise = 123
            self.assertTrue(torch.all(custom_m.std_noise == 123))
            # prior prediction
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            prior_m.eval()
            post = prior_m.posterior(train_X)
            self.assertIsInstance(post, GPyTorchPosterior)

            # test methods that are not commonly or explicitly used
            # _calc_covar with observation noise
            no_noise_cov = model._calc_covar(train_X,
                                             train_X,
                                             observation_noise=False)
            noise_cov = model._calc_covar(train_X,
                                          train_X,
                                          observation_noise=True)
            diag_diff = (noise_cov - no_noise_cov).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    model.std_noise.expand(diag_diff.shape),
                    rtol=1e-4,
                    atol=1e-5,
                ))
            # test adding jitter via _add_jitter
            pd_mat = torch.eye(2, 2)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                jittered_pd_mat = model._add_jitter(pd_mat)
            diag_diff = (jittered_pd_mat - pd_mat).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    torch.full_like(diag_diff, model._jitter),
                    atol=model._jitter / 10,
                ))

            # test initial utility val
            util_comp = torch.topk(model.utility, k=2,
                                   dim=-1).indices.unsqueeze(-2)
            self.assertTrue(torch.all(util_comp == train_comp))

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, 1])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # expect an error when output_indices is not None
            with self.assertRaises(RuntimeError):
                model.posterior(X, output_indices=[0])

            # test re-evaluating utility when it's None
            model.utility = None
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            pvar = posterior_pred.variance
            reshaped_noise = model.std_noise.unsqueeze(-2).expand(
                posterior.variance.shape)
            pvar_exp = posterior.variance + reshaped_noise
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            pvar = posterior_pred.variance
            reshaped_noise = model.std_noise.unsqueeze(-2).expand(
                posterior.variance.shape)
            pvar_exp = posterior.variance + reshaped_noise
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
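
Both snippets appear to be revisions of test_pairwise_gp from BoTorch's test
suite (test/models/test_pairwise_gp.py); they run inside a BotorchTestCase
subclass that provides self.device, alongside a _get_model_and_data helper
defined in the same test module. A minimal sketch of the imports Example #1
assumes, with module paths as in the BoTorch/GPyTorch releases these tests
target (verify against your installed versions):

import itertools
import warnings

import torch
from botorch.exceptions.warnings import OptimizationWarning
from botorch.fit import fit_gpytorch_model
from botorch.models.pairwise_gp import (
    PairwiseGP,
    PairwiseLaplaceMarginalLogLikelihood,
)
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.kernels import LinearKernel, RBFKernel
from gpytorch.likelihoods.noise_models import HomoskedasticNoise
from gpytorch.means import ConstantMean
from gpytorch.priors import GammaPrior

Example #2 additionally uses ScaleKernel and SmoothedBoxPrior (from gpytorch)
and Normalize (from botorch.models.transforms.input).
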
Example #2
    def test_pairwise_gp(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # test training
            # regular training
            mll = PairwiseLaplaceMarginalLogLikelihood(model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 2}, max_retries=1)
            # prior training
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            with self.assertRaises(RuntimeError):
                prior_m(train_X)
            # forward in training mode with non-training data
            custom_m = PairwiseGP(**model_kwargs)
            other_X = torch.rand(batch_shape + torch.Size([3, X_dim]),
                                 **tkwargs)
            other_comp = train_comp.clone()
            with self.assertRaises(RuntimeError):
                custom_m(other_X)
            custom_mll = PairwiseLaplaceMarginalLogLikelihood(custom_m).to(
                **tkwargs)
            post = custom_m(train_X)
            with self.assertRaises(RuntimeError):
                custom_mll(post, other_comp)

            # setting jitter = 0 with a singular covar will raise an error
            sing_train_X = torch.ones(batch_shape + torch.Size([10, X_dim]),
                                      **tkwargs)
            with self.assertRaises(RuntimeError):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=RuntimeWarning)
                    custom_m = PairwiseGP(sing_train_X, train_comp, jitter=0)
                    custom_m.posterior(sing_train_X)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            self.assertIsInstance(model.covar_module.base_kernel, RBFKernel)
            self.assertIsInstance(
                model.covar_module.base_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.covar_module.outputscale_prior,
                                  SmoothedBoxPrior)
            self.assertEqual(model.num_outputs, 1)
            self.assertEqual(model.batch_shape, batch_shape)

            # test custom models
            custom_m = PairwiseGP(**model_kwargs, covar_module=LinearKernel())
            self.assertIsInstance(custom_m.covar_module, LinearKernel)

            # prior prediction
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            prior_m.eval()
            post = prior_m.posterior(train_X)
            self.assertIsInstance(post, GPyTorchPosterior)

            # test adding jitter via _add_jitter
            pd_mat = torch.eye(2, 2)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                jittered_pd_mat = model._add_jitter(pd_mat)
            diag_diff = (jittered_pd_mat - pd_mat).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    torch.full_like(diag_diff, model._jitter),
                    atol=model._jitter / 10,
                ))

            # test initial utility val
            util_comp = torch.topk(model.utility, k=2,
                                   dim=-1).indices.unsqueeze(-2)
            self.assertTrue(torch.all(util_comp == train_comp))

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, 1])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # expect an error when output_indices is not None
            with self.assertRaises(RuntimeError):
                model.posterior(X, output_indices=[0])

            # test re-evaluating utility when it's None
            model.utility = None
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)

            # test input_transform
            # the untransformed datapoints should be stored
            normalize_tf = Normalize(d=2,
                                     bounds=torch.tensor([[0, 0], [0.5, 1.5]]))
            model = PairwiseGP(**model_kwargs, input_transform=normalize_tf)
            self.assertTrue(torch.all(model.datapoints == train_X))

            # test set_train_data strict mode
            model = PairwiseGP(**model_kwargs)
            changed_train_X = train_X.unsqueeze(0)
            changed_train_comp = train_comp.unsqueeze(0)
            # expect an error when setting data with a different shape
            with self.assertRaises(RuntimeError):
                model.set_train_data(changed_train_X,
                                     changed_train_comp,
                                     strict=True)

            # the same datapoints with changed comparisons should also raise an error
            with self.assertRaises(RuntimeError):
                model.set_train_data(train_X, changed_train_comp, strict=True)
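
Outside the test harness, the workflow these tests exercise looks roughly like
the sketch below. This is a minimal, hypothetical example with made-up toy
data; it uses the same single-argument PairwiseLaplaceMarginalLogLikelihood
constructor the tests do (newer BoTorch releases expect (likelihood, model)
instead):

import torch
from botorch.fit import fit_gpytorch_model
from botorch.models.pairwise_gp import (
    PairwiseGP,
    PairwiseLaplaceMarginalLogLikelihood,
)

# Toy preference data: 6 items with 2 features each; each comparison row
# holds (winner_index, loser_index) into the datapoints tensor.
datapoints = torch.rand(6, 2, dtype=torch.double)
comparisons = torch.tensor([[0, 1], [2, 3], [4, 5]])

model = PairwiseGP(datapoints, comparisons)
mll = PairwiseLaplaceMarginalLogLikelihood(model)
fit_gpytorch_model(mll)

# Posterior over the latent utility at new candidates; mean shape is (4, 1).
posterior = model.posterior(torch.rand(4, 2, dtype=torch.double))
print(posterior.mean.shape)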