Example #1

A shared helper for the PairwiseGP tests below: it builds a random mini preference dataset and constructs a PairwiseGP from it.
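
The snippets in this section are methods lifted from a BoTorch test class, so their imports are omitted. A hedged reconstruction of the names they use is below; the module paths match the older BoTorch releases these tests appear to target (newer releases moved the samplers to botorch.sampling.normal and take sample_shape instead of num_samples).

    import itertools
    import warnings

    import torch
    from botorch.acquisition.objective import LearnedObjective
    from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
    from botorch.exceptions import OptimizationWarning, UnsupportedError
    from botorch.fit import fit_gpytorch_model
    from botorch.models import SingleTaskGP
    from botorch.models.deterministic import FixedSingleSampleModel, PosteriorMeanModel
    from botorch.models.pairwise_gp import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood
    from botorch.models.transforms.input import Normalize
    from botorch.posteriors import GPyTorchPosterior
    from botorch.sampling.samplers import IIDNormalSampler, SobolQMCNormalSampler
    from gpytorch.kernels import LinearKernel, RBFKernel, ScaleKernel
    from gpytorch.likelihoods.noise_models import HomoskedasticNoise
    from gpytorch.means import ConstantMean
    from gpytorch.priors import GammaPrior, SmoothedBoxPrior
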
    def _get_model_and_data(self, batch_shape, X_dim=2, **tkwargs):
        train_X, train_Y, train_comp = self._make_rand_mini_data(
            batch_shape=batch_shape, X_dim=X_dim, **tkwargs)

        model_kwargs = {"datapoints": train_X, "comparisons": train_comp}
        model = PairwiseGP(**model_kwargs)
        return model, model_kwargs
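
The helper above delegates to a _make_rand_mini_data method that is not shown. A minimal sketch of what it might look like, assuming the [winner, loser] row convention that the torch.topk check in Example #4 relies on:

    def _make_rand_mini_data(batch_shape, X_dim=2, **tkwargs):
        # two datapoints per batch, so there is exactly one comparison pair
        train_X = torch.rand(*batch_shape, 2, X_dim, **tkwargs)
        train_Y = train_X.sum(dim=-1, keepdim=True)  # toy latent utility
        # order the pair by utility: [winner index, loser index]
        train_comp = torch.argsort(
            train_Y.squeeze(-1), dim=-1, descending=True
        ).unsqueeze(-2)
        return train_X, train_Y, train_comp
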
Example #2

Tests LearnedObjective output shapes under the default single-sample sampler, a 16-sample SobolQMCNormalSampler, and a deterministic PosteriorMeanModel, plus the deterministic-model/sampler incompatibility.
    def test_learned_preference_objective(self):
        X_dim = 2
        train_X = torch.rand(2, X_dim)
        train_comps = torch.LongTensor([[0, 1]])
        pref_model = PairwiseGP(train_X, train_comps)

        og_sample_shape = 3
        batch_size = 2
        n = 8
        test_X = torch.rand(torch.Size(
            (og_sample_shape, batch_size, n, X_dim)))

        # test default setting where sampler = IIDNormalSampler(num_samples=1)
        pref_obj = LearnedObjective(pref_model=pref_model)
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([og_sample_shape, batch_size, n]))

        # test when sampler has num_samples = 16
        num_samples = 16
        pref_obj = LearnedObjective(
            pref_model=pref_model,
            sampler=SobolQMCNormalSampler(num_samples=num_samples),
        )
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([num_samples * og_sample_shape, batch_size, n]),
        )

        # test posterior mean
        mean_pref_model = PosteriorMeanModel(model=pref_model)
        pref_obj = LearnedObjective(pref_model=mean_pref_model)
        self.assertEqual(
            pref_obj(test_X).shape,
            torch.Size([og_sample_shape, batch_size, n]))

        # cannot use a deterministic model together with a sampler
        with self.assertRaises(AssertionError):
            LearnedObjective(
                pref_model=mean_pref_model,
                sampler=SobolQMCNormalSampler(num_samples=num_samples),
            )
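
The assertions encode how LearnedObjective handles sampling: the sampler's num_samples multiplies the leading sample dimension of the output, so the default single-sample sampler preserves the input's sample shape while a 16-sample sampler scales it by 16. A standalone sketch of the same call pattern (assumes the imports above; shapes are illustrative):

    pref_model = PairwiseGP(torch.rand(2, 2), torch.LongTensor([[0, 1]]))
    pref_obj = LearnedObjective(
        pref_model=pref_model,
        sampler=SobolQMCNormalSampler(num_samples=16),
    )
    samples = torch.rand(3, 2, 8, 2)  # sample shape x batch x n x attribute dim
    print(pref_obj(samples).shape)  # torch.Size([48, 2, 8]), i.e. 16 * 3 samples
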
Example #3

Tests AnalyticExpectedUtilityOfBestOption (EUBO) with an outcome model wrapped in FixedSingleSampleModel, with a previous winner, and with outcome_model=None.
    def test_analytic_eubo(self):
        tkwargs = {"dtype": torch.double}
        X_dim = 3
        Y_dim = 2
        X = torch.rand(2, X_dim, **tkwargs)
        Y = torch.rand(2, Y_dim, **tkwargs)
        comps = torch.tensor([[1, 0]], dtype=torch.long)

        standard_bounds = torch.zeros(2, X.shape[-1])
        standard_bounds[1] = 1

        model = SingleTaskGP(X, Y)
        pref_model = PairwiseGP(Y, comps)

        # Test with an outcome model and a preference model
        one_sample_outcome_model = FixedSingleSampleModel(model=model)
        eubo = AnalyticExpectedUtilityOfBestOption(
            pref_model=pref_model, outcome_model=one_sample_outcome_model)

        # test forward with different number of points
        good_X = torch.rand(2, X_dim, **tkwargs)
        eubo(good_X)

        bad_X = torch.rand(3, X_dim, **tkwargs)
        with self.assertRaises(UnsupportedError):
            eubo(bad_X)

        good_X = torch.rand(1, X_dim, **tkwargs)
        previous_winner = torch.rand(1, Y_dim, **tkwargs)
        eubo_with_winner = AnalyticExpectedUtilityOfBestOption(
            pref_model=pref_model,
            outcome_model=one_sample_outcome_model,
            previous_winner=previous_winner,
        )
        eubo_with_winner(good_X)

        # Test with outcome_model=None
        AnalyticExpectedUtilityOfBestOption(pref_model=pref_model,
                                            outcome_model=None)
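
FixedSingleSampleModel is what makes the "analytic" part of EUBO possible here: it collapses a stochastic outcome model into a deterministic one. A conceptual sketch of the idea, not the library implementation (the class name below is ours):

    class FixedSingleSampleSketch:
        """Freeze one standard-normal draw and reuse it on every call."""

        def __init__(self, model, num_outputs):
            self.model = model
            self.w = torch.randn(num_outputs)  # the fixed base sample

        def __call__(self, X):
            post = self.model.posterior(X)
            # a single deterministic outcome sample: mean + w * std
            return post.mean + self.w * post.variance.sqrt()
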
Example #4

Tests PairwiseGP end to end: fitting, error handling, initialization defaults, custom noise and covariance modules, jitter, and posterior shapes with and without observation noise.
    def test_pairwise_gp(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # test training
            # regular training
            mll = PairwiseLaplaceMarginalLogLikelihood(model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 2}, max_retries=1)
            # prior training
            prior_m = PairwiseGP(None, None)
            with self.assertRaises(RuntimeError):
                prior_m(train_X)
            # forward in training mode with non-training data
            custom_m = PairwiseGP(**model_kwargs)
            other_X = torch.rand(batch_shape + torch.Size([3, X_dim]),
                                 **tkwargs)
            other_comp = train_comp.clone()
            with self.assertRaises(RuntimeError):
                custom_m(other_X)
            custom_mll = PairwiseLaplaceMarginalLogLikelihood(custom_m).to(
                **tkwargs)
            post = custom_m(train_X)
            with self.assertRaises(RuntimeError):
                custom_mll(post, other_comp)

            # setting jitter = 0 with a singular covar will raise error
            sing_train_X = torch.ones(batch_shape + torch.Size([10, X_dim]),
                                      **tkwargs)
            with self.assertRaises(RuntimeError):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=RuntimeWarning)
                    custom_m = PairwiseGP(sing_train_X, train_comp, jitter=0)
                    custom_m.posterior(sing_train_X)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, RBFKernel)
            self.assertIsInstance(model.covar_module.lengthscale_prior,
                                  GammaPrior)
            self.assertEqual(model.num_outputs, 1)

            # test custom noise prior
            custom_noise_prior = GammaPrior(concentration=2.0, rate=1.0)
            custom_noise_module = HomoskedasticNoise(
                noise_prior=custom_noise_prior)
            custom_m = PairwiseGP(**model_kwargs,
                                  noise_module=custom_noise_module)
            self.assertEqual(custom_m.noise_module.noise_prior.concentration,
                             torch.tensor(2.0))
            self.assertEqual(custom_m.noise_module.noise_prior.rate,
                             torch.tensor(1.0))
            # test custom models
            custom_m = PairwiseGP(**model_kwargs, covar_module=LinearKernel())
            self.assertIsInstance(custom_m.covar_module, LinearKernel)
            # std_noise setter
            custom_m.std_noise = 123
            self.assertTrue(torch.all(custom_m.std_noise == 123))
            # prior prediction
            prior_m = PairwiseGP(None, None)
            prior_m.eval()
            post = prior_m.posterior(train_X)
            self.assertIsInstance(post, GPyTorchPosterior)

            # test methods that are not commonly or explicitly used
            # _calc_covar with observation noise
            no_noise_cov = model._calc_covar(train_X,
                                             train_X,
                                             observation_noise=False)
            noise_cov = model._calc_covar(train_X,
                                          train_X,
                                          observation_noise=True)
            diag_diff = (noise_cov - no_noise_cov).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    model.std_noise.expand(diag_diff.shape),
                    rtol=1e-4,
                    atol=1e-5,
                ))
            # test adding jitter
            pd_mat = torch.eye(2, 2)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                jittered_pd_mat = model._add_jitter(pd_mat)
            diag_diff = (jittered_pd_mat - pd_mat).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    torch.full_like(diag_diff, model._jitter),
                    atol=model._jitter / 10,
                ))

            # test initial utility value
            util_comp = torch.topk(model.utility, k=2,
                                   dim=-1).indices.unsqueeze(-2)
            self.assertTrue(torch.all(util_comp == train_comp))

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, 1])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # expect to raise error when output_indices is not None
            with self.assertRaises(RuntimeError):
                model.posterior(X, output_indices=[0])

            # test re-evaluating utility when it's None
            model.utility = None
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)

            # test adding observation noise
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            self.assertEqual(posterior_pred.variance.shape, expected_shape)
            pvar = posterior_pred.variance
            reshaped_noise = model.std_noise.unsqueeze(-2).expand(
                posterior.variance.shape)
            pvar_exp = posterior.variance + reshaped_noise
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            # test adding observation noise in batch mode
            posterior_pred = model.posterior(X, observation_noise=True)
            self.assertIsInstance(posterior_pred, GPyTorchPosterior)
            self.assertEqual(posterior_pred.mean.shape, expected_shape)
            pvar = posterior_pred.variance
            reshaped_noise = model.std_noise.unsqueeze(-2).expand(
                posterior.variance.shape)
            pvar_exp = posterior.variance + reshaped_noise
            self.assertTrue(
                torch.allclose(pvar, pvar_exp, rtol=1e-4, atol=1e-5))
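
Outside the test harness, the fit-and-predict path exercised above reduces to a few lines. A sketch with random data (assumes the imports shown earlier):

    train_X = torch.rand(4, 2, dtype=torch.double)
    train_comp = torch.LongTensor([[0, 1], [2, 3]])  # rows are [winner, loser]
    model = PairwiseGP(train_X, train_comp)
    mll = PairwiseLaplaceMarginalLogLikelihood(model)
    fit_gpytorch_model(mll)
    posterior = model.posterior(torch.rand(3, 2, dtype=torch.double))
    print(posterior.mean.shape)  # torch.Size([3, 1])
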
Example #5

Tests PairwiseGP.condition_on_observations: prior-mode conditioning, fantasies at new and shared inputs, and broadcasting against batched models.
    def test_condition_on_observations(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # evaluate model
            model.posterior(torch.rand(torch.Size([4, X_dim]), **tkwargs))
            # test condition_on_observations

            # test condition_on_observations with prior mode
            prior_m = PairwiseGP(None, None)
            cond_m = prior_m.condition_on_observations(train_X, train_comp)
            self.assertTrue(cond_m.datapoints is train_X)
            self.assertTrue(cond_m.comparisons is train_comp)

            # fantasize at different input points
            fant_shape = torch.Size([2])
            X_fant, Y_fant, comp_fant = self._make_rand_mini_data(
                batch_shape=fant_shape + batch_shape, X_dim=X_dim, **tkwargs)

            # cannot condition on non-pairwise Ys
            with self.assertRaises(RuntimeError):
                model.condition_on_observations(X_fant, comp_fant[..., 0])
            cm = model.condition_on_observations(X_fant, comp_fant)
            # make sure it's a deep copy
            self.assertTrue(model is not cm)

            # fantasize at same input points (check proper broadcasting)
            cm_same_inputs = model.condition_on_observations(
                X_fant[0], comp_fant)

            test_Xs = [
                # test broadcasting single input across fantasy and model batches
                torch.rand(4, X_dim, **tkwargs),
                # separate input for each model batch and broadcast across
                # fantasy batches
                torch.rand(batch_shape + torch.Size([4, X_dim]), **tkwargs),
                # separate input for each model and fantasy batch
                torch.rand(fant_shape + batch_shape + torch.Size([4, X_dim]),
                           **tkwargs),
            ]
            for test_X in test_Xs:
                posterior = cm.posterior(test_X)
                self.assertEqual(posterior.mean.shape,
                                 fant_shape + batch_shape + torch.Size([4, 1]))
                posterior_same_inputs = cm_same_inputs.posterior(test_X)
                self.assertEqual(
                    posterior_same_inputs.mean.shape,
                    fant_shape + batch_shape + torch.Size([4, 1]),
                )

                # check that fantasies of batched model are correct
                if len(batch_shape) > 0 and test_X.dim() == 2:
                    state_dict_non_batch = {
                        key: (val[0] if val.numel() > 1 else val)
                        for key, val in model.state_dict().items()
                    }
                    model_kwargs_non_batch = {
                        "datapoints": model_kwargs["datapoints"][0],
                        "comparisons": model_kwargs["comparisons"][0],
                    }
                    model_non_batch = model.__class__(**model_kwargs_non_batch)
                    model_non_batch.load_state_dict(state_dict_non_batch)
                    model_non_batch.eval()
                    model_non_batch.posterior(
                        torch.rand(torch.Size([4, X_dim]), **tkwargs))
                    cm_non_batch = model_non_batch.condition_on_observations(
                        X_fant[0][0], comp_fant[:, 0, :])
                    non_batch_posterior = cm_non_batch.posterior(test_X)
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mean[:, 0, ...],
                            non_batch_posterior.mean,
                            atol=1e-3,
                        ))
                    self.assertTrue(
                        torch.allclose(
                            posterior_same_inputs.mvn.
                            covariance_matrix[:, 0, :, :],
                            non_batch_posterior.mvn.covariance_matrix,
                            atol=1e-3,
                        ))
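
Distilled from the test above: conditioning a fitted PairwiseGP on fresh comparisons returns a new, deep-copied model rather than updating in place. A sketch continuing from the model fitted after Example #4:

    new_X = torch.rand(2, 2, dtype=torch.double)
    new_comp = torch.LongTensor([[1, 0]])  # point 1 beats point 0
    fantasy_model = model.condition_on_observations(new_X, new_comp)
    assert fantasy_model is not model  # a deep copy, as the test asserts
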
Example #6

A second version of the PairwiseGP end-to-end test, extending Example #4 with input_transform and strict set_train_data coverage.
    def test_pairwise_gp(self):
        for batch_shape, dtype in itertools.product(
            (torch.Size(), torch.Size([2])), (torch.float, torch.double)):
            tkwargs = {"device": self.device, "dtype": dtype}
            X_dim = 2

            model, model_kwargs = self._get_model_and_data(
                batch_shape=batch_shape, X_dim=X_dim, **tkwargs)
            train_X = model_kwargs["datapoints"]
            train_comp = model_kwargs["comparisons"]

            # test training
            # regular training
            mll = PairwiseLaplaceMarginalLogLikelihood(model).to(**tkwargs)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=OptimizationWarning)
                fit_gpytorch_model(mll, options={"maxiter": 2}, max_retries=1)
            # prior training
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            with self.assertRaises(RuntimeError):
                prior_m(train_X)
            # forward in training mode with non-training data
            custom_m = PairwiseGP(**model_kwargs)
            other_X = torch.rand(batch_shape + torch.Size([3, X_dim]),
                                 **tkwargs)
            other_comp = train_comp.clone()
            with self.assertRaises(RuntimeError):
                custom_m(other_X)
            custom_mll = PairwiseLaplaceMarginalLogLikelihood(custom_m).to(
                **tkwargs)
            post = custom_m(train_X)
            with self.assertRaises(RuntimeError):
                custom_mll(post, other_comp)

            # setting jitter = 0 with a singular covar will raise error
            sing_train_X = torch.ones(batch_shape + torch.Size([10, X_dim]),
                                      **tkwargs)
            with self.assertRaises(RuntimeError):
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", category=RuntimeWarning)
                    custom_m = PairwiseGP(sing_train_X, train_comp, jitter=0)
                    custom_m.posterior(sing_train_X)

            # test init
            self.assertIsInstance(model.mean_module, ConstantMean)
            self.assertIsInstance(model.covar_module, ScaleKernel)
            self.assertIsInstance(model.covar_module.base_kernel, RBFKernel)
            self.assertIsInstance(
                model.covar_module.base_kernel.lengthscale_prior, GammaPrior)
            self.assertIsInstance(model.covar_module.outputscale_prior,
                                  SmoothedBoxPrior)
            self.assertEqual(model.num_outputs, 1)
            self.assertEqual(model.batch_shape, batch_shape)

            # test custom models
            custom_m = PairwiseGP(**model_kwargs, covar_module=LinearKernel())
            self.assertIsInstance(custom_m.covar_module, LinearKernel)

            # prior prediction
            prior_m = PairwiseGP(None, None).to(**tkwargs)
            prior_m.eval()
            post = prior_m.posterior(train_X)
            self.assertIsInstance(post, GPyTorchPosterior)

            # test adding jitter
            pd_mat = torch.eye(2, 2)
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=RuntimeWarning)
                jittered_pd_mat = model._add_jitter(pd_mat)
            diag_diff = (jittered_pd_mat - pd_mat).diagonal(dim1=-2, dim2=-1)
            self.assertTrue(
                torch.allclose(
                    diag_diff,
                    torch.full_like(diag_diff, model._jitter),
                    atol=model._jitter / 10,
                ))

            # test initial utility value
            util_comp = torch.topk(model.utility, k=2,
                                   dim=-1).indices.unsqueeze(-2)
            self.assertTrue(torch.all(util_comp == train_comp))

            # test posterior
            # test non batch evaluation
            X = torch.rand(batch_shape + torch.Size([3, X_dim]), **tkwargs)
            expected_shape = batch_shape + torch.Size([3, 1])
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)
            self.assertEqual(posterior.variance.shape, expected_shape)

            # expect to raise error when output_indices is not None
            with self.assertRaises(RuntimeError):
                model.posterior(X, output_indices=[0])

            # test re-evaluating utility when it's None
            model.utility = None
            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)

            # test batch evaluation
            X = torch.rand(2, *batch_shape, 3, X_dim, **tkwargs)
            expected_shape = torch.Size([2]) + batch_shape + torch.Size([3, 1])

            posterior = model.posterior(X)
            self.assertIsInstance(posterior, GPyTorchPosterior)
            self.assertEqual(posterior.mean.shape, expected_shape)

            # test input_transform
            # the untransformed datapoints should be stored
            normalize_tf = Normalize(d=2,
                                     bounds=torch.tensor([[0, 0], [0.5, 1.5]]))
            model = PairwiseGP(**model_kwargs, input_transform=normalize_tf)
            self.assertTrue(torch.all(model.datapoints == train_X))

            # test set_train_data strict mode
            model = PairwiseGP(**model_kwargs)
            changed_train_X = train_X.unsqueeze(0)
            changed_train_comp = train_comp.unsqueeze(0)
            # expect an error when setting data with a different shape
            with self.assertRaises(RuntimeError):
                model.set_train_data(changed_train_X,
                                     changed_train_comp,
                                     strict=True)

            # the same datapoints with changed comparisons will also raise an error
            with self.assertRaises(RuntimeError):
                model.set_train_data(train_X, changed_train_comp, strict=True)
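
Compared with Example #4, this version appears to target a later BoTorch revision: the default covariance is a ScaleKernel-wrapped RBFKernel rather than a bare RBFKernel. The input_transform assertion boils down to the following sketch (continuing from the fit sketch after Example #4):

    tf = Normalize(d=2, bounds=torch.tensor([[0.0, 0.0], [0.5, 1.5]],
                                            dtype=torch.double))
    model = PairwiseGP(train_X, train_comp, input_transform=tf)
    # the untransformed datapoints are stored, per the assertion above
    assert torch.all(model.datapoints == train_X)
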