Example #1
    def test_q_upper_confidence_bound(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
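Why the zero result is expected: qUpperConfidenceBound evaluates, per MC sample, mean + beta' * |sample - mean| with beta' = sqrt(beta * pi / 2), then takes the max over the q dimension and the mean over samples. With the all-zero mock samples above, every term vanishes. A minimal standalone sketch of that reduction (the beta' formula mirrors the botorch implementation; treat the exact reduction order as an assumption for other versions):

import math
import torch

samples = torch.zeros(2, 1, 1)  # num_samples x b x q, objective already applied
mean = samples.mean(dim=0)
beta_prime = math.sqrt(0.5 * math.pi / 2)
ucb_samples = mean + beta_prime * (samples - mean).abs()
print(ucb_samples.max(dim=-1)[0].mean(dim=0))  # tensor([0.]) -> res.item() == 0.0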
Example #2
    def test_q_probability_of_improvement(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)

            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qProbabilityOfImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.5)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(1, 1, 1, device=device, dtype=dtype, requires_grad=True)
            with warnings.catch_warnings(record=True) as ws:
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
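The 0.5 here is also by construction: qProbabilityOfImprovement smooths the improvement indicator with a sigmoid of temperature tau, and sigmoid(0) = 0.5 when the all-zero samples exactly equal best_f=0. A minimal sketch (tau=1e-3 is botorch's default; the reduction order is an assumption for other versions):

import torch

samples = torch.zeros(2, 1, 1)  # num_samples x b x q
best_f, tau = 0.0, 1e-3
max_obj = samples.max(dim=-1)[0]  # best value in each q-batch
print(torch.sigmoid((max_obj - best_f) / tau).mean(dim=0))  # tensor([0.5000])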
Example #3
  def test_batch_range(self):
      # check the batch_range default and that it can be changed
      sampler = IIDNormalSampler(num_samples=4)
      self.assertEqual(sampler.batch_range, (0, -2))
      sampler.batch_range = (-3, -2)
      self.assertEqual(sampler.batch_range, (-3, -2))
      # check that base samples are cleared after batch_range is set
      posterior = _get_test_posterior(self.device)
      _ = sampler(posterior)
      self.assertIsNotNone(sampler.base_samples)
      sampler.batch_range = (0, -2)
      self.assertIsNone(sampler.base_samples)
Example #4
 def test_get_base_sample_shape_no_collapse(self):
     sampler = IIDNormalSampler(num_samples=4, collapse_batch_dims=False)
     self.assertFalse(sampler.resample)
     self.assertEqual(sampler.sample_shape, torch.Size([4]))
     self.assertFalse(sampler.collapse_batch_dims)
     # check sample shape non-batched
     posterior = _get_posterior()
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 2, 1]))
     # check sample shape batched
     posterior = _get_posterior_batched()
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 3, 2, 1]))
Example #5
    def test_forward(self):
        for dtype in (torch.float, torch.double):

            # no resample
            sampler = IIDNormalSampler(num_samples=4, seed=1234)
            self.assertFalse(sampler.resample)
            self.assertEqual(sampler.seed, 1234)
            self.assertTrue(sampler.collapse_batch_dims)
            # check samples non-batched
            posterior = _get_posterior(device=self.device, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure samples are the same
            samples2 = sampler(posterior)
            self.assertTrue(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works when changing the dtype
            new_dtype = torch.float if dtype == torch.double else torch.double
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=new_dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1235)

            # resample
            sampler = IIDNormalSampler(num_samples=4, resample=True, seed=None)
            self.assertTrue(sampler.resample)
            self.assertTrue(sampler.collapse_batch_dims)
            initial_seed = sampler.seed
            # check samples non-batched
            posterior = _get_posterior(device=self.device, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 1)
            # ensure samples are different
            samples2 = sampler(posterior)
            self.assertFalse(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, initial_seed + 2)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(device=self.device,
                                                       dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 3)
Example #6
def _get_sampler(mc_samples: int, qmc: bool) -> MCSampler:
    """Set up MC sampler for q(N)EHVI."""
    # initialize the sampler
    seed = int(torch.randint(1, 10000, (1,)).item())
    if qmc:
        return SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
    return IIDNormalSampler(num_samples=mc_samples, seed=seed)
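A minimal usage sketch for _get_sampler, assuming the legacy botorch sampler API with the num_samples= constructor used throughout these examples (newer releases take sample_shape=torch.Size([...]) instead):

import torch

qmc_sampler = _get_sampler(mc_samples=128, qmc=True)   # SobolQMCNormalSampler
iid_sampler = _get_sampler(mc_samples=128, qmc=False)  # IIDNormalSampler
assert qmc_sampler.sample_shape == torch.Size([128])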
Example #7
    def test_forward_no_collapse(self, cuda=False):
        for dtype in (torch.float, torch.double):

            # no resample
            sampler = IIDNormalSampler(
                num_samples=4, seed=1234, collapse_batch_dims=False
            )
            self.assertFalse(sampler.resample)
            self.assertEqual(sampler.seed, 1234)
            self.assertFalse(sampler.collapse_batch_dims)
            # check samples non-batched
            posterior = _get_posterior(cuda=cuda, dtype=dtype)
            samples = sampler(posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, 1235)
            # ensure samples are the same
            samples2 = sampler(posterior)
            self.assertTrue(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, 1235)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(cuda=cuda, dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, 1236)

            # resample
            sampler = IIDNormalSampler(
                num_samples=4, resample=True, collapse_batch_dims=False
            )
            self.assertTrue(sampler.resample)
            self.assertFalse(sampler.collapse_batch_dims)
            initial_seed = sampler.seed
            # check samples non-batched
            posterior = _get_posterior(cuda=cuda, dtype=dtype)
            samples = sampler(posterior=posterior)
            self.assertEqual(samples.shape, torch.Size([4, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 1)
            # ensure samples are not the same
            samples2 = sampler(posterior)
            self.assertFalse(torch.allclose(samples, samples2))
            self.assertEqual(sampler.seed, initial_seed + 2)
            # ensure this works with a differently shaped posterior
            posterior_batched = _get_posterior_batched(cuda=cuda, dtype=dtype)
            samples_batched = sampler(posterior_batched)
            self.assertEqual(samples_batched.shape, torch.Size([4, 3, 2, 1]))
            self.assertEqual(sampler.seed, initial_seed + 3)
Example #8
 def __init__(
     self,
     num_samples: int,
     resample: bool = False,
     seed: Optional[int] = None,
     collapse_batch_dims: bool = True,
      max_num_comparisons: Optional[int] = None,
 ) -> None:
     PairwiseMCSampler.__init__(self,
                                max_num_comparisons=max_num_comparisons,
                                seed=seed)
     IIDNormalSampler.__init__(
         self,
         num_samples,
         resample=resample,
         seed=seed,
         collapse_batch_dims=collapse_batch_dims,
     )
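This cooperative __init__ follows the pattern of botorch's pairwise samplers, which inherit from both PairwiseMCSampler and IIDNormalSampler. A hedged construction sketch (the class name PairwiseIIDNormalSampler is an assumption based on that pattern):

from botorch.sampling.pairwise_samplers import PairwiseIIDNormalSampler

# 64 IID normal base samples, at most 10 pairwise comparisons per evaluation
sampler = PairwiseIIDNormalSampler(num_samples=64, max_num_comparisons=10, seed=0)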
Example #9
 def test_raises(self):
     tkwargs = {"device": self.device, "dtype": torch.double}
     with self.assertRaisesRegex(
         ValueError,
         "Expected train_X to have shape n x d and train_Y to have shape n x 1",
     ):
         SaasFullyBayesianSingleTaskGP(
             train_X=torch.rand(10, 4, **tkwargs), train_Y=torch.randn(10, **tkwargs)
         )
     with self.assertRaisesRegex(
         ValueError,
         "Expected train_X to have shape n x d and train_Y to have shape n x 1",
     ):
         SaasFullyBayesianSingleTaskGP(
             train_X=torch.rand(10, 4, **tkwargs),
             train_Y=torch.randn(12, 1, **tkwargs),
         )
     with self.assertRaisesRegex(
         ValueError,
         "Expected train_X to have shape n x d and train_Y to have shape n x 1",
     ):
         SaasFullyBayesianSingleTaskGP(
             train_X=torch.rand(10, **tkwargs),
             train_Y=torch.randn(10, 1, **tkwargs),
         )
     with self.assertRaisesRegex(
         ValueError,
         "Expected train_Yvar to be None or have the same shape as train_Y",
     ):
         SaasFullyBayesianSingleTaskGP(
             train_X=torch.rand(10, 4, **tkwargs),
             train_Y=torch.randn(10, 1, **tkwargs),
             train_Yvar=torch.rand(10, **tkwargs),
         )
     train_X, train_Y, train_Yvar, model = self._get_data_and_model(
         infer_noise=True, **tkwargs
     )
     sampler = IIDNormalSampler(num_samples=2)
     with self.assertRaisesRegex(
         NotImplementedError, "Fantasize is not implemented!"
     ):
         model.fantasize(X=torch.rand(5, 4, **tkwargs), sampler=sampler)
     # Make sure an exception is raised if the model has not been fitted
     not_fitted_error_msg = (
         "Model has not been fitted. You need to call "
         "`fit_fully_bayesian_model_nuts` to fit the model."
     )
     with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
         model.num_mcmc_samples
     with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
         model.median_lengthscale
     with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
         model.forward(torch.rand(1, 4, **tkwargs))
     with self.assertRaisesRegex(RuntimeError, not_fitted_error_msg):
         model.posterior(torch.rand(1, 4, **tkwargs))
Example #10
    def test_penalized_acquisition_function(self):
        for dtype in (torch.float, torch.double):
            mock_model = MockModel(
                MockPosterior(mean=torch.tensor([1.0]),
                              variance=torch.tensor([1.0])))
            init_point = torch.tensor([0.5, 0.5, 0.5],
                                      device=self.device,
                                      dtype=dtype)
            groups = [[0, 2], [1]]
            raw_acqf = ExpectedImprovement(model=mock_model, best_f=1.0)
            penalty = GroupLassoPenalty(init_point=init_point, groups=groups)
            lmbda = 0.1
            acqf = PenalizedAcquisitionFunction(raw_acqf=raw_acqf,
                                                penalty_func=penalty,
                                                regularization_parameter=lmbda)

            sample_point = torch.tensor([[1.0, 2.0, 3.0]],
                                        device=self.device,
                                        dtype=dtype)
            raw_value = raw_acqf(sample_point)
            penalty_value = penalty(sample_point)
            real_value = raw_value - lmbda * penalty_value
            computed_value = acqf(sample_point)
            self.assertTrue(torch.equal(real_value, computed_value))

            # testing X_pending for analytic raw_acqfn (EI)
            X_pending = torch.tensor([0.1, 0.2, 0.3],
                                     device=self.device,
                                     dtype=dtype)
            with self.assertRaises(UnsupportedError):
                acqf.set_X_pending(X_pending)

            # testing X_pending for non-analytic raw_acqfn (qEI)
            sampler = IIDNormalSampler(num_samples=2)
            raw_acqf_2 = qExpectedImprovement(model=mock_model,
                                              best_f=0,
                                              sampler=sampler)
            init_point = torch.tensor([1.0, 1.0, 1.0],
                                      device=self.device,
                                      dtype=dtype)
            l2_module = L2Penalty(init_point=init_point)
            acqf_2 = PenalizedAcquisitionFunction(
                raw_acqf=raw_acqf_2,
                penalty_func=l2_module,
                regularization_parameter=lmbda,
            )

            X_pending = torch.tensor([0.1, 0.2, 0.3],
                                     device=self.device,
                                     dtype=dtype)
            acqf_2.set_X_pending(X_pending)
            self.assertTrue(torch.equal(acqf_2.X_pending, X_pending))
Example #11
 def test_get_value_function(self):
     mm = MockModel(None)
     # test PosteriorMean
     vf = _get_value_function(mm)
     self.assertIsInstance(vf, PosteriorMean)
     self.assertIsNone(vf.objective)
     # test SimpleRegret
     obj = GenericMCObjective(lambda Y: Y.sum(dim=-1))
     sampler = IIDNormalSampler(num_samples=2)
     vf = _get_value_function(model=mm, objective=obj, sampler=sampler)
     self.assertIsInstance(vf, qSimpleRegret)
     self.assertEqual(vf.objective, obj)
     self.assertEqual(vf.sampler, sampler)
Example #12
    def test_constrained_q_expected_hypervolume_improvement(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            ref_point = [0.0, 0.0]
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]],
                **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point)
            partitioning.update(Y=pareto_Y)

            # test q=1
            # the event shape is `b x q x m` = 1 x 1 x 2
            samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            sampler = IIDNormalSampler(num_samples=1)
            X = torch.zeros(1, 1, **tkwargs)
            # test zero slack
            for eta in (1e-1, 1e-2):
                acqf = qExpectedHypervolumeImprovement(
                    model=mm,
                    ref_point=ref_point,
                    partitioning=partitioning,
                    sampler=sampler,
                    constraints=[lambda Z: torch.zeros_like(Z[..., -1])],
                    eta=eta,
                )
                res = acqf(X)
                self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4)
            # test feasible
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])],
                eta=1e-3,
            )
            res = acqf(X)
            self.assertAlmostEqual(res.item(), 1.5, places=4)
            # test infeasible
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])],
                eta=1e-3,
            )
            res = acqf(X)
            self.assertAlmostEqual(res.item(), 0.0, places=4)
Example #13
    def test_setup(self):
        mean = torch.zeros(1, 1)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        # basic test
        sampler = IIDNormalSampler(1)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        acqf._setup(model=mm, sampler=sampler)
        self.assertFalse(acqf._is_mt)
        self.assertFalse(acqf._is_deterministic)
        self.assertFalse(acqf._uses_matheron)
        self.assertFalse(acqf._cache_root)
        acqf._setup(model=mm, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._cache_root)

        # test check_sampler
        with warnings.catch_warnings(record=True) as ws, settings.debug(True):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
            self.assertEqual(len(ws), 0)

        # test collapse_batch_dims=False
        sampler = IIDNormalSampler(1, collapse_batch_dims=False)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        with self.assertRaises(UnsupportedError):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
        # test warning if base_samples is not None
        sampler = IIDNormalSampler(1)
        sampler.base_samples = torch.zeros(1, 1)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        with warnings.catch_warnings(record=True) as ws, settings.debug(True):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
            self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
        # test the base_samples are set to None
        self.assertIsNone(acqf.sampler.base_samples)
        # test model that uses matheron's rule and sampler.batch_range != (0, -1)
        hogp = HigherOrderGP(torch.zeros(1, 1), torch.zeros(1, 1, 1)).eval()
        acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
        with self.assertRaises(RuntimeError):
            acqf._setup(model=hogp, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._uses_matheron)
        self.assertTrue(acqf._is_mt)
        self.assertFalse(acqf._is_deterministic)

        # test deterministic model
        model = GenericDeterministicModel(f=lambda X: X)
        acqf = DummyCachedCholeskyAcqf(model=model, sampler=sampler)
        acqf._setup(model=model, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._is_deterministic)
        self.assertFalse(acqf._uses_matheron)
        self.assertFalse(acqf._is_mt)
        self.assertFalse(acqf._cache_root)
Example #14
 def test_get_value_function(self):
     with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 1
         mm = MockModel(None)
         # test PosteriorMean
         vf = _get_value_function(mm)
         self.assertIsInstance(vf, PosteriorMean)
         self.assertIsNone(vf.objective)
         # test SimpleRegret
         obj = GenericMCObjective(lambda Y: Y.sum(dim=-1))
         sampler = IIDNormalSampler(num_samples=2)
         vf = _get_value_function(model=mm, objective=obj, sampler=sampler)
         self.assertIsInstance(vf, qSimpleRegret)
         self.assertEqual(vf.objective, obj)
         self.assertEqual(vf.sampler, sampler)
Example #15
 def test_init(self):
     mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
     mc_points = torch.rand(2, 2)
     qNIPV = qNegIntegratedPosteriorVariance(model=mm, mc_points=mc_points)
     sampler = qNIPV.sampler
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([1]))
     self.assertFalse(sampler.resample)
     self.assertTrue(torch.equal(mc_points, qNIPV.mc_points))
     self.assertIsNone(qNIPV.X_pending)
     self.assertIsNone(qNIPV.objective)
     sampler = IIDNormalSampler(num_samples=2, resample=True)
     qNIPV = qNegIntegratedPosteriorVariance(model=mm,
                                             mc_points=mc_points,
                                             sampler=sampler)
     self.assertIsInstance(qNIPV.sampler, IIDNormalSampler)
     self.assertEqual(qNIPV.sampler.sample_shape, torch.Size([2]))
Example #16
    def test_acquisition_functions(self):
        tkwargs = {"device": self.device, "dtype": torch.double}
        train_X, train_Y, train_Yvar, model = self._get_data_and_model(
            infer_noise=True, **tkwargs
        )
        fit_fully_bayesian_model_nuts(
            model, warmup_steps=8, num_samples=5, thinning=2, disable_progbar=True
        )
        sampler = IIDNormalSampler(num_samples=2)
        acquisition_functions = [
            ExpectedImprovement(model=model, best_f=train_Y.max()),
            ProbabilityOfImprovement(model=model, best_f=train_Y.max()),
            PosteriorMean(model=model),
            UpperConfidenceBound(model=model, beta=4),
            qExpectedImprovement(model=model, best_f=train_Y.max(), sampler=sampler),
            qNoisyExpectedImprovement(model=model, X_baseline=train_X, sampler=sampler),
            qProbabilityOfImprovement(
                model=model, best_f=train_Y.max(), sampler=sampler
            ),
            qSimpleRegret(model=model, sampler=sampler),
            qUpperConfidenceBound(model=model, beta=4, sampler=sampler),
            qNoisyExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                X_baseline=train_X,
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
            ),
            qExpectedHypervolumeImprovement(
                model=ModelListGP(model, model),
                ref_point=torch.zeros(2, **tkwargs),
                sampler=sampler,
                partitioning=NondominatedPartitioning(
                    ref_point=torch.zeros(2, **tkwargs), Y=train_Y.repeat([1, 2])
                ),
            ),
        ]

        for acqf in acquisition_functions:
            for batch_shape in [[5], [6, 5, 2]]:
                test_X = torch.rand(*batch_shape, 1, 4, **tkwargs)
                self.assertEqual(acqf(test_X).shape, torch.Size(batch_shape))
Example #17
 def test_cache_root_decomposition(self):
     tkwargs = {"device": self.device}
     for dtype in (torch.float, torch.double):
         tkwargs["dtype"] = dtype
         # test mt-mvn
         train_x = torch.rand(2, 1, **tkwargs)
         train_y = torch.rand(2, 2, **tkwargs)
         test_x = torch.rand(2, 1, **tkwargs)
         model = SingleTaskGP(train_x, train_y)
         sampler = IIDNormalSampler(1)
         with torch.no_grad():
             posterior = model.posterior(test_x)
         acqf = DummyCachedCholeskyAcqf(
             model=model,
             sampler=sampler,
             objective=GenericMCObjective(lambda Y: Y[..., 0]),
         )
         baseline_L = torch.eye(2, **tkwargs)
         with mock.patch(
                 EXTRACT_BATCH_COVAR_PATH,
                 wraps=extract_batch_covar) as mock_extract_batch_covar:
             with mock.patch(CHOLESKY_PATH,
                             return_value=baseline_L) as mock_cholesky:
                 acqf._cache_root_decomposition(posterior=posterior)
                 mock_extract_batch_covar.assert_called_once_with(
                     posterior.mvn)
                 mock_cholesky.assert_called_once()
         # test mvn
         model = SingleTaskGP(train_x, train_y[:, :1])
         with torch.no_grad():
             posterior = model.posterior(test_x)
         with mock.patch(
                 EXTRACT_BATCH_COVAR_PATH) as mock_extract_batch_covar:
             with mock.patch(CHOLESKY_PATH,
                             return_value=baseline_L) as mock_cholesky:
                 acqf._cache_root_decomposition(posterior=posterior)
                 mock_extract_batch_covar.assert_not_called()
                 mock_cholesky.assert_called_once()
         self.assertTrue(torch.equal(acqf._baseline_L, baseline_L))
Example #18
 def test_get_value_function(self):
     with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 1
         mm = MockModel(None)
         # test PosteriorMean
         vf = _get_value_function(mm)
         self.assertIsInstance(vf, PosteriorMean)
         self.assertIsNone(vf.objective)
         # test SimpleRegret
         obj = GenericMCObjective(lambda Y, X: Y.sum(dim=-1))
         sampler = IIDNormalSampler(num_samples=2)
         vf = _get_value_function(model=mm, objective=obj, sampler=sampler)
         self.assertIsInstance(vf, qSimpleRegret)
         self.assertEqual(vf.objective, obj)
         self.assertEqual(vf.sampler, sampler)
         # test with project
         mock_project = mock.Mock(
             return_value=torch.ones(1, 1, 1, device=self.device)
         )
         vf = _get_value_function(
             model=mm,
             objective=obj,
             sampler=sampler,
             project=mock_project,
         )
         self.assertIsInstance(vf, ProjectedAcquisitionFunction)
         self.assertEqual(vf.objective, obj)
         self.assertEqual(vf.sampler, sampler)
         self.assertEqual(vf.project, mock_project)
         test_X = torch.rand(1, 1, 1, device=self.device)
         with mock.patch.object(
             vf, "base_value_function", __class__=torch.nn.Module, return_value=None
         ) as patch_bvf:
             vf(test_X)
             mock_project.assert_called_once_with(test_X)
             patch_bvf.assert_called_once_with(
                 torch.ones(1, 1, 1, device=self.device)
             )
Example #19
 def test_get_base_sample_shape(self):
     sampler = IIDNormalSampler(num_samples=4)
     self.assertFalse(sampler.resample)
     self.assertEqual(sampler.sample_shape, torch.Size([4]))
     self.assertTrue(sampler.collapse_batch_dims)
     # check sample shape non-batched
     posterior = _get_test_posterior(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 2, 1]))
     # check sample shape batched
     posterior = _get_test_posterior_batched(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 1, 2, 1]))
     # check sample shape with different batch range
     sampler.batch_range = (-3, -1)
     posterior = _get_test_posterior_batched(self.device)
     bss = sampler._get_base_sample_shape(posterior=posterior)
     self.assertEqual(bss, torch.Size([4, 1, 1, 1]))
Example #20
 def test_initialize_q_knowledge_gradient(self):
     for dtype in (torch.float, torch.double):
         mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
         mm = MockModel(MockPosterior(mean=mean))
          # test error when specifying neither sampler nor num_fantasies
         with self.assertRaises(ValueError):
             qKnowledgeGradient(model=mm, num_fantasies=None)
         # test error when sampler and num_fantasies arg are inconsistent
         sampler = IIDNormalSampler(num_samples=16)
         with self.assertRaises(ValueError):
             qKnowledgeGradient(model=mm, num_fantasies=32, sampler=sampler)
         # test default construction
         qKG = qKnowledgeGradient(model=mm, num_fantasies=32)
         self.assertEqual(qKG.num_fantasies, 32)
         self.assertIsInstance(qKG.sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([32]))
         self.assertIsNone(qKG.objective)
         self.assertIsNone(qKG.inner_sampler)
         self.assertIsNone(qKG.X_pending)
         self.assertIsNone(qKG.current_value)
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 32 + 3)
         # test custom construction
         obj = GenericMCObjective(lambda Y, X: Y.mean(dim=-1))
         sampler = IIDNormalSampler(num_samples=16)
         X_pending = torch.zeros(2, 2, device=self.device, dtype=dtype)
         qKG = qKnowledgeGradient(
             model=mm,
             num_fantasies=16,
             sampler=sampler,
             objective=obj,
             X_pending=X_pending,
         )
         self.assertEqual(qKG.num_fantasies, 16)
         self.assertEqual(qKG.sampler, sampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
         self.assertEqual(qKG.objective, obj)
         self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.inner_sampler.sample_shape, torch.Size([128]))
         self.assertTrue(torch.equal(qKG.X_pending, X_pending))
         self.assertIsNone(qKG.current_value)
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 16 + 3)
         # test assignment of num_fantasies from sampler if not provided
         qKG = qKnowledgeGradient(model=mm, num_fantasies=None, sampler=sampler)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([16]))
         # test custom construction with inner sampler and current value
         inner_sampler = SobolQMCNormalSampler(num_samples=256)
         current_value = torch.zeros(1, device=self.device, dtype=dtype)
         qKG = qKnowledgeGradient(
             model=mm,
             num_fantasies=8,
             objective=obj,
             inner_sampler=inner_sampler,
             current_value=current_value,
         )
         self.assertEqual(qKG.num_fantasies, 8)
         self.assertEqual(qKG.sampler.sample_shape, torch.Size([8]))
         self.assertEqual(qKG.objective, obj)
         self.assertIsInstance(qKG.inner_sampler, SobolQMCNormalSampler)
         self.assertEqual(qKG.inner_sampler, inner_sampler)
         self.assertIsNone(qKG.X_pending)
         self.assertTrue(torch.equal(qKG.current_value, current_value))
         self.assertEqual(qKG.get_augmented_q_batch_size(q=3), 8 + 3)
         # test construction with non-MC objective (ScalarizedObjective)
         qKG_s = qKnowledgeGradient(
             model=mm,
             num_fantasies=16,
             sampler=sampler,
             objective=ScalarizedObjective(weights=torch.rand(2)),
         )
         self.assertIsNone(qKG_s.inner_sampler)
         self.assertIsInstance(qKG_s.objective, ScalarizedObjective)
         # test error if no objective and multi-output model
         mean2 = torch.zeros(1, 2, device=self.device, dtype=dtype)
         mm2 = MockModel(MockPosterior(mean=mean2))
         with self.assertRaises(UnsupportedError):
             qKnowledgeGradient(model=mm2)
Example #21
File: utils.py Project: mcx/botorch
def get_acquisition_function(
    acquisition_function_name: str,
    model: Model,
    objective: MCAcquisitionObjective,
    X_observed: Tensor,
    X_pending: Optional[Tensor] = None,
    constraints: Optional[List[Callable[[Tensor], Tensor]]] = None,
    mc_samples: int = 500,
    qmc: bool = True,
    seed: Optional[int] = None,
    **kwargs,
) -> monte_carlo.MCAcquisitionFunction:
    r"""Convenience function for initializing botorch acquisition functions.

    Args:
        acquisition_function_name: Name of the acquisition function.
        model: A fitted model.
        objective: A MCAcquisitionObjective.
        X_observed: A `m1 x d`-dim Tensor of `m1` design points that have
            already been observed.
        X_pending: A `m2 x d`-dim Tensor of `m2` design points whose evaluation
            is pending.
        constraints: A list of callables, each mapping a Tensor of dimension
            `sample_shape x batch-shape x q x m` to a Tensor of dimension
            `sample_shape x batch-shape x q`, where negative values imply
            feasibility. Used when constraint_transforms are not passed
            as part of the objective.
        mc_samples: The number of samples to use for (q)MC evaluation of the
            acquisition function.
        qmc: If True, use quasi-Monte-Carlo sampling (instead of iid).
        seed: If provided, perform deterministic optimization (i.e. the
            function to optimize is fixed and not stochastic).

    Returns:
        The requested acquisition function.

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> obj = LinearMCObjective(weights=torch.tensor([1.0, 2.0]))
        >>> acqf = get_acquisition_function("qEI", model, obj, train_X)
    """
    # initialize the sampler
    if qmc:
        sampler = SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
    else:
        sampler = IIDNormalSampler(num_samples=mc_samples, seed=seed)
    # instantiate and return the requested acquisition function
    if acquisition_function_name == "qEI":
        best_f = objective(model.posterior(X_observed).mean).max().item()
        return monte_carlo.qExpectedImprovement(
            model=model,
            best_f=best_f,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
        )
    elif acquisition_function_name == "qPI":
        best_f = objective(model.posterior(X_observed).mean).max().item()
        return monte_carlo.qProbabilityOfImprovement(
            model=model,
            best_f=best_f,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
            tau=kwargs.get("tau", 1e-3),
        )
    elif acquisition_function_name == "qNEI":
        return monte_carlo.qNoisyExpectedImprovement(
            model=model,
            X_baseline=X_observed,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
            prune_baseline=kwargs.get("prune_baseline", False),
        )
    elif acquisition_function_name == "qSR":
        return monte_carlo.qSimpleRegret(model=model,
                                         sampler=sampler,
                                         objective=objective,
                                         X_pending=X_pending)
    elif acquisition_function_name == "qUCB":
        if "beta" not in kwargs:
            raise ValueError("`beta` must be specified in kwargs for qUCB.")
        return monte_carlo.qUpperConfidenceBound(
            model=model,
            beta=kwargs["beta"],
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
        )
    elif acquisition_function_name == "qEHVI":
        # pyre-fixme [16]: `Model` has no attribute `train_targets`
        try:
            ref_point = kwargs["ref_point"]
        except KeyError:
            raise ValueError(
                "`ref_point` must be specified in kwargs for qEHVI")
        try:
            Y = kwargs["Y"]
        except KeyError:
            raise ValueError("`Y` must be specified in kwargs for qEHVI")
        # get feasible points
        if constraints is not None:
            feas = torch.stack([c(Y) <= 0 for c in constraints],
                               dim=-1).all(dim=-1)
            Y = Y[feas]
        obj = objective(Y)
        partitioning = NondominatedPartitioning(
            ref_point=torch.as_tensor(ref_point,
                                      dtype=Y.dtype,
                                      device=Y.device),
            Y=obj,
            alpha=kwargs.get("alpha", 0.0),
        )
        return moo_monte_carlo.qExpectedHypervolumeImprovement(
            model=model,
            ref_point=ref_point,
            partitioning=partitioning,
            sampler=sampler,
            objective=objective,
            constraints=constraints,
            X_pending=X_pending,
        )
    raise NotImplementedError(
        f"Unknown acquisition function {acquisition_function_name}")
Example #22
    def _instantiate_acqf(
        self,
        model: Model,
        objective: AcquisitionObjective,
        model_dependent_kwargs: Dict[str, Any],
        objective_thresholds: Optional[Tensor] = None,
        X_pending: Optional[Tensor] = None,
        X_baseline: Optional[Tensor] = None,
    ) -> None:
        # Extract model dependent kwargs
        outcome_constraints = model_dependent_kwargs.pop("outcome_constraints")
        # Replicate `get_EHVI` transformation code
        X_observed = X_baseline
        if X_observed is None:
            raise ValueError("There are no feasible observed points.")
        if objective_thresholds is None:
            raise ValueError("Objective Thresholds required")
        with torch.no_grad():
            Y = model.posterior(X_observed).mean

        # For EHVI acquisition functions we pass the constraint transform directly.
        if outcome_constraints is None:
            cons_tfs = None
        else:
            cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
        num_objectives = objective_thresholds.shape[0]

        mc_samples = self.options.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES)
        qmc = self.options.get("qmc", True)
        alpha = self.options.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives),
        )
        # this selects the objectives (a subset of the outcomes) and sets each
        # objective threshold to have the proper optimization direction
        ref_point = objective(objective_thresholds).tolist()

        # initialize the sampler
        seed = int(torch.randint(1, 10000, (1,)).item())
        if qmc:
            sampler = SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
        else:
            sampler = IIDNormalSampler(
                num_samples=mc_samples, seed=seed
            )  # pragma: nocover
        if not ref_point:
            raise ValueError(
                "`ref_point` must be specified in kwargs for qEHVI"
            )  # pragma: nocover
        # get feasible points
        if cons_tfs is not None:
            # pyre-ignore [16]: `Tensor` has no attribute `all`.
            feas = torch.stack([c(Y) <= 0 for c in cons_tfs], dim=-1).all(dim=-1)
            Y = Y[feas]
        obj = objective(Y)
        partitioning = NondominatedPartitioning(
            ref_point=torch.as_tensor(ref_point, dtype=Y.dtype, device=Y.device),
            Y=obj,
            alpha=alpha,
        )
        self.acqf = self._botorch_acqf_class(  # pyre-ignore[28]: Some kwargs are
            # not expected in base `AcquisitionFunction` but are expected in
            # its subclasses.
            model=model,
            ref_point=ref_point,
            partitioning=partitioning,
            sampler=sampler,
            objective=objective,
            constraints=cons_tfs,
            X_pending=X_pending,
        )
Example #23
    def forward(self,
                X: Tensor,
                num_samples: int = 1,
                observation_noise: bool = False) -> Tensor:
        r"""Sample from the model posterior.

        Args:
            X: A `batch_shape x N x d`-dim Tensor from which to sample (in the `N`
                dimension) according to the maximum posterior value under the objective.
            num_samples: The number of samples to draw.
            observation_noise: If True, sample with observation noise.

        Returns:
            A `batch_shape x num_samples x d`-dim Tensor of samples from `X`, where
            `X[..., i, :]` is the `i`-th sample.
        """
        posterior = self.model.posterior(X,
                                         observation_noise=observation_noise)
        if isinstance(self.objective, ScalarizedObjective):
            posterior = self.objective(posterior)

        sampler = IIDNormalSampler(num_samples=num_samples,
                                   collapse_batch_dims=False,
                                   resample=True)
        samples = sampler(posterior)  # num_samples x batch_shape x N x m
        if isinstance(self.objective, ScalarizedObjective):
            obj = samples.squeeze(-1)  # num_samples x batch_shape x N
        else:
            obj = self.objective(samples)  # num_samples x batch_shape x N
        if self.replacement:
            # if we allow replacement then things are simple(r)
            idcs = torch.argmax(obj, dim=-1)
        else:
            # if we need to deduplicate we have to do some tensor acrobatics
            # first we get the indices associated w/ the num_samples top samples
            _, idcs_full = torch.topk(obj, num_samples, dim=-1)
            # generate some indices to smartly index into the lower triangle of
            # idcs_full (broadcasting across batch dimensions)
            ridx, cindx = torch.tril_indices(num_samples, num_samples)
            # pick the unique indices in order - since we look at the lower triangle
            # of the index matrix and we don't sort, this achieves deduplication
            sub_idcs = idcs_full[ridx, ..., cindx]
            if sub_idcs.ndim == 1:
                idcs = _flip_sub_unique(sub_idcs, num_samples)
            elif sub_idcs.ndim == 2:
                # TODO: Find a better way to do this
                n_b = sub_idcs.size(-1)
                idcs = torch.stack(
                    [
                        _flip_sub_unique(sub_idcs[:, i], num_samples)
                        for i in range(n_b)
                    ],
                    dim=-1,
                )
            else:
                # TODO: Find a general way to do this efficiently.
                raise NotImplementedError(
                    "MaxPosteriorSampling without replacement for more than a single "
                    "batch dimension is not yet implemented.")
        # idcs is num_samples x batch_shape, to index into X we need to permute for it
        # to have shape batch_shape x num_samples
        if idcs.ndim > 1:
            idcs = idcs.permute(*range(1, idcs.ndim), 0)
        # in order to use gather, we need to repeat the index tensor d times
        idcs = idcs.unsqueeze(-1).expand(*idcs.shape, X.size(-1))
        # now if the model is batched batch_shape will not necessarily be the
        # batch_shape of X, so we expand X to the proper shape
        Xe = X.expand(*obj.shape[1:], X.size(-1))
        # finally we can gather along the N dimension
        return torch.gather(Xe, -2, idcs)
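A minimal usage sketch, assuming this forward belongs to botorch's MaxPosteriorSampling strategy (which wraps a fitted model and an optional replacement flag):

import torch
from botorch.generation.sampling import MaxPosteriorSampling
from botorch.models import SingleTaskGP

train_X = torch.rand(10, 2)
train_Y = train_X.norm(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

strategy = MaxPosteriorSampling(model=model, replacement=False)
candidates = strategy(torch.rand(50, 2), num_samples=3)  # 3 x 2 Tensor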
Example #24
    def _get_best_point_acqf(
        self,
        X_observed: Tensor,
        objective_weights: Tensor,
        mc_samples: int = 512,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        seed_inner: Optional[int] = None,
        qmc: bool = True,
    ) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
        model = self.model
        fixed_features = fixed_features or {}
        target_fidelities = target_fidelities or {}
        objective = _get_objective(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )
        if isinstance(objective, ScalarizedObjective):
            acq_function = PosteriorMean(
                model=model,
                objective=objective  # pyre-ignore: [6]
            )
        elif isinstance(objective, MCAcquisitionObjective):
            if qmc:
                sampler = SobolQMCNormalSampler(num_samples=mc_samples,
                                                seed=seed_inner)
            else:
                sampler = IIDNormalSampler(num_samples=mc_samples,
                                           seed=seed_inner)
            acq_function = qSimpleRegret(
                model=model,
                sampler=sampler,
                objective=objective  # pyre-ignore [6]
            )
        else:
            raise UnsupportedError(
                f"Unknown objective type: {objective.__class__}"  # pragma: nocover
            )

        if self.fidelity_features:
            # we need to optimize at the target fidelities
            if any(f in self.fidelity_features for f in fixed_features):
                raise RuntimeError(
                    "Fixed features cannot also be fidelity features")
            elif not set(self.fidelity_features) == set(target_fidelities):
                raise RuntimeError(
                    "Must provide a target fidelity for every fidelity feature"
                )
            # make sure to not modify fixed_features in-place
            fixed_features = {**fixed_features, **target_fidelities}
        elif target_fidelities:
            raise RuntimeError(
                "Must specify fidelity_features in fit() when using target fidelities"
            )

        if fixed_features:
            acq_function = FixedFeatureAcquisitionFunction(
                acq_function=acq_function,
                d=X_observed.size(-1),
                columns=list(fixed_features.keys()),
                values=list(fixed_features.values()),
            )
            non_fixed_idcs = [
                i for i in range(self.Xs[0].size(-1))
                if i not in fixed_features
            ]
        else:
            non_fixed_idcs = None

        return acq_function, non_fixed_idcs
Example #25
    def test_q_upper_confidence_bound_batch(self):
        # TODO: T41739913 Implement tests for all MCAcquisitionFunctions
        for dtype in (torch.float, torch.double):
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)

            # test batch mode, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 1, 1))
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
Example #26
def prune_inferior_points(
    model: Model,
    X: Tensor,
    objective: Optional[MCAcquisitionObjective] = None,
    num_samples: int = 2048,
    max_frac: float = 1.0,
) -> Tensor:
    r"""Prune points from an input tensor that are unlikely to be the best point.

    Given a model, an objective, and an input tensor `X`, this function returns
    the subset of points in `X` that have some probability of being the best
    point under the objective. This function uses sampling to estimate the
    probabilities; the higher the number of points `n` in `X`, the larger the
    number of samples `num_samples` should be to obtain accurate estimates.

    Args:
        model: A fitted model. Batched models are currently not supported.
        X: An input tensor of shape `n x d`. Batched inputs are currently not
            supported.
        objective: The objective under which to evaluate the posterior.
        num_samples: The number of samples used to compute empirical
            probabilities of being the best point.
        max_frac: The maximum fraction of points to retain. Must satisfy
            `0 < max_frac <= 1`. Ensures that the number of elements in the
            returned tensor does not exceed `ceil(max_frac * n)`.

    Returns:
        An `n' x d`-dim Tensor containing a subset of the points in `X`, where

            n' = min(N_nz, ceil(max_frac * n))

        with `N_nz` the number of points in `X` that have non-zero (empirical,
        under `num_samples` samples) probability of being the best point.
    """
    if X.ndim > 2:
        # TODO: support batched inputs (req. dealing with ragged tensors)
        raise UnsupportedError(
            "Batched inputs `X` are currently unsupported by prune_inferior_points"
        )
    max_points = math.ceil(max_frac * X.size(-2))
    if max_points < 1 or max_points > X.size(-2):
        raise ValueError(f"max_frac must take values in (0, 1], is {max_frac}")
    with torch.no_grad():
        posterior = model.posterior(X=X)
    if posterior.event_shape.numel() > SobolEngine.MAXDIM:
        if settings.debug.on():
            warnings.warn(
                f"Sample dimension q*m={posterior.event_shape.numel()} exceeding Sobol "
                f"max dimension ({SobolEngine.MAXDIM}). Using iid samples instead.",
                SamplingWarning,
            )
        sampler = IIDNormalSampler(num_samples=num_samples)
    else:
        sampler = SobolQMCNormalSampler(num_samples=num_samples)
    samples = sampler(posterior)
    if objective is None:
        objective = IdentityMCObjective()
    obj_vals = objective(samples)
    if obj_vals.ndim > 2:
        # TODO: support batched inputs (req. dealing with ragged tensors)
        raise UnsupportedError(
            "Batched models are currently unsupported by prune_inferior_points"
        )
    is_best = torch.argmax(obj_vals, dim=-1)
    idcs, counts = torch.unique(is_best, return_counts=True)

    if len(idcs) > max_points:
        # keep the `max_points` points that were most frequently the best;
        # note that `order_idcs` indexes into `idcs`, not directly into `X`
        counts, order_idcs = torch.sort(counts, descending=True)
        idcs = idcs[order_idcs[:max_points]]

    return X[idcs]
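
A minimal usage sketch for `prune_inferior_points` (hypothetical data and model, not part of the snippet above):

import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood

# hypothetical training data for illustration
train_X = torch.rand(20, 2)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))
# retain at most half the points, keeping only those with non-zero
# empirical probability of being the best
X_pruned = prune_inferior_points(model, X=train_X, max_frac=0.5)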
Example #27
    def test_q_expected_hypervolume_improvement(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            ref_point = [0.0, 0.0]
            tkwargs["dtype"] = dtype
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]],
                **tkwargs)
            partitioning = NondominatedPartitioning(num_outcomes=2)
            # the event shape is `b x q x m` = 1 x 1 x 2
            samples = torch.zeros(1, 1, 2, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # test error if pareto_Y is not initialized in the partitioning
            with self.assertRaises(BotorchError):
                qExpectedHypervolumeImprovement(model=mm,
                                                ref_point=ref_point,
                                                partitioning=partitioning)
            partitioning.update(Y=pareto_Y)
            # test error if ref point has wrong shape
            with self.assertRaises(ValueError):
                qExpectedHypervolumeImprovement(model=mm,
                                                ref_point=ref_point[:1],
                                                partitioning=partitioning)

            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            sampler = IIDNormalSampler(num_samples=1)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            # check ref point
            self.assertTrue(
                torch.equal(acqf.ref_point, torch.tensor(ref_point,
                                                         **tkwargs)))
            # check cached subset indices (used in the inclusion-exclusion computation)
            self.assertTrue(hasattr(acqf, "q_subset_indices"))
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0]], device=self.device),
                ))

            # test q=2
            X2 = torch.zeros(2, 1, **tkwargs)
            samples2 = torch.zeros(1, 2, 2, **tkwargs)
            mm2 = MockModel(MockPosterior(samples=samples2))
            acqf.model = mm2
            res = acqf(X2)
            self.assertEqual(res.item(), 0.0)
            # check cached indices
            self.assertTrue(hasattr(acqf, "q_subset_indices"))
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0], [1]], device=self.device),
                ))
            self.assertIn("q_choose_2", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_2"],
                    torch.tensor([[0, 1]], device=self.device),
                ))
            self.assertNotIn("q_choose_3", acqf.q_subset_indices)
            # now back to q=1 and make sure all caches were cleared
            acqf.model = mm
            res = acqf(X)
            self.assertNotIn("q_choose_2", acqf.q_subset_indices)
            self.assertIn("q_choose_1", acqf.q_subset_indices)
            self.assertTrue(
                torch.equal(
                    acqf.q_subset_indices["q_choose_1"],
                    torch.tensor([[0]], device=self.device),
                ))

            X = torch.zeros(1, 1, **tkwargs)
            samples = torch.zeros(1, 1, 2, **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
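            # base samples are constructed lazily, with shape
            # `sample_shape x batch_shape x q x m` = 2 x 1 x 1 x 2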
            self.assertEqual(acqf.sampler.base_samples.shape,
                             torch.Size([2, 1, 1, 2]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape,
                             torch.Size([2, 1, 1, 2]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape,
                             torch.Size([2, 1, 1, 2]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(1, 1, 1, requires_grad=True, **tkwargs)
            with warnings.catch_warnings(
                    record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

            # test objective
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                objective=IdentityMCMultiOutputObjective(),
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # Test that the hypervolume improvement is correct for a given sample
            # test q = 1
            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            sampler = IIDNormalSampler(num_samples=1)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 1.5)
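            # the improvement region of (6.5, 4.5) is the box between the
            # neighbors (5, 5) and (8.5, 3.5): (6.5 - 5.0) * (4.5 - 3.5) = 1.5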
            # test q = 1, does not contribute
            samples = torch.tensor([0.0, 1.0], **tkwargs).view(1, 1, 2)
            sampler = IIDNormalSampler(num_samples=1)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test q = 2, both points contribute
            X = torch.zeros(2, 1, **tkwargs)
            samples = torch.tensor([[6.5, 4.5], [7.0, 4.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 1.75)
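            # by inclusion-exclusion: 1.5 from (6.5, 4.5) + 1.0 from (7, 4)
            # - 0.75 overlap = 1.75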

            # test q = 2, only 1 point contributes
            samples = torch.tensor([[6.5, 4.5], [6.0, 4.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 1.5)

            # test q = 2, neither contributes
            samples = torch.tensor([[2.0, 2.0], [0.0, 0.1]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test q = 2, test point better than current-best second objective
            samples = torch.tensor([[6.5, 4.5], [6.0, 6.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf.model = mm
            res = acqf(X)
            self.assertEqual(res.item(), 8.0)
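            # (6, 6) alone adds 7.5 and (6.5, 4.5) alone adds 1.5; their
            # overlap is 1.0, so the joint improvement is 7.5 + 1.5 - 1.0 = 8.0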

            # test q = 2, test point better than current-best first objective
            samples = torch.tensor([[6.5, 4.5], [9.0, 2.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 2.0)
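            # (9, 2) dominates (9, 1) and adds 0.5; together with the disjoint
            # 1.5 from (6.5, 4.5) the joint improvement is 2.0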
            # test q = 3, all contribute
            X = torch.zeros(3, 1, **tkwargs)
            samples = torch.tensor([[6.5, 4.5], [9.0, 2.0], [7.0, 4.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 2.25)
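            # individual contributions are 1.5 + 0.5 + 1.0; subtracting the
            # 0.75 overlap of (6.5, 4.5) and (7, 4) gives 2.25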
            # test q = 3, not all contribute
            samples = torch.tensor([[6.5, 4.5], [9.0, 2.0], [7.0, 5.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 3.5)
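            # (7, 5) dominates (6.5, 4.5), so only (7, 5) (adding 3.0) and the
            # disjoint (9, 2) (adding 0.5) contribute: 3.5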
            # test q = 3, none contribute
            samples = torch.tensor([[0.0, 4.5], [1.0, 2.0], [3.0, 0.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test m = 3, q = 1
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0],
                 [1.0, 3.0, 4.0]],
                **tkwargs,
            )
            partitioning = NondominatedPartitioning(num_outcomes=3, Y=pareto_Y)
            samples = torch.tensor([[1.0, 2.0, 6.0]], **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            ref_point = [-1.0] * 3
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.zeros(1, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 12.0)
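            # the box spanned by (1, 2, 6) over the ref point has volume
            # 2 * 3 * 7 = 42, of which the front already dominates 30,
            # leaving an improvement of 12.0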

            # change reference point
            ref_point = [0.0] * 3
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 4.0)
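            # with ref point [0, 0, 0] the box shrinks to 1 * 2 * 6 = 12,
            # of which 8 is already dominated, leaving 4.0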

            # test m = 3, no contribution
            ref_point = [1.0] * 3
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test m = 3, q = 2
            pareto_Y = torch.tensor(
                [[4.0, 2.0, 3.0], [3.0, 5.0, 1.0], [2.0, 4.0, 2.0]], **tkwargs)
            samples = torch.tensor([[1.0, 2.0, 6.0], [1.0, 3.0, 4.0]],
                                   **tkwargs).unsqueeze(0)
            mm = MockModel(MockPosterior(samples=samples))
            ref_point = [-1.0] * 3
            partitioning = NondominatedPartitioning(num_outcomes=3, Y=pareto_Y)
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
            )
            X = torch.zeros(2, 2, **tkwargs)
            res = acqf(X)
            self.assertEqual(res.item(), 22.0)
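            # the two sample boxes have union volume 42 + 40 - 30 = 52, of
            # which the front dominates 30, so the joint improvement is 22.0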
Example #28
    def test_q_expected_improvement(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # test shifting best_f value
            acqf = qExpectedImprovement(model=mm, best_f=-1, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 1.0)

            # test size verification of best_f
            with self.assertRaises(ValueError):
                qExpectedImprovement(
                    model=mm, best_f=torch.zeros(2, device=self.device, dtype=dtype)
                )

            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qExpectedImprovement(model=mm, best_f=0, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))

        # test bad objective type
        obj = ScalarizedObjective(
            weights=torch.rand(2, device=self.device, dtype=dtype)
        )
        with self.assertRaises(UnsupportedError):
            qExpectedImprovement(model=mm, best_f=0, sampler=sampler, objective=obj)
Example #29
def get_acquisition_function(
    acquisition_function_name: str,
    model: Model,
    objective: MCAcquisitionObjective,
    X_observed: Tensor,
    X_pending: Optional[Tensor] = None,
    mc_samples: int = 500,
    qmc: bool = True,
    seed: Optional[int] = None,
    **kwargs,
) -> monte_carlo.MCAcquisitionFunction:
    r"""Convenience function for initializing botorch acquisition functions.

    Args:
        acquisition_function_name: Name of the acquisition function.
        model: A fitted model.
        objective: A MCAcquisitionObjective.
        X_observed: An `m1 x d`-dim Tensor of `m1` design points that have
            already been observed.
        X_pending: An `m2 x d`-dim Tensor of `m2` design points whose
            evaluation is pending.
        mc_samples: The number of samples to use for (q)MC evaluation of the
            acquisition function.
        qmc: If True, use quasi-Monte-Carlo sampling (instead of iid).
        seed: If provided, perform deterministic optimization (i.e. the
            function to optimize is fixed and not stochastic).

    Returns:
        The requested acquisition function.

    Example:
        >>> model = SingleTaskGP(train_X, train_Y)
        >>> obj = LinearMCObjective(weights=torch.tensor([1.0, 2.0]))
        >>> acqf = get_acquisition_function("qEI", model, obj, train_X)
    """
    # initialize the sampler
    if qmc:
        sampler = SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
    else:
        sampler = IIDNormalSampler(num_samples=mc_samples, seed=seed)
    # instantiate and return the requested acquisition function
    if acquisition_function_name == "qEI":
        best_f = objective(model.posterior(X_observed).mean).max().item()
        return monte_carlo.qExpectedImprovement(
            model=model,
            best_f=best_f,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
        )
    elif acquisition_function_name == "qPI":
        best_f = objective(model.posterior(X_observed).mean).max().item()
        return monte_carlo.qProbabilityOfImprovement(
            model=model,
            best_f=best_f,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
            tau=kwargs.get("tau", 1e-3),
        )
    elif acquisition_function_name == "qNEI":
        return monte_carlo.qNoisyExpectedImprovement(
            model=model,
            X_baseline=X_observed,
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
            prune_baseline=kwargs.get("prune_baseline", False),
        )
    elif acquisition_function_name == "qSR":
        return monte_carlo.qSimpleRegret(model=model,
                                         sampler=sampler,
                                         objective=objective,
                                         X_pending=X_pending)
    elif acquisition_function_name == "qUCB":
        if "beta" not in kwargs:
            raise ValueError("`beta` must be specified in kwargs for qUCB.")
        return monte_carlo.qUpperConfidenceBound(
            model=model,
            beta=kwargs["beta"],
            sampler=sampler,
            objective=objective,
            X_pending=X_pending,
        )
    raise NotImplementedError(
        f"Unknown acquisition function {acquisition_function_name}"
    )
Example #30
    def test_q_simple_regret_batch(self):
        # the event shape is `b x q x t` = 2 x 2 x 1
        for dtype in (torch.float, torch.double):
            samples = torch.zeros(2, 2, 1, device=self.device, dtype=dtype)
            samples[0, 0, 0] = 1.0
            mm = MockModel(MockPosterior(samples=samples))
            # X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)

            # test batch mode
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)

            # test batch mode, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 1, 1))
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # test batch mode, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qSimpleRegret(model=mm, sampler=sampler)
            res = acqf(X)  # 1-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))
            res = acqf(X.expand(2, 1, 1))  # 2-dim batch
            self.assertEqual(res[0].item(), 1.0)
            self.assertEqual(res[1].item(), 0.0)
            # the base samples should have the batch dim collapsed
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 2, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X.expand(2, 1, 1))
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))