Example No. 1
    def test_MockModel(self):
        mp = MockPosterior()
        mm = MockModel(mp)
        X = torch.empty(0)
        self.assertEqual(mm.posterior(X), mp)
        self.assertEqual(mm.num_outputs, 0)
        mm.state_dict()
        mm.load_state_dict()
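The MockModel and MockPosterior used above are test doubles from botorch.utils.testing. A minimal sketch of comparable stand-ins is shown below (illustrative only, not the actual BoTorch helpers); it makes clear why posterior() echoes the stored posterior regardless of X, why num_outputs is 0 for an empty posterior, and why the state-dict calls are harmless no-ops.

from typing import Optional

from torch import Tensor


class StubPosterior:
    """Stand-in posterior that simply hands back pre-set tensors."""

    def __init__(self, mean: Optional[Tensor] = None, samples: Optional[Tensor] = None):
        self._mean = mean
        self._samples = samples

    @property
    def mean(self) -> Optional[Tensor]:
        return self._mean


class StubModel:
    """Stand-in model whose posterior() ignores X and returns the stored posterior."""

    def __init__(self, posterior: StubPosterior):
        self._posterior = posterior

    def posterior(self, X: Tensor) -> StubPosterior:
        return self._posterior

    @property
    def num_outputs(self) -> int:
        # 0 when the posterior carries no mean, matching the assertion above.
        mean = self._posterior.mean
        return 0 if mean is None else mean.shape[-1]

    def state_dict(self) -> dict:
        return {}  # nothing to serialize

    def load_state_dict(self, state_dict: Optional[dict] = None) -> None:
        pass  # deliberate no-op, mirroring the argument-free call in the test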
Example No. 2
class TestGetAcquisitionFunction(BotorchTestCase):
    def setUp(self):
        super().setUp()
        self.model = mock.MagicMock()
        self.objective = DummyMCObjective()
        self.X_observed = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.X_pending = torch.tensor([[1.0, 3.0, 4.0]])
        self.mc_samples = 250
        self.qmc = True
        self.ref_point = [0.0, 0.0]
        self.mo_objective = DummyMCMultiOutputObjective()
        self.Y = torch.tensor([[1.0, 2.0]])
        self.seed = 1

    @mock.patch(f"{monte_carlo.__name__}.qExpectedImprovement")
    def test_GetQEI(self, mock_acqf):
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            X_pending=self.X_pending,
        )
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        acqf = get_acquisition_function(
            acquisition_function_name="qEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        # test batched model without marginalize dim
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))

    @mock.patch(f"{monte_carlo.__name__}.qProbabilityOfImprovement")
    def test_GetQPI(self, mock_acqf):
        # basic test
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
        mock_acqf.assert_called_once_with(
            model=self.model,
            best_f=best_f,
            sampler=mock.ANY,
            objective=self.objective,
            X_pending=self.X_pending,
            tau=1e-3,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test with different tau, non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            qmc=False,
            seed=2,
            tau=1.0,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["tau"], 1.0)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            qmc=False,
            seed=2,
            tau=1.0,
        )
        # test batched model
        self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
        acqf = get_acquisition_function(
            acquisition_function_name="qPI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertTrue(acqf == mock_acqf.return_value)

    @mock.patch(f"{monte_carlo.__name__}.qNoisyExpectedImprovement")
    def test_GetQNEI(self, mock_acqf):
        # basic test
        acqf = get_acquisition_function(
            acquisition_function_name="qNEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            marginalize_dim=0,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        self.assertEqual(mock_acqf.call_count, 1)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertEqual(kwargs["marginalize_dim"], 0)
        # test with non-qmc, no X_pending
        acqf = get_acquisition_function(
            acquisition_function_name="qNEI",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=None,
            mc_samples=self.mc_samples,
            qmc=False,
            seed=2,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))
        self.assertEqual(kwargs["X_pending"], None)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.X_observed))

    @mock.patch(f"{monte_carlo.__name__}.qSimpleRegret")
    def test_GetQSR(self, mock_acqf):
        # basic test
        acqf = get_acquisition_function(
            acquisition_function_name="qSR",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        mock_acqf.assert_called_once_with(
            model=self.model,
            sampler=mock.ANY,
            objective=self.objective,
            X_pending=self.X_pending,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test with non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qSR",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            qmc=False,
            seed=2,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))

    @mock.patch(f"{monte_carlo.__name__}.qUpperConfidenceBound")
    def test_GetQUCB(self, mock_acqf):
        # make sure beta is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qUCB",
                model=self.model,
                objective=self.objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
            )
        acqf = get_acquisition_function(
            acquisition_function_name="qUCB",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            beta=0.3,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        mock_acqf.assert_called_once_with(
            model=self.model,
            beta=0.3,
            sampler=mock.ANY,
            objective=self.objective,
            X_pending=self.X_pending,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
        # test with different tau, non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qUCB",
            model=self.model,
            objective=self.objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            qmc=False,
            seed=2,
            beta=0.2,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["beta"], 0.2)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))

    @mock.patch(f"{moo_monte_carlo.__name__}.qExpectedHypervolumeImprovement")
    def test_GetQEHVI(self, mock_acqf):
        # make sure ref_point is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qEHVI",
                model=self.model,
                objective=self.mo_objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
                Y=self.Y,
            )
        # make sure Y is specified
        with self.assertRaises(ValueError):
            acqf = get_acquisition_function(
                acquisition_function_name="qEHVI",
                model=self.model,
                objective=self.mo_objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
                ref_point=self.ref_point,
            )
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=self.seed,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        self.assertTrue(acqf == mock_acqf.return_value)
        mock_acqf.assert_called_once_with(
            constraints=None,
            model=self.model,
            objective=self.mo_objective,
            ref_point=self.ref_point,
            partitioning=mock.ANY,
            sampler=mock.ANY,
            X_pending=self.X_pending,
        )
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 1)
        # test with non-qmc
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        self.assertEqual(mock_acqf.call_count, 2)
        args, kwargs = mock_acqf.call_args
        self.assertEqual(args, ())
        self.assertEqual(kwargs["ref_point"], self.ref_point)
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertIsInstance(kwargs["objective"], DummyMCMultiOutputObjective)
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)

        self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
        self.assertEqual(sampler.seed, 2)
        # test constraints
        acqf = get_acquisition_function(
            acquisition_function_name="qEHVI",
            model=self.model,
            objective=self.mo_objective,
            X_observed=self.X_observed,
            X_pending=self.X_pending,
            mc_samples=self.mc_samples,
            constraints=[lambda Y: Y[..., -1]],
            seed=2,
            qmc=False,
            ref_point=self.ref_point,
            Y=self.Y,
        )
        _, kwargs = mock_acqf.call_args
        partitioning = kwargs["partitioning"]
        self.assertEqual(partitioning.pareto_Y.shape[0], 0)

    def test_GetUnknownAcquisitionFunction(self):
        with self.assertRaises(NotImplementedError):
            get_acquisition_function(
                acquisition_function_name="foo",
                model=self.model,
                objective=self.objective,
                X_observed=self.X_observed,
                X_pending=self.X_pending,
                mc_samples=self.mc_samples,
                seed=self.seed,
            )
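The class above patches the underlying acquisition classes, so it only verifies the arguments that the factory forwards. For context, a hedged usage sketch of the same factory with a real model follows; it assumes get_acquisition_function is importable from botorch.acquisition and that IdentityMCObjective and SingleTaskGP are available as in recent BoTorch releases (defaults may differ across versions).

import torch

from botorch.acquisition import get_acquisition_function
from botorch.acquisition.objective import IdentityMCObjective
from botorch.models import SingleTaskGP

# Toy data in double precision, as BoTorch recommends.
train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

# qmc=True (the default) selects a Sobol QMC sampler; qmc=False would select an
# IID normal sampler, which is exactly what the sampler checks above distinguish.
qei = get_acquisition_function(
    acquisition_function_name="qEI",
    model=model,
    objective=IdentityMCObjective(),
    X_observed=train_X,
    mc_samples=250,
    seed=1,
)

# Evaluate the acquisition value on a batch of one q=1 candidate.
candidate = torch.rand(1, 1, 3, dtype=torch.double)
value = qei(candidate)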
Example No. 3
    def test_get_f_X_samples(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            mean = torch.zeros(5, 1, **tkwargs)
            variance = torch.ones(5, 1, **tkwargs)
            mm = MockModel(
                MockPosterior(mean=mean,
                              variance=variance,
                              samples=torch.rand(5, 1, **tkwargs)))
            # basic test
            sampler = IIDNormalSampler(1)
            acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
            acqf._setup(model=mm, sampler=sampler, cache_root=True)
            q = 3
            baseline_L = torch.eye(5 - q, **tkwargs)
            acqf._baseline_L = baseline_L
            posterior = mm.posterior(torch.rand(5, 1, **tkwargs))
            # basic test
            rv = torch.rand(1, 5, 1, **tkwargs)
            with mock.patch(
                    "botorch.acquisition.cached_cholesky.sample_cached_cholesky",
                    return_value=rv,
            ) as mock_sample_cached_cholesky:
                samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
                mock_sample_cached_cholesky.assert_called_once_with(
                    posterior=posterior,
                    baseline_L=acqf._baseline_L,
                    q=q,
                    base_samples=acqf.sampler.base_samples,
                    sample_shape=acqf.sampler.sample_shape,
                )
            self.assertTrue(torch.equal(rv, samples))

            # test fall back when sampling from cached cholesky fails
            for error_cls in (NanError, NotPSDError):
                base_samples = torch.rand(1, 5, 1, **tkwargs)
                acqf.sampler.base_samples = base_samples
                acqf._baseline_L = baseline_L
                with mock.patch(
                        "botorch.acquisition.cached_cholesky.sample_cached_cholesky",
                        side_effect=error_cls,
                ) as mock_sample_cached_cholesky:
                    with warnings.catch_warnings(
                            record=True) as ws, settings.debug(True):
                        samples = acqf._get_f_X_samples(posterior=posterior,
                                                        q_in=q)
                        mock_sample_cached_cholesky.assert_called_once_with(
                            posterior=posterior,
                            baseline_L=acqf._baseline_L,
                            q=q,
                            base_samples=base_samples,
                            sample_shape=acqf.sampler.sample_shape,
                        )
                        self.assertTrue(
                            issubclass(ws[0].category, BotorchWarning))
                        self.assertEqual(samples.shape, torch.Size([1, q, 1]))
            # test HOGP
            hogp = HigherOrderGP(torch.zeros(2, 1),
                                 torch.zeros(2, 1, 1)).eval()
            acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
            acqf._setup(model=hogp, sampler=sampler, cache_root=True)
            mock_samples = torch.rand(5, 1, 1, **tkwargs)
            posterior = MockPosterior(mean=mean,
                                      variance=variance,
                                      samples=mock_samples)
            samples = acqf._get_f_X_samples(posterior=posterior, q_in=q)
            self.assertTrue(torch.equal(samples,
                                        mock_samples[2:].unsqueeze(0)))
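The branch being exercised here is the numerical-failure fallback of the cached-Cholesky helper. The sketch below paraphrases that control flow under stated assumptions (it is not the BoTorch source; in particular, the import location of NanError and NotPSDError varies between gpytorch and linear_operator versions).

import warnings

from botorch.exceptions.warnings import BotorchWarning
from botorch.utils.low_rank import sample_cached_cholesky
# Assumption: the error classes live here in the gpytorch version used by these tests.
from gpytorch.utils.errors import NanError, NotPSDError


def get_f_X_samples_sketch(acqf, posterior, q_in):
    """Try the cached-Cholesky fast path; on numerical failure, warn and resample."""
    try:
        return sample_cached_cholesky(
            posterior=posterior,
            baseline_L=acqf._baseline_L,
            q=q_in,
            base_samples=acqf.sampler.base_samples,
            sample_shape=acqf.sampler.sample_shape,
        )
    except (NanError, NotPSDError):
        warnings.warn(
            "Sampling from the cached Cholesky factor failed; "
            "falling back to fresh posterior samples.",
            BotorchWarning,
        )
        # Draw sample_shape x n x m samples and keep only the trailing q_in points,
        # which yields the (1, q, 1) shape asserted in the test.
        samples = acqf.sampler(posterior)
        return samples[..., -q_in:, :]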
Example No. 4
    def test_sample_points_around_best(self):
        tkwargs = {"device": self.device}
        _bounds = torch.ones(2, 2)
        _bounds[1] = 2
        for dtype in (torch.float, torch.double):
            tkwargs["dtype"] = dtype
            bounds = _bounds.to(**tkwargs)
            X_train = 1 + torch.rand(20, 2, **tkwargs)
            model = MockModel(
                MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
            )
            # test NEI with X_baseline
            acqf = qNoisyExpectedImprovement(model, X_baseline=X_train)
            with mock.patch(
                "botorch.optim.initializers.sample_perturbed_subset_dims"
            ) as mock_subset_dims:
                X_rnd = sample_points_around_best(
                    acq_function=acqf,
                    n_discrete_points=4,
                    sigma=1e-3,
                    bounds=bounds,
                )
                mock_subset_dims.assert_not_called()
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
            self.assertTrue((X_rnd >= 1).all())
            self.assertTrue((X_rnd <= 2).all())
            # test model that returns a batched mean
            model = MockModel(
                MockPosterior(
                    mean=(2 * X_train + 1).sum(dim=-1, keepdim=True).unsqueeze(0)
                )
            )
            acqf = qNoisyExpectedImprovement(model, X_baseline=X_train)
            X_rnd = sample_points_around_best(
                acq_function=acqf,
                n_discrete_points=4,
                sigma=1e-3,
                bounds=bounds,
            )
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
            self.assertTrue((X_rnd >= 1).all())
            self.assertTrue((X_rnd <= 2).all())

            # test EI without X_baseline
            acqf = qExpectedImprovement(model, best_f=0.0)

            with warnings.catch_warnings(record=True) as w, settings.debug(True):

                X_rnd = sample_points_around_best(
                    acq_function=acqf,
                    n_discrete_points=4,
                    sigma=1e-3,
                    bounds=bounds,
                )
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            # set train inputs
            model.train_inputs = (X_train,)
            X_rnd = sample_points_around_best(
                acq_function=acqf,
                n_discrete_points=4,
                sigma=1e-3,
                bounds=bounds,
            )
            self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
            self.assertTrue((X_rnd >= 1).all())
            self.assertTrue((X_rnd <= 2).all())

            # test an acquisition function that has objective=None
            # and maximize=False
            pm = PosteriorMean(model, maximize=False)
            self.assertIsNone(pm.objective)
            self.assertFalse(pm.maximize)
            X_rnd = sample_points_around_best(
                acq_function=pm,
                n_discrete_points=4,
                sigma=0,
                bounds=bounds,
                best_pct=1e-8,  # ensures that we only use best value
            )
            idx = (-model.posterior(X_train).mean).argmax()
            self.assertTrue((X_rnd == X_train[idx : idx + 1]).all(dim=-1).all())

            # test acquisition function that has no model
            ff = FixedFeatureAcquisitionFunction(pm, d=2, columns=[0], values=[0])
            # set X_baseline for testing purposes
            ff.X_baseline = X_train
            with warnings.catch_warnings(record=True) as w, settings.debug(True):
                X_rnd = sample_points_around_best(
                    acq_function=ff,
                    n_discrete_points=4,
                    sigma=1e-3,
                    bounds=bounds,
                )
                self.assertEqual(len(w), 1)
                self.assertTrue(issubclass(w[-1].category, BotorchWarning))
                self.assertIsNone(X_rnd)

            # test constraints with NEHVI
            constraints = [lambda Y: Y[..., 0]]
            ref_point = torch.zeros(2, **tkwargs)
            # test cases when there are and are not any feasible points
            for any_feas in (True, False):
                Y_train = torch.stack(
                    [
                        torch.linspace(-0.5, 0.5, X_train.shape[0], **tkwargs)
                        if any_feas
                        else torch.ones(X_train.shape[0], **tkwargs),
                        X_train.sum(dim=-1),
                    ],
                    dim=-1,
                )
                moo_model = MockModel(MockPosterior(mean=Y_train, samples=Y_train))
                acqf = qNoisyExpectedHypervolumeImprovement(
                    moo_model,
                    ref_point=ref_point,
                    X_baseline=X_train,
                    constraints=constraints,
                    cache_root=False,
                )
                X_rnd = sample_points_around_best(
                    acq_function=acqf,
                    n_discrete_points=4,
                    sigma=0.0,
                    bounds=bounds,
                )
                self.assertEqual(X_rnd.shape, torch.Size([4, 2]))
                # this should be true since sigma=0
                # and we should only be returning feasible points
                violation = constraints[0](Y_train)
                neg_violation = -violation.clamp_min(0.0)
                feas = neg_violation == 0
                # n_train x n_rnd tensor of booleans marking coincident rows
                eq_mask = (X_train.unsqueeze(1) == X_rnd.unsqueeze(0)).all(dim=-1)
                if feas.any():
                    # check that all X_rnd correspond to feasible points
                    self.assertEqual(eq_mask[feas].sum(), 4)
                else:
                    idcs = torch.topk(neg_violation, k=2).indices
                    self.assertEqual(eq_mask[idcs].sum(), 4)
                self.assertTrue((X_rnd >= 1).all())
                self.assertTrue((X_rnd <= 2).all())
            # test that subset_dims is called if d>=21
            X_train = 1 + torch.rand(20, 21, **tkwargs)
            model = MockModel(
                MockPosterior(mean=(2 * X_train + 1).sum(dim=-1, keepdim=True))
            )
            bounds = torch.ones(2, 21, **tkwargs)
            bounds[1] = 2
            # test NEI with X_baseline
            acqf = qNoisyExpectedImprovement(model, X_baseline=X_train)
            with mock.patch(
                "botorch.optim.initializers.sample_perturbed_subset_dims",
                wraps=sample_perturbed_subset_dims,
            ) as mock_subset_dims:
                X_rnd = sample_points_around_best(
                    acq_function=acqf, n_discrete_points=5, sigma=1e-3, bounds=bounds
                )
            self.assertEqual(X_rnd.shape, torch.Size([5, 21]))
            self.assertTrue((X_rnd >= 1).all())
            self.assertTrue((X_rnd <= 2).all())
            mock_subset_dims.assert_called_once()
            # test tiny prob_perturb to make sure we perturb at least one dimension
            X_rnd = sample_points_around_best(
                acq_function=acqf,
                n_discrete_points=5,
                sigma=1e-3,
                bounds=bounds,
                prob_perturb=1e-8,
            )
            self.assertTrue(
                ((X_rnd.unsqueeze(0) == X_train.unsqueeze(1)).all(dim=-1)).sum() == 0
            )
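As a complement to the mocked checks above, a hedged usage sketch of sample_points_around_best with a real model follows; it assumes the function is importable from botorch.optim.initializers (as the mock.patch target above suggests) and that qNoisyExpectedImprovement accepts the model and X_baseline shown.

import torch

from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
from botorch.models import SingleTaskGP
from botorch.optim.initializers import sample_points_around_best

# Toy problem on the unit square.
bounds = torch.stack([torch.zeros(2), torch.ones(2)]).double()
train_X = torch.rand(15, 2, dtype=torch.double)
train_Y = (train_X ** 2).sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)
acqf = qNoisyExpectedImprovement(model, X_baseline=train_X)

# Perturb the best observed inputs with Gaussian noise of scale sigma and clip to
# the bounds; the result can be mixed into the raw samples used by optimize_acqf.
X_near_best = sample_points_around_best(
    acq_function=acqf,
    n_discrete_points=10,
    sigma=1e-3,
    bounds=bounds,
)
# X_near_best has shape (10, 2), i.e. n_discrete_points x d.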