Example #1
    def test_probability_of_improvement(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([0.0], device=self.device,
                                dtype=dtype).view(1, 1)
            variance = torch.ones(1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            module = ProbabilityOfImprovement(model=mm, best_f=1.96)
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            pi = module(X)
            pi_expected = torch.tensor(0.0250, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))

            module = ProbabilityOfImprovement(model=mm,
                                              best_f=1.96,
                                              maximize=False)
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            pi = module(X)
            pi_expected = torch.tensor(0.9750, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(pi, pi_expected, atol=1e-4))

            # check for proper error if multi-output model
            mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
            variance2 = torch.ones_like(mean2)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            with self.assertRaises(UnsupportedError):
                ProbabilityOfImprovement(model=mm2, best_f=0.0)
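
Note: these snippets come from BoTorch's test suite and omit their imports and test-class scaffolding. A minimal sketch of the boilerplate Example #1 appears to assume is shown below; the class name is arbitrary and the import paths are an assumption inferred from the `botorch.utils.testing.MockModel` target patched in later examples, not a verbatim copy of the original file.

# Assumed scaffolding sketch (not part of the original snippet).
import torch

from botorch.acquisition.analytic import ProbabilityOfImprovement
from botorch.exceptions.errors import UnsupportedError
from botorch.utils.testing import BotorchTestCase, MockModel, MockPosterior


class TestProbabilityOfImprovement(BotorchTestCase):
    # BotorchTestCase provides the `self.device` attribute used throughout.
    def test_probability_of_improvement(self):
        ...  # body as in Example #1 above
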
Example #2
    def test_upper_confidence_bound(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[0.0]], device=self.device, dtype=dtype)
            variance = torch.tensor([[1.0]], device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            module = UpperConfidenceBound(model=mm, beta=1.0)
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            ucb = module(X)
            ucb_expected = torch.tensor([1.0], device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))

            module = UpperConfidenceBound(model=mm, beta=1.0, maximize=False)
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)
            ucb = module(X)
            ucb_expected = torch.tensor([-1.0],
                                        device=self.device,
                                        dtype=dtype)
            self.assertTrue(torch.allclose(ucb, ucb_expected, atol=1e-4))

            # check for proper error if multi-output model
            mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
            variance2 = torch.rand(1, 2, device=self.device, dtype=dtype)
            mm2 = MockModel(MockPosterior(mean=mean2, variance=variance2))
            module2 = UpperConfidenceBound(model=mm2, beta=1.0)
            with self.assertRaises(UnsupportedError):
                module2(X)
Example #3
    def test_expected_improvement(self):
        for dtype in (torch.float, torch.double):
            mean = torch.tensor([[-0.5]], device=self.device, dtype=dtype)
            variance = torch.ones(1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))

            # basic test
            module = ExpectedImprovement(model=mm, best_f=0.0)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.19780,
                                       device=self.device,
                                       dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))

            # test maximize=False (minimization)
            module = ExpectedImprovement(model=mm, best_f=0.0, maximize=False)
            X = torch.empty(1, 1, device=self.device, dtype=dtype)  # dummy
            ei = module(X)
            ei_expected = torch.tensor(0.6978, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
            with self.assertRaises(UnsupportedError):
                module.set_X_pending(None)

            # test posterior transform (single-output)
            mean = torch.tensor([0.5], device=self.device, dtype=dtype)
            covar = torch.tensor([[0.16]], device=self.device, dtype=dtype)
            mvn = MultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([0.5], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.2601, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))

            # test posterior transform (multi-output)
            mean = torch.tensor([[-0.25, 0.5]],
                                device=self.device,
                                dtype=dtype)
            covar = torch.tensor([[[0.5, 0.125], [0.125, 0.5]]],
                                 device=self.device,
                                 dtype=dtype)
            mvn = MultitaskMultivariateNormal(mean, covar)
            p = GPyTorchPosterior(mvn)
            mm = MockModel(p)
            weights = torch.tensor([2.0, 1.0], device=self.device, dtype=dtype)
            transform = ScalarizedPosteriorTransform(weights)
            ei = ExpectedImprovement(model=mm,
                                     best_f=0.0,
                                     posterior_transform=transform)
            X = torch.rand(1, 2, device=self.device, dtype=dtype)
            ei_expected = torch.tensor(0.6910, device=self.device, dtype=dtype)
            self.assertTrue(torch.allclose(ei(X), ei_expected, atol=1e-4))
Example #4
 def test_prune_inferior_points(self):
     for dtype in (torch.float, torch.double):
         X = torch.rand(3, 2, device=self.device, dtype=dtype)
         # the event shape is `q x t` = 3 x 1
         samples = torch.tensor([[-1.0], [0.0], [1.0]],
                                device=self.device,
                                dtype=dtype)
         mm = MockModel(MockPosterior(samples=samples))
         # test that a batched X raises errors
         with self.assertRaises(UnsupportedError):
             prune_inferior_points(model=mm, X=X.expand(2, 3, 2))
         # test that a batched model raises errors (event shape is `q x t` = 3 x 1)
         mm2 = MockModel(MockPosterior(samples=samples.expand(2, 3, 1)))
         with self.assertRaises(UnsupportedError):
             prune_inferior_points(model=mm2, X=X)
         # test that invalid max_frac is checked properly
         with self.assertRaises(ValueError):
             prune_inferior_points(model=mm, X=X, max_frac=1.1)
         # test basic behaviour
         X_pruned = prune_inferior_points(model=mm, X=X)
         self.assertTrue(torch.equal(X_pruned, X[[-1]]))
         # test custom objective
         neg_id_obj = GenericMCObjective(lambda X: -X.squeeze(-1))
         X_pruned = prune_inferior_points(model=mm,
                                          X=X,
                                          objective=neg_id_obj)
         self.assertTrue(torch.equal(X_pruned, X[[0]]))
         # test non-repeated samples (requires mocking out MockPosterior's rsample)
         samples = torch.tensor(
             [[[3.0], [0.0], [0.0]], [[0.0], [2.0], [0.0]],
              [[0.0], [0.0], [1.0]]],
             device=self.device,
             dtype=dtype,
         )
         with mock.patch.object(MockPosterior,
                                "rsample",
                                return_value=samples):
             mm = MockModel(MockPosterior(samples=samples))
             X_pruned = prune_inferior_points(model=mm, X=X)
         self.assertTrue(torch.equal(X_pruned, X))
         # test max_frac limiting
         with mock.patch.object(MockPosterior,
                                "rsample",
                                return_value=samples):
             mm = MockModel(MockPosterior(samples=samples))
             X_pruned = prune_inferior_points(model=mm, X=X, max_frac=2 / 3)
         self.assertTrue(torch.equal(X_pruned, X[:2]))
         # test that zero-probability is in fact pruned
         samples[2, 0, 0] = 10
         with mock.patch.object(MockPosterior,
                                "rsample",
                                return_value=samples):
             mm = MockModel(MockPosterior(samples=samples))
             X_pruned = prune_inferior_points(model=mm, X=X)
         self.assertTrue(torch.equal(X_pruned, X[:2]))
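
The non-repeated-sample checks in Example #4 patch `MockPosterior.rsample` so every posterior draw returns a fixed tensor, which makes the pruning outcome deterministic. A stripped-down sketch of that pattern follows; the tensor values are illustrative and the `botorch.acquisition.utils` import path for `prune_inferior_points` is an assumption (Examples #7 and #22 patch it under `botorch.acquisition.monte_carlo`).

# Sketch: pin down the posterior "samples" seen by prune_inferior_points.
from unittest import mock

import torch
from botorch.acquisition.utils import prune_inferior_points  # path assumed
from botorch.utils.testing import MockModel, MockPosterior

X = torch.rand(3, 2)            # 3 candidate points in 2 dimensions
samples = torch.rand(4, 3, 1)   # 4 fake draws with event shape q x m = 3 x 1
with mock.patch.object(MockPosterior, "rsample", return_value=samples):
    mm = MockModel(MockPosterior(samples=samples))
    X_pruned = prune_inferior_points(model=mm, X=X)
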
Example #5
    def test_InverseCostWeightedUtility(self):
        for batch_shape in ([], [2]):
            for dtype in (torch.float, torch.double):
                # the event shape is `batch_shape x q x t`
                mean = 1 + torch.rand(
                    *batch_shape, 2, 1, device=self.device, dtype=dtype)
                mm = MockModel(MockPosterior(mean=mean))

                X = torch.randn(*batch_shape,
                                3,
                                2,
                                device=self.device,
                                dtype=dtype)
                deltas = torch.rand(4,
                                    *batch_shape,
                                    device=self.device,
                                    dtype=dtype)

                # test that sampler is required if use_mean=False
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                with self.assertRaises(RuntimeError):
                    icwu(X, deltas)

                # check warning for negative cost
                mm = MockModel(MockPosterior(mean=mean.clamp_max(-1e-6)))
                icwu = InverseCostWeightedUtility(mm)
                with warnings.catch_warnings(
                        record=True) as ws, settings.debug(True):
                    icwu(X, deltas)
                    self.assertTrue(
                        any(
                            issubclass(w.category, CostAwareWarning)
                            for w in ws))

                # basic test
                mm = MockModel(MockPosterior(mean=mean))
                icwu = InverseCostWeightedUtility(mm)
                ratios = icwu(X, deltas)
                self.assertTrue(
                    torch.equal(ratios, deltas / mean.squeeze(-1).sum(dim=-1)))

                # sampling test
                samples = 1 + torch.rand(  # event shape is q x m
                    *batch_shape,
                    3,
                    1,
                    device=self.device,
                    dtype=dtype)
                mm = MockModel(MockPosterior(samples=samples))
                icwu = InverseCostWeightedUtility(mm, use_mean=False)
                ratios = icwu(X, deltas, sampler=IIDNormalSampler(4))
                self.assertTrue(
                    torch.equal(ratios,
                                deltas / samples.squeeze(-1).sum(dim=-1)))
Example #6
 def test_posterior_mean(self):
     for dtype in (torch.float, torch.double):
         mean = torch.tensor([[0.25]], device=self.device, dtype=dtype)
         mm = MockModel(MockPosterior(mean=mean))
         module = PosteriorMean(model=mm)
         X = torch.empty(1, 1, device=self.device, dtype=dtype)
         pm = module(X)
         self.assertTrue(torch.equal(pm, mean.view(-1)))
         # check for proper error if multi-output model
         mean2 = torch.rand(1, 2, device=self.device, dtype=dtype)
         mm2 = MockModel(MockPosterior(mean=mean2))
         with self.assertRaises(UnsupportedError):
             PosteriorMean(model=mm2)
Example #7
 def test_prune_baseline(self):
     no = "botorch.utils.testing.MockModel.num_outputs"
     prune = "botorch.acquisition.monte_carlo.prune_inferior_points"
     for dtype in (torch.float, torch.double):
         X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
         X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
         with mock.patch(
                 no, new_callable=mock.PropertyMock) as mock_num_outputs:
             mock_num_outputs.return_value = 1
             mm = MockModel(mock.Mock())
             with mock.patch(prune, return_value=X_pruned) as mock_prune:
                 acqf = qNoisyExpectedImprovement(
                     model=mm,
                     X_baseline=X_baseline,
                     prune_baseline=True,
                     cache_root=False,
                 )
             mock_prune.assert_called_once()
             self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
             with mock.patch(prune, return_value=X_pruned) as mock_prune:
                 acqf = qNoisyExpectedImprovement(
                     model=mm,
                     X_baseline=X_baseline,
                     prune_baseline=True,
                     marginalize_dim=-3,
                     cache_root=False,
                 )
                 _, kwargs = mock_prune.call_args
                 self.assertEqual(kwargs["marginalize_dim"], -3)
Example #8
    def test_q_upper_confidence_bound(self):
        for dtype in (torch.float, torch.double):
            # the event shape is `b x q x t` = 1 x 1 x 1
            samples = torch.zeros(1, 1, 1, device=self.device, dtype=dtype)
            mm = MockModel(MockPosterior(samples=samples))
            # X is `q x d` = 1 x 1. X is a dummy and unused b/c of mocking
            X = torch.zeros(1, 1, device=self.device, dtype=dtype)

            # basic test
            sampler = IIDNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)

            # basic test, no resample
            sampler = IIDNormalSampler(num_samples=2, seed=12345)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            res = acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, no resample
            sampler = SobolQMCNormalSampler(num_samples=2)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertTrue(torch.equal(acqf.sampler.base_samples, bs))

            # basic test, qmc, resample
            sampler = SobolQMCNormalSampler(num_samples=2, resample=True)
            acqf = qUpperConfidenceBound(model=mm, beta=0.5, sampler=sampler)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            self.assertEqual(acqf.sampler.base_samples.shape, torch.Size([2, 1, 1, 1]))
            bs = acqf.sampler.base_samples.clone()
            acqf(X)
            self.assertFalse(torch.equal(acqf.sampler.base_samples, bs))

            # basic test for X_pending and warning
            acqf.set_X_pending()
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(None)
            self.assertIsNone(acqf.X_pending)
            acqf.set_X_pending(X)
            self.assertEqual(acqf.X_pending, X)
            res = acqf(X)
            X2 = torch.zeros(
                1, 1, 1, device=self.device, dtype=dtype, requires_grad=True
            )
            with warnings.catch_warnings(record=True) as ws, settings.debug(True):
                acqf.set_X_pending(X2)
                self.assertEqual(acqf.X_pending, X2)
                self.assertEqual(len(ws), 1)
                self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
Example #9
 def test_constrained_expected_improvement_batch(self):
     for dtype in (torch.float, torch.double):
         mean = torch.tensor(
             [[-0.5, 0.0, 5.0, 0.0], [0.0, 0.0, 5.0, 0.0],
              [0.5, 0.0, 5.0, 0.0]],
             device=self.device,
             dtype=dtype,
         ).unsqueeze(dim=-2)
         variance = torch.ones(3, 4, device=self.device,
                               dtype=dtype).unsqueeze(dim=-2)
         N = torch.distributions.Normal(loc=0.0, scale=1.0)
         a = N.icdf(
             torch.tensor(0.75))  # get a so that P(-a <= N <= a) = 0.5
         mm = MockModel(MockPosterior(mean=mean, variance=variance))
         module = ConstrainedExpectedImprovement(
             model=mm,
             best_f=0.0,
             objective_index=0,
             constraints={
                 1: [None, 0],
                 2: [5.0, None],
                 3: [-a, a]
             },
         )
         X = torch.empty(3, 1, 1, device=self.device, dtype=dtype)  # dummy
         ei = module(X)
         self.assertTrue(ei.shape == torch.Size([3]))
         ei_expected_unconstrained = torch.tensor(
             [0.19780, 0.39894, 0.69780], device=self.device, dtype=dtype)
         ei_expected = ei_expected_unconstrained * 0.5 * 0.5 * 0.5
         self.assertTrue(torch.allclose(ei, ei_expected, atol=1e-4))
Example #10
    def test_estimate_feasible_volume(self):
        for dtype in (torch.float, torch.double):
            for samples in (
                    torch.zeros(1, 2, 1, device=self.device, dtype=dtype),
                    torch.ones(1, 1, 1, device=self.device, dtype=dtype),
            ):

                mm = MockModel(MockPosterior(samples=samples))
                bounds = torch.ones((2, 1))
                outcome_constraints = [lambda y: y[..., 0] - 0.5]

                p_linear, p_outcome = estimate_feasible_volume(
                    bounds=bounds,
                    model=mm,
                    outcome_constraints=outcome_constraints,
                    nsample_feature=2,
                    nsample_outcome=1,
                    dtype=dtype,
                )

                self.assertEqual(p_linear, 1.0)
                self.assertEqual(p_outcome, 1.0 - samples[0, 0].item())

                p_linear, p_outcome = estimate_feasible_volume(
                    bounds=bounds,
                    model=mm,
                    outcome_constraints=None,
                    nsample_feature=2,
                    nsample_outcome=1,
                    dtype=dtype,
                )
                self.assertEqual(p_linear, 1.0)
                self.assertEqual(p_outcome, 1.0)
Example #11
 def test_initialize_q_multi_fidelity_knowledge_gradient(self):
     for dtype in (torch.float, torch.double):
         mm = MockModel(MockPosterior())
         # test error when not specifying current_value
         with self.assertRaises(UnsupportedError):
             qMultiFidelityKnowledgeGradient(model=mm,
                                             num_fantasies=None,
                                             cost_aware_utility=mock.Mock())
         # test default construction
         mock_cau = mock.Mock()
         current_value = torch.zeros(1, device=self.device, dtype=dtype)
         qMFKG = qMultiFidelityKnowledgeGradient(
             model=mm,
             num_fantasies=32,
             current_value=current_value,
             cost_aware_utility=mock_cau,
         )
         self.assertEqual(qMFKG.num_fantasies, 32)
         self.assertIsInstance(qMFKG.sampler, SobolQMCNormalSampler)
         self.assertEqual(qMFKG.sampler.sample_shape, torch.Size([32]))
         self.assertIsNone(qMFKG.objective)
         self.assertIsNone(qMFKG.inner_sampler)
         self.assertIsNone(qMFKG.X_pending)
         self.assertEqual(qMFKG.get_augmented_q_batch_size(q=3), 32 + 3)
         self.assertEqual(qMFKG.cost_aware_utility, mock_cau)
         self.assertTrue(torch.equal(qMFKG.current_value, current_value))
         self.assertIsNone(qMFKG._cost_sampler)
         X = torch.rand(2, 3, device=self.device, dtype=dtype)
         self.assertTrue(torch.equal(qMFKG.project(X), X))
         self.assertTrue(torch.equal(qMFKG.expand(X), X))
         # make sure cost sampling logic works
         self.assertIsInstance(qMFKG.cost_sampler, SobolQMCNormalSampler)
         self.assertEqual(qMFKG.cost_sampler.sample_shape, torch.Size([32]))
Example #12
 def test_init(self):
     mm = MockModel(MockPosterior(mean=torch.rand(2, 1)))
     # test default init
     acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm)
     self.assertIsInstance(acqf.objective, IdentityMCMultiOutputObjective)
     self.assertIsInstance(acqf.sampler, SobolQMCNormalSampler)
     self.assertEqual(acqf.sampler._sample_shape, torch.Size([512]))
     self.assertTrue(acqf.sampler.collapse_batch_dims)
     self.assertFalse(acqf.sampler.resample)
     self.assertIsNone(acqf.X_pending)
     # test custom init
     sampler = SobolQMCNormalSampler(num_samples=64,
                                     collapse_batch_dims=False,
                                     resample=True)
     objective = DummyMCMultiOutputObjective()
     X_pending = torch.rand(2, 1)
     acqf = DummyMultiObjectiveMCAcquisitionFunction(model=mm,
                                                     sampler=sampler,
                                                     objective=objective,
                                                     X_pending=X_pending)
     self.assertEqual(acqf.objective, objective)
     self.assertEqual(acqf.sampler, sampler)
     self.assertTrue(torch.equal(acqf.X_pending, X_pending))
     # test unsupported objective
     with self.assertRaises(UnsupportedError):
         acqf = DummyMultiObjectiveMCAcquisitionFunction(
             model=mm, objective=IdentityMCObjective())
Example #13
 def test_abstract_raises(self):
     with self.assertRaises(TypeError):
         AnalyticAcquisitionFunction()
     # raise if model is multi-output, but no posterior transform is given
     mean = torch.zeros(1, 2)
     variance = torch.ones(1, 2)
     mm = MockModel(MockPosterior(mean=mean, variance=variance))
     with self.assertRaises(UnsupportedError):
         DummyAnalyticAcquisitionFunction(model=mm)
Example #14
 def test_MockModel(self):
     mp = MockPosterior()
     mm = MockModel(mp)
     X = torch.empty(0)
     self.assertEqual(mm.posterior(X), mp)
     self.assertEqual(mm.num_outputs, 0)
     mm.state_dict()
     mm.load_state_dict()
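
Example #14 shows MockModel at its most basic: it ignores the query inputs and hands back whichever MockPosterior it was constructed with, which is what lets the other examples exercise acquisition code without fitting a real model. A small stand-alone sketch with arbitrary values:

import torch

from botorch.utils.testing import MockModel, MockPosterior

# The mock model returns the stored posterior regardless of the query points.
mm = MockModel(MockPosterior(mean=torch.tensor([[0.25]]),
                             variance=torch.ones(1, 1)))
posterior = mm.posterior(torch.zeros(1, 1))
print(posterior.mean, posterior.variance)  # the tensors supplied above
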
Example #15
 def test_abstract_raises(self):
     with self.assertRaises(TypeError):
         MCAcquisitionFunction()
     # raise if model is multi-output, but no objective is given
     no = "botorch.utils.testing.MockModel.num_outputs"
     with mock.patch(no, new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 2
         mm = MockModel(MockPosterior())
         with self.assertRaises(UnsupportedError):
             DummyMCAcquisitionFunction(model=mm)
Example #16
 def test_init(self):
     mm = MockModel(MockPosterior(mean=None))
     MPS = MaxPosteriorSampling(mm)
     self.assertEqual(MPS.model, mm)
     self.assertTrue(MPS.replacement)
     self.assertIsInstance(MPS.objective, IdentityMCObjective)
     obj = LinearMCObjective(torch.rand(2))
     MPS = MaxPosteriorSampling(mm, objective=obj, replacement=False)
     self.assertEqual(MPS.objective, obj)
     self.assertFalse(MPS.replacement)
Example #17
    def test_gen_batch_initial_conditions(self):
        bounds = torch.stack([torch.zeros(2), torch.ones(2)])
        mock_acqf = MockAcquisitionFunction()
        mock_acqf.objective = lambda y: y.squeeze(-1)
        for dtype in (torch.float, torch.double):
            bounds = bounds.to(device=self.device, dtype=dtype)
            mock_acqf.X_baseline = bounds  # for testing sample_around_best
            mock_acqf.model = MockModel(MockPosterior(mean=bounds[:, :1]))
            for nonnegative, seed, init_batch_limit, ffs, sample_around_best in product(
                [True, False], [None, 1234], [None, 1], [None, {0: 0.5}], [True, False]
            ):
                with mock.patch.object(
                    MockAcquisitionFunction,
                    "__call__",
                    wraps=mock_acqf.__call__,
                ) as mock_acqf_call:
                    batch_initial_conditions = gen_batch_initial_conditions(
                        acq_function=mock_acqf,
                        bounds=bounds,
                        q=1,
                        num_restarts=2,
                        raw_samples=10,
                        fixed_features=ffs,
                        options={
                            "nonnegative": nonnegative,
                            "eta": 0.01,
                            "alpha": 0.1,
                            "seed": seed,
                            "init_batch_limit": init_batch_limit,
                            "sample_around_best": sample_around_best,
                        },
                    )
                    expected_shape = torch.Size([2, 1, 2])
                    self.assertEqual(batch_initial_conditions.shape, expected_shape)
                    self.assertEqual(batch_initial_conditions.device, bounds.device)
                    self.assertEqual(batch_initial_conditions.dtype, bounds.dtype)
                    raw_samps = mock_acqf_call.call_args[0][0]
                    batch_shape = (
                        torch.Size([20 if sample_around_best else 10])
                        if init_batch_limit is None
                        else torch.Size([init_batch_limit])
                    )
                    expected_raw_samps_shape = batch_shape + torch.Size([1, 2])
                    self.assertEqual(raw_samps.shape, expected_raw_samps_shape)

                    if ffs is not None:
                        for idx, val in ffs.items():
                            self.assertTrue(
                                torch.all(batch_initial_conditions[..., idx] == val)
                            )
Example #18
 def test_GetQEI(self, mock_acqf):
     self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2)))
     acqf = get_acquisition_function(
         acquisition_function_name="qEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
         marginalize_dim=0,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     best_f = self.objective(self.model.posterior(self.X_observed).mean).max().item()
     mock_acqf.assert_called_once_with(
         model=self.model,
         best_f=best_f,
         sampler=mock.ANY,
         objective=self.objective,
         posterior_transform=None,
         X_pending=self.X_pending,
     )
     # test batched model
     self.model = MockModel(MockPosterior(mean=torch.zeros(1, 2, 1)))
     acqf = get_acquisition_function(
         acquisition_function_name="qEI",
         model=self.model,
         objective=self.objective,
         X_observed=self.X_observed,
         X_pending=self.X_pending,
         mc_samples=self.mc_samples,
         seed=self.seed,
     )
     self.assertTrue(acqf == mock_acqf.return_value)
     # test batched model without marginalize dim
     args, kwargs = mock_acqf.call_args
     self.assertEqual(args, ())
     sampler = kwargs["sampler"]
     self.assertIsInstance(sampler, SobolQMCNormalSampler)
     self.assertEqual(sampler.sample_shape, torch.Size([self.mc_samples]))
     self.assertEqual(sampler.seed, 1)
     self.assertTrue(torch.equal(kwargs["X_pending"], self.X_pending))
Example #19
 def test_scalarized_posterior_mean(self):
     for dtype in (torch.float, torch.double):
         mean = torch.tensor([[0.25], [0.5]], device=self.device, dtype=dtype)
         mm = MockModel(MockPosterior(mean=mean))
         weights = torch.tensor([0.5, 1.0], device=self.device, dtype=dtype)
         module = ScalarizedPosteriorMean(model=mm, weights=weights)
         X = torch.empty(1, 1, device=self.device, dtype=dtype)
         pm = module(X)
         self.assertTrue(
             torch.allclose(pm, (mean.squeeze(-1) * module.weights).sum(dim=-1))
         )
Example #20
 def test_get_EHVI_input_validation_errors(self):
     weights = torch.ones(2)
     objective_thresholds = torch.zeros(2)
     mm = MockModel(MockPosterior())
     with self.assertRaisesRegex(ValueError,
                                 "There are no feasible observed points."):
         get_EHVI(
             model=mm,
             objective_weights=weights,
             objective_thresholds=objective_thresholds,
         )
Example #21
 def test_deprecate_acqf_objective(self):
     mean = torch.zeros(1, 2)
     variance = torch.ones(1, 2)
     mm = MockModel(MockPosterior(mean=mean, variance=variance))
     obj = ScalarizedObjective(weights=torch.ones(2))
     # check for deprecation warning
     with self.assertWarns(DeprecationWarning):
         acqf = DummyAnalyticAcquisitionFunction(model=mm, objective=obj)
     # check that posterior transform was created and assigned
     self.assertIsInstance(acqf.posterior_transform, ScalarizedPosteriorTransform)
     self.assertFalse(hasattr(acqf, "objective"))
Example #22
 def test_prune_baseline(self):
     prune = "botorch.acquisition.monte_carlo.prune_inferior_points"
     for dtype in (torch.float, torch.double):
         X_baseline = torch.zeros(1, 1, device=self.device, dtype=dtype)
         X_pruned = torch.rand(1, 1, device=self.device, dtype=dtype)
         mm = MockModel(mock.Mock())
         with mock.patch(prune, return_value=X_pruned) as mock_prune:
             acqf = qNoisyExpectedImprovement(
                 model=mm, X_baseline=X_baseline, prune_baseline=True
             )
         mock_prune.assert_called_once()
         self.assertTrue(torch.equal(acqf.X_baseline, X_pruned))
Example #23
 def test_abstract_raises(self):
     with self.assertRaises(TypeError):
         MCAcquisitionFunction()
     # raise if model is multi-output, but no posterior transform or
     # objective is given
     no = "botorch.utils.testing.MockModel.num_outputs"
     with mock.patch(no,
                     new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 2
         mm = MockModel(MockPosterior())
         with self.assertRaises(UnsupportedError):
             DummyMCAcquisitionFunction(model=mm)
     # raise if model is multi-output, but the posterior transform does not
     # scalarize and no objective is given
     with mock.patch(no,
                     new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 2
         mm = MockModel(MockPosterior())
         ptf = DummyNonScalarizingPosteriorTransform()
         with self.assertRaises(UnsupportedError):
             DummyMCAcquisitionFunction(model=mm, posterior_transform=ptf)
Example #24
 def test_gen_one_shot_kg_initial_conditions(self):
     num_fantasies = 8
     num_restarts = 4
     raw_samples = 16
     for dtype in (torch.float, torch.double):
         mean = torch.zeros(1, 1, device=self.device, dtype=dtype)
         mm = MockModel(MockPosterior(mean=mean))
         mock_kg = qKnowledgeGradient(model=mm, num_fantasies=num_fantasies)
         bounds = torch.tensor([[0, 0], [1, 1]], device=self.device, dtype=dtype)
         # test option error
         with self.assertRaises(ValueError):
             gen_one_shot_kg_initial_conditions(
                 acq_function=mock_kg,
                 bounds=bounds,
                 q=1,
                 num_restarts=num_restarts,
                 raw_samples=raw_samples,
                 options={"frac_random": 2.0},
             )
         # test generation logic
         q = 2
         mock_random_ics = torch.rand(num_restarts, q + num_fantasies, 2)
         mock_fantasy_cands = torch.ones(20, 1, 2)
         mock_fantasy_vals = torch.randn(20)
         with ExitStack() as es:
             mock_gbics = es.enter_context(
                 mock.patch(
                     "botorch.optim.initializers.gen_batch_initial_conditions",
                     return_value=mock_random_ics,
                 )
             )
             mock_optacqf = es.enter_context(
                 mock.patch(
                     "botorch.optim.optimize.optimize_acqf",
                     return_value=(mock_fantasy_cands, mock_fantasy_vals),
                 )
             )
             ics = gen_one_shot_kg_initial_conditions(
                 acq_function=mock_kg,
                 bounds=bounds,
                 q=q,
                 num_restarts=num_restarts,
                 raw_samples=raw_samples,
             )
             mock_gbics.assert_called_once()
             mock_optacqf.assert_called_once()
             n_value = int((1 - 0.1) * num_fantasies)
             self.assertTrue(
                 torch.equal(
                     ics[..., :-n_value, :], mock_random_ics[..., :-n_value, :]
                 )
             )
             self.assertTrue(torch.all(ics[..., -n_value:, :] == 1))
Example #25
    def test_penalized_acquisition_function(self):
        for dtype in (torch.float, torch.double):
            mock_model = MockModel(
                MockPosterior(mean=torch.tensor([1.0]),
                              variance=torch.tensor([1.0])))
            init_point = torch.tensor([0.5, 0.5, 0.5],
                                      device=self.device,
                                      dtype=dtype)
            groups = [[0, 2], [1]]
            raw_acqf = ExpectedImprovement(model=mock_model, best_f=1.0)
            penalty = GroupLassoPenalty(init_point=init_point, groups=groups)
            lmbda = 0.1
            acqf = PenalizedAcquisitionFunction(raw_acqf=raw_acqf,
                                                penalty_func=penalty,
                                                regularization_parameter=lmbda)

            sample_point = torch.tensor([[1.0, 2.0, 3.0]],
                                        device=self.device,
                                        dtype=dtype)
            raw_value = raw_acqf(sample_point)
            penalty_value = penalty(sample_point)
            real_value = raw_value - lmbda * penalty_value
            computed_value = acqf(sample_point)
            self.assertTrue(torch.equal(real_value, computed_value))

            # testing X_pending for an analytic raw_acqf (EI)
            X_pending = torch.tensor([0.1, 0.2, 0.3],
                                     device=self.device,
                                     dtype=dtype)
            with self.assertRaises(UnsupportedError):
                acqf.set_X_pending(X_pending)

            # testing X_pending for a non-analytic raw_acqf (qEI)
            sampler = IIDNormalSampler(num_samples=2)
            raw_acqf_2 = qExpectedImprovement(model=mock_model,
                                              best_f=0,
                                              sampler=sampler)
            init_point = torch.tensor([1.0, 1.0, 1.0],
                                      device=self.device,
                                      dtype=dtype)
            l2_module = L2Penalty(init_point=init_point)
            acqf_2 = PenalizedAcquisitionFunction(
                raw_acqf=raw_acqf_2,
                penalty_func=l2_module,
                regularization_parameter=lmbda,
            )

            X_pending = torch.tensor([0.1, 0.2, 0.3],
                                     device=self.device,
                                     dtype=dtype)
            acqf_2.set_X_pending(X_pending)
            self.assertTrue(torch.equal(acqf_2.X_pending, X_pending))
Example #26
 def test_get_value_function(self):
     mm = MockModel(None)
     # test PosteriorMean
     vf = _get_value_function(mm)
     self.assertIsInstance(vf, PosteriorMean)
     self.assertIsNone(vf.objective)
     # test SimpleRegret
     obj = GenericMCObjective(lambda Y: Y.sum(dim=-1))
     sampler = IIDNormalSampler(num_samples=2)
     vf = _get_value_function(model=mm, objective=obj, sampler=sampler)
     self.assertIsInstance(vf, qSimpleRegret)
     self.assertEqual(vf.objective, obj)
     self.assertEqual(vf.sampler, sampler)
Example #27
    def test_constrained_q_expected_hypervolume_improvement(self):
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            ref_point = [0.0, 0.0]
            t_ref_point = torch.tensor(ref_point, **tkwargs)
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]],
                **tkwargs)
            partitioning = NondominatedPartitioning(ref_point=t_ref_point)
            partitioning.update(Y=pareto_Y)

            # test q=1
            # the event shape is `b x q x m` = 1 x 1 x 2
            samples = torch.tensor([[[6.5, 4.5]]], **tkwargs)
            mm = MockModel(MockPosterior(samples=samples))
            sampler = IIDNormalSampler(num_samples=1)
            X = torch.zeros(1, 1, **tkwargs)
            # test zero slack
            for eta in (1e-1, 1e-2):
                acqf = qExpectedHypervolumeImprovement(
                    model=mm,
                    ref_point=ref_point,
                    partitioning=partitioning,
                    sampler=sampler,
                    constraints=[lambda Z: torch.zeros_like(Z[..., -1])],
                    eta=eta,
                )
                res = acqf(X)
                self.assertAlmostEqual(res.item(), 0.5 * 1.5, places=4)
            # test feasible
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                constraints=[lambda Z: -100.0 * torch.ones_like(Z[..., -1])],
                eta=1e-3,
            )
            res = acqf(X)
            self.assertAlmostEqual(res.item(), 1.5, places=4)
            # test infeasible
            acqf = qExpectedHypervolumeImprovement(
                model=mm,
                ref_point=ref_point,
                partitioning=partitioning,
                sampler=sampler,
                constraints=[lambda Z: 100.0 * torch.ones_like(Z[..., -1])],
                eta=1e-3,
            )
            res = acqf(X)
            self.assertAlmostEqual(res.item(), 0.0, places=4)
Example #28
    def test_expected_hypervolume_improvement(self):
        tkwargs = {"device": self.device}
        for dtype in (torch.float, torch.double):
            ref_point = [0.0, 0.0]
            tkwargs["dtype"] = dtype
            pareto_Y = torch.tensor(
                [[4.0, 5.0], [5.0, 5.0], [8.5, 3.5], [8.5, 3.0], [9.0, 1.0]],
                **tkwargs)
            partitioning = NondominatedPartitioning(
                ref_point=torch.tensor(ref_point, **tkwargs))
            # the event shape is `b x q x m` = 1 x 1 x 2
            mean = torch.zeros(1, 1, 2, **tkwargs)
            variance = torch.zeros(1, 1, 2, **tkwargs)
            mm = MockModel(MockPosterior(mean=mean, variance=variance))
            # test error if no pareto_Y has been initialized in the partitioning
            with self.assertRaises(BotorchError):
                ExpectedHypervolumeImprovement(model=mm,
                                               ref_point=ref_point,
                                               partitioning=partitioning)
            partitioning.update(Y=pareto_Y)
            # test error if ref point has wrong shape
            with self.assertRaises(ValueError):
                ExpectedHypervolumeImprovement(model=mm,
                                               ref_point=ref_point[:1],
                                               partitioning=partitioning)

            with self.assertRaises(ValueError):
                # test error if no pareto_Y point is better than ref_point
                ExpectedHypervolumeImprovement(model=mm,
                                               ref_point=[10.0, 10.0],
                                               partitioning=partitioning)
            X = torch.zeros(1, 1, **tkwargs)
            # basic test
            acqf = ExpectedHypervolumeImprovement(model=mm,
                                                  ref_point=ref_point,
                                                  partitioning=partitioning)
            res = acqf(X)
            self.assertEqual(res.item(), 0.0)
            # check ref point
            self.assertTrue(
                torch.equal(acqf.ref_point, torch.tensor(ref_point,
                                                         **tkwargs)))
            # check bounds
            self.assertTrue(hasattr(acqf, "cell_lower_bounds"))
            self.assertTrue(hasattr(acqf, "cell_upper_bounds"))
            # check cached indices
            expected_indices = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1]],
                                            dtype=torch.long,
                                            device=self.device)
            self.assertTrue(
                torch.equal(acqf._cross_product_indices, expected_indices))
Example #29
    def test_setup(self):
        mean = torch.zeros(1, 1)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        # basic test
        sampler = IIDNormalSampler(1)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        acqf._setup(model=mm, sampler=sampler)
        self.assertFalse(acqf._is_mt)
        self.assertFalse(acqf._is_deterministic)
        self.assertFalse(acqf._uses_matheron)
        self.assertFalse(acqf._cache_root)
        acqf._setup(model=mm, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._cache_root)

        # test check_sampler
        with warnings.catch_warnings(record=True) as ws, settings.debug(True):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
            self.assertEqual(len(ws), 0)

        # test collapse_batch_dims=False
        sampler = IIDNormalSampler(1, collapse_batch_dims=False)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        with self.assertRaises(UnsupportedError):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
        # test warning if base_samples is not None
        sampler = IIDNormalSampler(1)
        sampler.base_samples = torch.zeros(1, 1)
        acqf = DummyCachedCholeskyAcqf(model=mm, sampler=sampler)
        with warnings.catch_warnings(record=True) as ws, settings.debug(True):
            acqf._setup(model=mm, sampler=sampler, check_sampler=True)
            self.assertTrue(issubclass(ws[-1].category, BotorchWarning))
        # test the base_samples are set to None
        self.assertIsNone(acqf.sampler.base_samples)
        # test model that uses Matheron's rule and sampler.batch_range != (0, -1)
        hogp = HigherOrderGP(torch.zeros(1, 1), torch.zeros(1, 1, 1)).eval()
        acqf = DummyCachedCholeskyAcqf(model=hogp, sampler=sampler)
        with self.assertRaises(RuntimeError):
            acqf._setup(model=hogp, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._uses_matheron)
        self.assertTrue(acqf._is_mt)
        self.assertFalse(acqf._is_deterministic)

        # test deterministic model
        model = GenericDeterministicModel(f=lambda X: X)
        acqf = DummyCachedCholeskyAcqf(model=model, sampler=sampler)
        acqf._setup(model=model, sampler=sampler, cache_root=True)
        self.assertTrue(acqf._is_deterministic)
        self.assertFalse(acqf._uses_matheron)
        self.assertFalse(acqf._is_mt)
        self.assertFalse(acqf._cache_root)
Example #30
 def test_init(self):
     NO = "botorch.utils.testing.MockModel.num_outputs"
     with mock.patch(NO, new_callable=mock.PropertyMock) as mock_num_outputs:
         mock_num_outputs.return_value = 1
         mm = MockModel(None)
         acqf = PosteriorMean(mm)
         BS = BoltzmannSampling(acqf)
         self.assertEqual(BS.acq_func, acqf)
         self.assertEqual(BS.eta, 1.0)
         self.assertTrue(BS.replacement)
         BS = BoltzmannSampling(acqf, eta=0.5, replacement=False)
         self.assertEqual(BS.acq_func, acqf)
         self.assertEqual(BS.eta, 0.5)
         self.assertFalse(BS.replacement)