Example #1
    def test_acqf_input_constructor(self):
        with self.assertRaises(RuntimeError) as e:
            get_acqf_input_constructor(DummyAcquisitionFunction)
        self.assertIn("not registered", str(e.exception))

        with self.assertRaises(ValueError) as e:

            @acqf_input_constructor(ExpectedImprovement)
            class NewAcquisitionFunction(AcquisitionFunction):
                ...

        self.assertIn("duplicate", str(e.exception))
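
For contrast with the two error cases above, a minimal happy-path sketch of how the registry is meant to be used; `MyAcquisitionFunction`, `construct_inputs_my_acqf`, and `scale` are hypothetical names, while the decorator and lookup function are the same ones exercised by the test.

from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.input_constructors import (
    acqf_input_constructor,
    get_acqf_input_constructor,
)


class MyAcquisitionFunction(AcquisitionFunction):  # hypothetical custom acqf
    ...


@acqf_input_constructor(MyAcquisitionFunction)
def construct_inputs_my_acqf(model, training_data, scale=1.0, **kwargs):
    # Map the generic (model, training_data, ...) inputs to the keyword
    # arguments this acquisition function's constructor expects.
    return {"model": model, "scale": scale}


# The constructor can now be looked up by class; for an unregistered class
# (like DummyAcquisitionFunction in the test above) this lookup raises RuntimeError.
constructor = get_acqf_input_constructor(MyAcquisitionFunction)
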
Example #2
    def test_construct_inputs_mfkg(self):
        constructor_args = {
            "model": None,
            "training_data": self.bd_td,
            "objective": None,
            "bounds": self.bounds,
            "num_fantasies": 123,
            "target_fidelities": {
                0: 0.987
            },
            "fidelity_weights": {
                0: 0.654
            },
            "cost_intercept": 0.321,
        }
        with mock.patch(
                target=
                "botorch.acquisition.input_constructors.construct_inputs_mf_base",
                return_value={"foo": 0},
        ), mock.patch(
                target=
                "botorch.acquisition.input_constructors.construct_inputs_qKG",
                return_value={"bar": 1},
        ):
            from botorch.acquisition import input_constructors

            input_constructor = input_constructors.get_acqf_input_constructor(
                qMultiFidelityKnowledgeGradient)
            inputs_mfkg = input_constructor(**constructor_args)
            inputs_test = {"foo": 0, "bar": 1}
            self.assertEqual(inputs_mfkg, inputs_test)
Example #3
 def test_construct_inputs_qEI(self):
     c = get_acqf_input_constructor(qExpectedImprovement)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["objective"])
     self.assertIsNone(kwargs["X_pending"])
     self.assertIsNone(kwargs["sampler"])
     X_pending = torch.rand(2, 2)
     objective = LinearMCObjective(torch.rand(2))
     kwargs = c(
         model=mock_model,
         training_data=self.bd_td_mo,
         objective=objective,
         X_pending=X_pending,
     )
     self.assertEqual(kwargs["model"], mock_model)
     self.assertTrue(
         torch.equal(kwargs["objective"].weights, objective.weights))
     self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
     self.assertIsNone(kwargs["sampler"])
     best_f_expected = objective(self.bd_td_mo.Y).max()
     self.assertEqual(kwargs["best_f"], best_f_expected)
     # Check explicitly specifying `best_f`.
     best_f_expected = best_f_expected - 1  # Random value.
     kwargs = c(
         model=mock_model,
         training_data=self.bd_td_mo,
         objective=objective,
         X_pending=X_pending,
         best_f=best_f_expected,
     )
     self.assertEqual(kwargs["best_f"], best_f_expected)
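
A hedged usage sketch (not part of the test suite) of how the kwargs dict returned by this constructor is meant to be consumed: it is splatted directly into the acquisition function class, exactly as the `Acquisition.__init__` in Example #16 does with `botorch_acqf_class(**acqf_inputs)`. The helper name `make_qei` is hypothetical.

from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.monte_carlo import qExpectedImprovement


def make_qei(model, training_data, **options):
    # Look up the registered constructor, build the kwargs dict, and pass it
    # straight to the acquisition function class.
    constructor = get_acqf_input_constructor(qExpectedImprovement)
    acqf_kwargs = constructor(model=model, training_data=training_data, **options)
    return qExpectedImprovement(**acqf_kwargs)
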
Example #4
 def test_construct_inputs_qNEI(self):
     c = get_acqf_input_constructor(qNoisyExpectedImprovement)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["objective"])
     self.assertIsNone(kwargs["X_pending"])
     self.assertIsNone(kwargs["sampler"])
     self.assertFalse(kwargs["prune_baseline"])
     self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
     with self.assertRaises(NotImplementedError):
         c(model=mock_model, training_data=self.nbd_td)
     X_baseline = torch.rand(2, 2)
     kwargs = c(
         model=mock_model,
         training_data=self.bd_td,
         X_baseline=X_baseline,
         prune_baseline=True,
     )
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["objective"])
     self.assertIsNone(kwargs["X_pending"])
     self.assertIsNone(kwargs["sampler"])
     self.assertTrue(kwargs["prune_baseline"])
     self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
Example #5
 def setUp(self):
     qNEI_input_constructor = get_acqf_input_constructor(
         qNoisyExpectedImprovement)
     self.mock_input_constructor = mock.MagicMock(
         qNEI_input_constructor, side_effect=qNEI_input_constructor)
     # Adding wrapping here to be able to count calls and inspect arguments.
     _register_acqf_input_constructor(
         acqf_cls=DummyACQFClass,
         input_constructor=self.mock_input_constructor,
     )
     self.botorch_model_class = SingleTaskGP
     self.surrogate = Surrogate(
         botorch_model_class=self.botorch_model_class)
     self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
     self.Y = torch.tensor([[3.0], [4.0]])
     self.Yvar = torch.tensor([[0.0], [2.0]])
     self.training_data = TrainingData.from_block_design(X=self.X,
                                                         Y=self.Y,
                                                         Yvar=self.Yvar)
     self.fidelity_features = [2]
     self.surrogate.construct(training_data=self.training_data,
                              fidelity_features=self.fidelity_features)
     self.search_space_digest = SearchSpaceDigest(
         feature_names=["a", "b", "c"],
         bounds=[(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)],
         target_fidelities={2: 1.0},
     )
     self.botorch_acqf_class = DummyACQFClass
     self.objective_weights = torch.tensor([1.0])
     self.objective_thresholds = None
     self.pending_observations = [torch.tensor([[1.0, 3.0, 4.0]])]
     self.outcome_constraints = (torch.tensor([[1.0]]), torch.tensor([[0.5]]))
     self.linear_constraints = None
     self.fixed_features = {1: 2.0}
     self.options = {"best_f": 0.0}
     self.acquisition = Acquisition(
         botorch_acqf_class=self.botorch_acqf_class,
         surrogate=self.surrogate,
         search_space_digest=self.search_space_digest,
         objective_weights=self.objective_weights,
         objective_thresholds=self.objective_thresholds,
         pending_observations=self.pending_observations,
         outcome_constraints=self.outcome_constraints,
         linear_constraints=self.linear_constraints,
         fixed_features=self.fixed_features,
         options=self.options,
     )
     self.inequality_constraints = [(torch.tensor([0, 1]),
                                     torch.tensor([-1.0, 1.0]), 1)]
     self.rounding_func = lambda x: x
     self.optimizer_options = {
         Keys.NUM_RESTARTS: 40,
         Keys.RAW_SAMPLES: 1024
     }
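
An aside on the wrapping trick used in this `setUp` (and again in Example #15): a `MagicMock` with `side_effect` set to the wrapped callable delegates to the original while recording every call. A minimal standalone illustration, unrelated to BoTorch:

from unittest import mock


def add(a, b):
    return a + b


spy = mock.MagicMock(add, side_effect=add)
assert spy(1, 2) == 3              # delegates to the real function
spy.assert_called_once_with(1, 2)  # while recording the call and its arguments
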
Example #6
 def test_construct_inputs_analytic_base(self):
     c = get_acqf_input_constructor(PosteriorMean)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["posterior_transform"])
     mock_obj = mock.Mock()
     kwargs = c(model=mock_model,
                training_data=self.bd_td,
                posterior_transform=mock_obj)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertEqual(kwargs["posterior_transform"], mock_obj)
Example #7
 def test_construct_inputs_best_f(self):
     c = get_acqf_input_constructor(ExpectedImprovement)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     best_f_expected = self.bd_td.Y.squeeze().max()
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["posterior_transform"])
     self.assertEqual(kwargs["best_f"], best_f_expected)
     self.assertTrue(kwargs["maximize"])
     kwargs = c(model=mock_model, training_data=self.bd_td, best_f=0.1)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["posterior_transform"])
     self.assertEqual(kwargs["best_f"], 0.1)
     self.assertTrue(kwargs["maximize"])
Example #8
    def test_construct_inputs_mes(self):
        func = get_acqf_input_constructor(qMaxValueEntropy)
        kwargs = func(
            model=mock.Mock(),
            training_data=self.bd_td,
            objective=LinearMCObjective(torch.rand(2)),
            bounds=self.bounds,
            candidate_size=17,
            maximize=False,
        )

        self.assertFalse(kwargs["maximize"])
        self.assertGreaterEqual(kwargs["candidate_set"].min(), 0.0)
        self.assertLessEqual(kwargs["candidate_set"].max(), 1.0)
        self.assertEqual([int(s) for s in kwargs["candidate_set"].shape],
                         [17, len(self.bounds)])
Example #9
 def test_construct_inputs_ucb(self):
     c = get_acqf_input_constructor(UpperConfidenceBound)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["posterior_transform"])
     self.assertEqual(kwargs["beta"], 0.2)
     self.assertTrue(kwargs["maximize"])
     kwargs = c(model=mock_model,
                training_data=self.bd_td,
                beta=0.1,
                maximize=False)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["posterior_transform"])
     self.assertEqual(kwargs["beta"], 0.1)
     self.assertFalse(kwargs["maximize"])
Example #10
 def test_construct_inputs_noisy_ei(self):
     c = get_acqf_input_constructor(NoisyExpectedImprovement)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertTrue(torch.equal(kwargs["X_observed"], self.bd_td.X))
     self.assertEqual(kwargs["num_fantasies"], 20)
     self.assertTrue(kwargs["maximize"])
     kwargs = c(model=mock_model,
                training_data=self.bd_td,
                num_fantasies=10,
                maximize=False)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertTrue(torch.equal(kwargs["X_observed"], self.bd_td.X))
     self.assertEqual(kwargs["num_fantasies"], 10)
     self.assertFalse(kwargs["maximize"])
     with self.assertRaisesRegex(NotImplementedError, "only block designs"):
         c(model=mock_model, training_data=self.nbd_td)
Example #11
 def test_construct_inputs_mc_base(self):
     c = get_acqf_input_constructor(qSimpleRegret)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["objective"])
     self.assertIsNone(kwargs["X_pending"])
     self.assertIsNone(kwargs["sampler"])
     X_pending = torch.rand(2, 2)
     objective = LinearMCObjective(torch.rand(2))
     kwargs = c(
         model=mock_model,
         training_data=self.bd_td,
         objective=objective,
         X_pending=X_pending,
     )
     self.assertEqual(kwargs["model"], mock_model)
     self.assertTrue(
         torch.equal(kwargs["objective"].weights, objective.weights))
     self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
     self.assertIsNone(kwargs["sampler"])
Example #12
    def test_construct_inputs_kg(self):
        current_value = torch.tensor(1.23)
        with mock.patch(
                target=
                "botorch.acquisition.input_constructors.optimize_objective",
                return_value=(None, current_value),
        ):
            from botorch.acquisition import input_constructors

            func = input_constructors.get_acqf_input_constructor(
                qKnowledgeGradient)
            kwargs = func(
                model=mock.Mock(),
                training_data=self.bd_td,
                objective=LinearMCObjective(torch.rand(2)),
                bounds=self.bounds,
                num_fantasies=33,
            )

            self.assertEqual(kwargs["num_fantasies"], 33)
            self.assertEqual(kwargs["current_value"], current_value)
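
For context, a hedged sketch of the unmocked flow this test exercises (the `build_qkg` helper is hypothetical): the qKG constructor needs `bounds` because it optimizes the current posterior objective internally, via the `optimize_objective` call patched above, and forwards the optimum as `current_value`.

from botorch.acquisition.input_constructors import get_acqf_input_constructor
from botorch.acquisition.knowledge_gradient import qKnowledgeGradient


def build_qkg(model, training_data, bounds, num_fantasies=64):
    constructor = get_acqf_input_constructor(qKnowledgeGradient)
    kwargs = constructor(
        model=model,
        training_data=training_data,
        bounds=bounds,  # used internally to optimize the current posterior
        num_fantasies=num_fantasies,
    )
    return qKnowledgeGradient(**kwargs)
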
Example #13
    def test_construct_inputs_mfmes(self):
        constructor_args = {
            "model": None,
            "training_data": self.bd_td,
            "objective": None,
            "bounds": self.bounds,
            "num_fantasies": 123,
            "candidate_size": 17,
            "target_fidelities": {
                0: 0.987
            },
            "fidelity_weights": {
                0: 0.654
            },
            "cost_intercept": 0.321,
        }
        current_value = torch.tensor(1.23)
        with mock.patch(
                target=
                "botorch.acquisition.input_constructors.construct_inputs_mf_base",
                return_value={"foo": 0},
        ), mock.patch(
                target=
                "botorch.acquisition.input_constructors.construct_inputs_qMES",
                return_value={"bar": 1},
        ), mock.patch(
                target=
                "botorch.acquisition.input_constructors.optimize_objective",
                return_value=(None, current_value),
        ):
            from botorch.acquisition import input_constructors

            input_constructor = input_constructors.get_acqf_input_constructor(
                qMultiFidelityMaxValueEntropy)
            inputs_mfmes = input_constructor(**constructor_args)
            inputs_test = {"foo": 0, "bar": 1, "current_value": current_value}
            self.assertEqual(inputs_mfmes, inputs_test)
Example #14
 def test_construct_inputs_qUCB(self):
     c = get_acqf_input_constructor(qUpperConfidenceBound)
     mock_model = mock.Mock()
     kwargs = c(model=mock_model, training_data=self.bd_td)
     self.assertEqual(kwargs["model"], mock_model)
     self.assertIsNone(kwargs["objective"])
     self.assertIsNone(kwargs["X_pending"])
     self.assertIsNone(kwargs["sampler"])
     self.assertEqual(kwargs["beta"], 0.2)
     X_pending = torch.rand(2, 2)
     objective = LinearMCObjective(torch.rand(2))
     kwargs = c(
         model=mock_model,
         training_data=self.bd_td,
         objective=objective,
         X_pending=X_pending,
         beta=0.1,
     )
     self.assertEqual(kwargs["model"], mock_model)
     self.assertTrue(
         torch.equal(kwargs["objective"].weights, objective.weights))
     self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
     self.assertIsNone(kwargs["sampler"])
     self.assertEqual(kwargs["beta"], 0.1)
Example #15
    def test_MOO(self, _):
        # Add mock for qNEHVI input constructor to catch arguments passed to it.
        qNEHVI_input_constructor = get_acqf_input_constructor(
            qNoisyExpectedHypervolumeImprovement)
        mock_input_constructor = mock.MagicMock(
            qNEHVI_input_constructor, side_effect=qNEHVI_input_constructor)
        _register_acqf_input_constructor(
            acqf_cls=qNoisyExpectedHypervolumeImprovement,
            input_constructor=mock_input_constructor,
        )

        model = BoTorchModel()
        model.fit(
            Xs=self.moo_training_data.Xs,
            Ys=self.moo_training_data.Ys,
            Yvars=self.moo_training_data.Yvars,
            search_space_digest=self.search_space_digest,
            metric_names=self.moo_metric_names,
            candidate_metadata=self.candidate_metadata,
        )
        self.assertIsInstance(model.surrogate.model, FixedNoiseGP)
        _, _, gen_metadata, _ = model.gen(
            n=1,
            bounds=self.search_space_digest.bounds,
            objective_weights=self.moo_objective_weights,
            objective_thresholds=self.moo_objective_thresholds,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            pending_observations=self.pending_observations,
            model_gen_options=self.model_gen_options,
            rounding_func=self.rounding_func,
            target_fidelities=self.mf_search_space_digest.target_fidelities,
        )
        ckwargs = mock_input_constructor.call_args[1]
        self.assertIs(model.botorch_acqf_class,
                      qNoisyExpectedHypervolumeImprovement)
        mock_input_constructor.assert_called_once()
        m = ckwargs["model"]
        self.assertIsInstance(m, FixedNoiseGP)
        self.assertEqual(m.num_outputs, 2)
        training_data = ckwargs["training_data"]
        for attr in ("Xs", "Ys", "Yvars"):
            self.assertTrue(
                all(
                    torch.equal(x1, x2) for x1, x2 in zip(
                        getattr(training_data, attr),
                        getattr(self.moo_training_data, attr),
                    )))
        self.assertTrue(
            torch.equal(ckwargs["objective_thresholds"],
                        self.moo_objective_thresholds[:2]))
        self.assertIsNone(ckwargs["outcome_constraints"])
        self.assertIsNone(ckwargs["X_pending"])
        obj_t = gen_metadata["objective_thresholds"]
        self.assertTrue(
            torch.equal(obj_t[:2], self.moo_objective_thresholds[:2]))
        self.assertTrue(np.isnan(obj_t[2].item()))

        self.assertIsInstance(
            ckwargs.get("objective"),
            WeightedMCMultiOutputObjective,
        )
        self.assertTrue(
            torch.equal(
                mock_input_constructor.call_args[1].get("objective").weights,
                self.moo_objective_weights[:2],
            ))
        expected_X_baseline = _filter_X_observed(
            Xs=self.moo_training_data.Xs,
            objective_weights=self.moo_objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.search_space_digest.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        self.assertTrue(
            torch.equal(
                mock_input_constructor.call_args[1].get("X_baseline"),
                expected_X_baseline,
            ))
        # test inferred objective_thresholds
        with ExitStack() as es:
            _mock_model_infer_objective_thresholds = es.enter_context(
                mock.patch(
                    "ax.models.torch.botorch_modular.acquisition."
                    "infer_objective_thresholds",
                    return_value=torch.tensor([9.9, 3.3, float("nan")]),
                ))

            objective_weights = torch.tensor([-1.0, -1.0, 0.0])
            outcome_constraints = (
                torch.tensor([[1.0, 0.0, 0.0]]),
                torch.tensor([[10.0]]),
            )
            linear_constraints = (
                torch.tensor([[1.0, 0.0, 0.0]]),
                torch.tensor([[2.0]]),
            )
            _, _, gen_metadata, _ = model.gen(
                n=1,
                bounds=self.search_space_digest.bounds,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                linear_constraints=linear_constraints,
                fixed_features=self.fixed_features,
                pending_observations=self.pending_observations,
                model_gen_options=self.model_gen_options,
                rounding_func=self.rounding_func,
                target_fidelities=self.mf_search_space_digest.target_fidelities,
            )
            expected_X_baseline = _filter_X_observed(
                Xs=self.moo_training_data.Xs,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                bounds=self.search_space_digest.bounds,
                linear_constraints=linear_constraints,
                fixed_features=self.fixed_features,
            )
            ckwargs = _mock_model_infer_objective_thresholds.call_args[1]
            self.assertTrue(
                torch.equal(
                    ckwargs["objective_weights"],
                    objective_weights,
                ))
            oc = ckwargs["outcome_constraints"]
            self.assertTrue(torch.equal(oc[0], outcome_constraints[0]))
            self.assertTrue(torch.equal(oc[1], outcome_constraints[1]))
            m = ckwargs["model"]
            self.assertIsInstance(m, FixedNoiseGP)
            self.assertEqual(m.num_outputs, 2)
            self.assertIn("objective_thresholds", gen_metadata)
            obj_t = gen_metadata["objective_thresholds"]
            self.assertTrue(torch.equal(obj_t[:2], torch.tensor([9.9, 3.3])))
            self.assertTrue(np.isnan(obj_t[2].item()))

        # Avoid polluting the registry for other tests; re-register the correct
        # input constructor for qNEHVI.
        _register_acqf_input_constructor(
            acqf_cls=qNoisyExpectedHypervolumeImprovement,
            input_constructor=qNEHVI_input_constructor,
        )
Example #16
 def __init__(
     self,
     surrogate: Surrogate,
     search_space_digest: SearchSpaceDigest,
     objective_weights: Tensor,
     botorch_acqf_class: Type[AcquisitionFunction],
     options: Optional[Dict[str, Any]] = None,
     pending_observations: Optional[List[Tensor]] = None,
     outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     fixed_features: Optional[Dict[int, float]] = None,
     objective_thresholds: Optional[Tensor] = None,
 ) -> None:
     self.surrogate = surrogate
     self.options = options or {}
     X_pending, X_observed = _get_X_pending_and_observed(
         Xs=self.surrogate.training_data.Xs,
         objective_weights=objective_weights,
         bounds=search_space_digest.bounds,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
     )
     # store objective thresholds for all outcomes (including non-objectives)
     self._objective_thresholds = objective_thresholds
     full_objective_weights = objective_weights
     full_outcome_constraints = outcome_constraints
     # Subset model only to the outcomes we need for the optimization.
     if self.options.get(Keys.SUBSET_MODEL, True):
         subset_model_results = subset_model(
             model=self.surrogate.model,
             objective_weights=objective_weights,
             outcome_constraints=outcome_constraints,
             objective_thresholds=objective_thresholds,
         )
         model = subset_model_results.model
         objective_weights = subset_model_results.objective_weights
         outcome_constraints = subset_model_results.outcome_constraints
         objective_thresholds = subset_model_results.objective_thresholds
         subset_idcs = subset_model_results.indices
     else:
         model = self.surrogate.model
         subset_idcs = None
     # If objective weights suggest multiple objectives but objective
     # thresholds are not specified, infer them using the model that
     # has already been subset to avoid re-subsetting it within
     # `infer_objective_thresholds`.
     if (objective_weights.nonzero().numel() > 1  # pyre-ignore [16]
             and self._objective_thresholds is None):
         self._objective_thresholds = infer_objective_thresholds(
             model=model,
             objective_weights=full_objective_weights,
             outcome_constraints=full_outcome_constraints,
             X_observed=X_observed,
             subset_idcs=subset_idcs,
         )
         objective_thresholds = (
             not_none(self._objective_thresholds)[subset_idcs]
             if subset_idcs is not None else self._objective_thresholds)
     objective = self.get_botorch_objective(
         botorch_acqf_class=botorch_acqf_class,
         model=model,
         objective_weights=objective_weights,
         objective_thresholds=objective_thresholds,
         outcome_constraints=outcome_constraints,
         X_observed=X_observed,
     )
     model_deps = self.compute_model_dependencies(
         surrogate=surrogate,
         search_space_digest=search_space_digest,
         objective_weights=objective_weights,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
         options=self.options,
     )
     input_constructor_kwargs = {
         "X_baseline": X_observed,
         "X_pending": X_pending,
         "objective_thresholds": objective_thresholds,
         "outcome_constraints": outcome_constraints,
         **model_deps,
         **self.options,
     }
     input_constructor = get_acqf_input_constructor(botorch_acqf_class)
     acqf_inputs = input_constructor(
         model=model,
         training_data=self.surrogate.training_data,
         objective=objective,
         **input_constructor_kwargs,
     )
     self.acqf = botorch_acqf_class(**acqf_inputs)  # pyre-ignore [45]
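
A note on the merge order in `input_constructor_kwargs` above: model dependencies are splatted first and `self.options` last, so option values are included and win on key collisions; this is how the `options={"best_f": 0.0}` set in Example #5's `setUp` can reach the wrapped qNEI constructor. A minimal illustration of that dict-merge semantics, using plain dicts with hypothetical values:

model_deps = {"num_fantasies": 16}
options = {"best_f": 0.0, "num_fantasies": 32}
input_constructor_kwargs = {
    "X_baseline": None,
    "X_pending": None,
    **model_deps,
    **options,  # merged last, so these values override model_deps on collisions
}
print(input_constructor_kwargs)
# {'X_baseline': None, 'X_pending': None, 'num_fantasies': 32, 'best_f': 0.0}
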
Example #17
    def test_construct_inputs_qEHVI(self):
        c = get_acqf_input_constructor(qExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)

        # Test defaults
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 2)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"],
                              IdentityMCMultiOutputObjective)
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(torch.equal(partitioning.ref_point,
                                    ref_point_expected))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([128]))
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["constraints"])
        self.assertEqual(kwargs["eta"], 1e-3)

        # Test outcome constraints and custom inputs
        mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]])
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            alpha=0.05,
            eta=1e-2,
            qmc=False,
            mc_samples=64,
        )
        self.assertIsInstance(kwargs["objective"],
                              WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -ref_point_expected))
        Y_expected = mean[:1] * weights
        self.assertTrue(torch.equal(partitioning._neg_Y, -Y_expected))
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([64]))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        cons_eval = cons_tfs[0](mean)
        cons_eval_expected = torch.tensor([-0.25, 0.5])
        self.assertTrue(torch.equal(cons_eval, cons_eval_expected))
        self.assertEqual(kwargs["eta"], 1e-2)

        # Test custom sampler
        custom_sampler = SobolQMCNormalSampler(num_samples=16, seed=1234)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            sampler=custom_sampler,
        )
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([16]))
        self.assertEqual(sampler.seed, 1234)
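
A small standalone check (an aside, not part of the test) of the arithmetic behind `cons_eval_expected` above: the outcome constraint `(A, b)` is turned into a callable returning the slack `A @ y - b`, so for the posterior mean used here the values are `[0.25 - 0.5, 1.0 - 0.5]`.

import torch

A = torch.tensor([[0.0, 1.0]])
b = torch.tensor([[0.5]])
mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]])

slack = (mean * A).sum(dim=-1) - b.squeeze()
print(slack)  # tensor([-0.2500,  0.5000]), matching cons_eval_expected
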
Example #18
    def test_construct_inputs_EHVI(self):
        c = get_acqf_input_constructor(ExpectedHypervolumeImprovement)
        mock_model = mock.Mock()
        objective_thresholds = torch.rand(6)

        # test error on unsupported outcome constraints
        with self.assertRaises(NotImplementedError):
            c(
                model=mock_model,
                training_data=self.bd_td,
                objective_thresholds=objective_thresholds,
                outcome_constraints=mock.Mock(),
            )

        # test with Y_pmean supplied explicitly
        Y_pmean = torch.rand(3, 6)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        alpha_expected = get_default_partitioning_alpha(6)
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, alpha_expected)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -objective_thresholds))

        Y_pmean = torch.rand(3, 2)
        objective_thresholds = torch.rand(2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))

        # test with custom objective
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            Y_pmean=Y_pmean,
            alpha=0.05,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -ref_point_expected))

        # Test without providing Y_pmean (computed from model)
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
Example #19
    def test_construct_inputs_qNEHVI(self):
        c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)
        mock_model = mock.Mock()

        # Test defaults
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
        self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler)
        self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128]))
        self.assertIsInstance(kwargs["objective"],
                              IdentityMCMultiOutputObjective)
        self.assertIsNone(kwargs["constraints"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertEqual(kwargs["eta"], 1e-3)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.0)
        self.assertTrue(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 0)
        self.assertTrue(kwargs["incremental_nehvi"])

        # Test custom inputs
        weights = torch.rand(2)
        objective = WeightedMCMultiOutputObjective(weights=weights)
        X_baseline = torch.rand(2, 2)
        sampler = IIDNormalSampler(num_samples=4)
        outcome_constraints = (torch.tensor([[0.0, 1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=objective,
            X_baseline=X_baseline,
            sampler=sampler,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            eta=1e-2,
            prune_baseline=True,
            alpha=0.1,
            cache_pending=False,
            max_iep=1,
            incremental_nehvi=False,
        )
        ref_point_expected = objective(objective_thresholds)
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
        sampler_ = kwargs["sampler"]
        self.assertIsInstance(sampler_, IIDNormalSampler)
        self.assertEqual(sampler_.sample_shape, torch.Size([4]))
        self.assertEqual(kwargs["objective"], objective)
        cons_tfs_expected = get_outcome_constraint_transforms(
            outcome_constraints)
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        test_Y = torch.rand(1, 2)
        self.assertTrue(
            torch.equal(cons_tfs[0](test_Y), cons_tfs_expected[0](test_Y)))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertEqual(kwargs["eta"], 1e-2)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.1)
        self.assertFalse(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 1)
        self.assertFalse(kwargs["incremental_nehvi"])