Example #1
def get_weighted_mc_objective_and_objective_thresholds(
    objective_weights: Tensor, objective_thresholds: Tensor
) -> Tuple[WeightedMCMultiOutputObjective, Tensor]:
    r"""Construct weighted objective and apply the weights to objective thresholds.

    Args:
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.

    Returns:
        A two-element tuple containing:

            - The `WeightedMCMultiOutputObjective`, restricted to the outcomes
              with nonzero weights.
            - The objective thresholds for those outcomes, multiplied by the
              corresponding weights.

    """
    # pyre-ignore [16]
    nonzero_idcs = objective_weights.nonzero(as_tuple=False).view(-1)
    objective_weights = objective_weights[nonzero_idcs]
    objective_thresholds = objective_thresholds[nonzero_idcs]
    objective = WeightedMCMultiOutputObjective(weights=objective_weights,
                                               outcomes=nonzero_idcs.tolist())
    objective_thresholds = torch.mul(objective_thresholds, objective_weights)
    return objective, objective_thresholds
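
A minimal usage sketch for the helper above (added for illustration and not part of the original example; the tensor values are hypothetical):

# A zero weight drops the corresponding outcome entirely.
import torch

objective_weights = torch.tensor([1.0, 0.0, -1.0])
objective_thresholds = torch.tensor([10.0, 5.0, 2.0])

objective, weighted_thresholds = get_weighted_mc_objective_and_objective_thresholds(
    objective_weights=objective_weights,
    objective_thresholds=objective_thresholds,
)
# `objective` acts only on outcomes 0 and 2; weighted_thresholds == tensor([10., -2.])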
Example #2
def get_botorch_objective(
    model: Model,
    objective_weights: Tensor,
    use_scalarized_objective: bool = True,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Constructs a BoTorch `AcquisitionObjective` object.

    Args:
        model: A BoTorch Model
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        use_scalarized_objective: A boolean parameter that defaults to True,
            specifying whether ScalarizedObjective should be used.
            NOTE: when using outcome_constraints, use_scalarized_objective
            will be ignored.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A BoTorch `AcquisitionObjective` object. It will be one of:
        `WeightedMCMultiOutputObjective`, `ConstrainedMCObjective`,
        `ScalarizedObjective`, or `LinearMCObjective`.
    """
    if objective_thresholds is not None:
        nonzero_idcs = torch.nonzero(objective_weights).view(-1)
        objective_weights = objective_weights[nonzero_idcs]
        return WeightedMCMultiOutputObjective(weights=objective_weights,
                                              outcomes=nonzero_idcs.tolist())
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch Objective.")
    if outcome_constraints:
        if use_scalarized_objective:
            logger.warning(
                "Currently cannot use ScalarizedObjective when there are outcome "
                "constraints. Ignoring (default) kwarg `use_scalarized_objective`"
                "= True. Creating ConstrainedMCObjective.")
        obj_tf = get_objective_weights_transform(objective_weights)

        def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
            return obj_tf(samples)

        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        return ConstrainedMCObjective(objective=objective,
                                      constraints=con_tfs or [],
                                      infeasible_cost=inf_cost)
    elif use_scalarized_objective:
        return ScalarizedObjective(weights=objective_weights)
    return LinearMCObjective(weights=objective_weights)
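
An illustrative call pattern for the function above (not from the project; since `model` is only consulted on the outcome-constrained branch, passing `None` works for the two branches shown, and all tensors are placeholders):

import torch

weights = torch.tensor([1.0, -1.0])

# Multi-objective branch: thresholds given -> WeightedMCMultiOutputObjective.
moo_objective = get_botorch_objective(
    model=None,  # ignored on this branch
    objective_weights=weights,
    objective_thresholds=torch.tensor([0.5, 0.1]),
)

# Scalarized branch: no constraints, X_observed given -> ScalarizedObjective.
scalar_objective = get_botorch_objective(
    model=None,  # ignored on this branch
    objective_weights=weights,
    X_observed=torch.rand(5, 3),
)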
Example #3
    def test_weighted_mc_multi_output_objective(self):
        with self.assertRaises(BotorchTensorDimensionError):
            WeightedMCMultiOutputObjective(weights=torch.rand(3, 1))
        with self.assertRaises(BotorchTensorDimensionError):
            WeightedMCMultiOutputObjective(weights=torch.rand(3),
                                           outcomes=[0, 1],
                                           num_outcomes=3)
        for batch_shape, m, dtype in itertools.product(
                ([], [3]), (2, 3), (torch.float, torch.double)):
            weights = torch.rand(m, device=self.device, dtype=dtype)
            objective = WeightedMCMultiOutputObjective(weights=weights)
            samples = torch.rand(*batch_shape,
                                 2,
                                 m,
                                 device=self.device,
                                 dtype=dtype)
            self.assertTrue(torch.equal(objective(samples), samples * weights))
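
Outside the test harness, the behavior exercised above boils down to a few lines (a sketch, assuming the standard BoTorch import path; the values are arbitrary):

import torch
from botorch.acquisition.multi_objective.objective import WeightedMCMultiOutputObjective

weights = torch.tensor([2.0, 0.5])
objective = WeightedMCMultiOutputObjective(weights=weights)

samples = torch.rand(4, 3, 2)  # `sample_shape x q x m` posterior samples
weighted = objective(samples)  # elementwise rescaling: samples * weights
assert torch.equal(weighted, samples * weights)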
Example #4
File: utils.py Project: Balandat/Ax
def get_botorch_objective_and_transform(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> Tuple[Optional[MCAcquisitionObjective], Optional[PosteriorTransform]]:
    """Constructs a BoTorch `AcquisitionObjective` object.

    Args:
        model: A BoTorch Model
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        objective_thresholds: A tensor containing thresholds forming a reference point
            from which to calculate pareto frontier hypervolume. Points that do not
            dominate the objective_thresholds contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        A two-tuple containing (optionally) an `MCAcquisitionObjective` and
        (optionally) a `PosteriorTransform`.
    """
    if objective_thresholds is not None:
        # we are doing multi-objective optimization
        nonzero_idcs = torch.nonzero(objective_weights).view(-1)
        objective_weights = objective_weights[nonzero_idcs]
        objective = WeightedMCMultiOutputObjective(
            weights=objective_weights, outcomes=nonzero_idcs.tolist())
        return objective, None
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch objective.")
    if outcome_constraints:
        # If there are outcome constraints, we use MC acquisition functions.
        obj_tf = get_objective_weights_transform(objective_weights)

        def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
            return obj_tf(samples)

        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=objective,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
        return objective, None
    # Case of linear weights - use ScalarizedPosteriorTransform
    transform = ScalarizedPosteriorTransform(weights=objective_weights)
    return None, transform
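
A sketch of the two return shapes (added for illustration; as in Example #2, `model` is only used when outcome constraints are present, so `None` suffices here, and the tensors are placeholders):

import torch

weights = torch.tensor([1.0, 2.0])

# Multi-objective case: an MCAcquisitionObjective is returned, no transform.
objective, transform = get_botorch_objective_and_transform(
    model=None,  # ignored on this branch
    objective_weights=weights,
    objective_thresholds=torch.tensor([0.0, 0.0]),
)
# objective is a WeightedMCMultiOutputObjective, transform is None

# Linear-weights case: no MC objective, a ScalarizedPosteriorTransform instead.
objective, transform = get_botorch_objective_and_transform(
    model=None,  # ignored on this branch
    objective_weights=weights,
    X_observed=torch.rand(5, 3),
)
# objective is None, transform is a ScalarizedPosteriorTransform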
Example #5
    def test_init(
        self,
        mock_botorch_acqf_class,
        mock_get_objective,
        mock_subset_model,
        mock_get_X,
        mock_get_constraints,
    ):
        botorch_objective = WeightedMCMultiOutputObjective(
            weights=self.objective_weights[:2], outcomes=[0, 1])
        mock_get_objective.return_value = botorch_objective
        mock_get_constraints.return_value = self.con_tfs
        mock_get_X.return_value = (self.pending_observations[0], self.X[:1])
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        # Check `_get_X_pending_and_observed` kwargs
        mock_get_X.assert_called_with(
            Xs=[
                self.training_data.X, self.training_data.X,
                self.training_data.X
            ],
            pending_observations=self.pending_observations,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        # Call `subset_model` only when needed
        mock_subset_model.assert_called_with(
            acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            objective_thresholds=self.objective_thresholds,
        )
        mock_subset_model.reset_mock()
        mock_botorch_acqf_class.reset_mock()
        self.options[Keys.SUBSET_MODEL] = False
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_subset_model.assert_not_called()
        # Check final `acqf` creation
        mock_botorch_acqf_class.assert_called_once()
        _, ckwargs = mock_botorch_acqf_class.call_args
        self.assertIs(ckwargs["model"], self.acquisition.surrogate.model)
        self.assertIs(ckwargs["objective"], botorch_objective)
        self.assertTrue(
            torch.equal(ckwargs["X_pending"], self.pending_observations[0]))
        self.assertEqual(
            ckwargs["ref_point"],
            (self.objective_thresholds[:2] *
             self.objective_weights[:2]).tolist(),
        )
        self.assertIsInstance(ckwargs["partitioning"], BoxDecomposition)
        self.assertIs(ckwargs["constraints"], self.con_tfs)
        self.assertIsInstance(ckwargs["sampler"], SobolQMCNormalSampler)

        # qNoisyExpectedImprovement not supported.
        with self.assertRaisesRegex(
                UnsupportedError,
                "Only qExpectedHypervolumeImprovement is currently supported",
        ):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=self.objective_thresholds,
                botorch_acqf_class=qNoisyExpectedImprovement,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )

        with self.assertRaisesRegex(ValueError,
                                    "Objective Thresholds required"):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=None,
                botorch_acqf_class=self.botorch_acqf_class,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )
Example #6
    def test_construct_inputs_qNEHVI(self):
        c = get_acqf_input_constructor(qNoisyExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)
        mock_model = mock.Mock()

        # Test defaults
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], self.bd_td.X))
        self.assertIsInstance(kwargs["sampler"], SobolQMCNormalSampler)
        self.assertEqual(kwargs["sampler"].sample_shape, torch.Size([128]))
        self.assertIsInstance(kwargs["objective"],
                              IdentityMCMultiOutputObjective)
        self.assertIsNone(kwargs["constraints"])
        self.assertIsNone(kwargs["X_pending"])
        self.assertEqual(kwargs["eta"], 1e-3)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.0)
        self.assertTrue(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 0)
        self.assertTrue(kwargs["incremental_nehvi"])

        # Test custom inputs
        weights = torch.rand(2)
        objective = WeightedMCMultiOutputObjective(weights=weights)
        X_baseline = torch.rand(2, 2)
        sampler = IIDNormalSampler(num_samples=4)
        outcome_constraints = (torch.tensor([[0.0,
                                              1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=objective,
            X_baseline=X_baseline,
            sampler=sampler,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            eta=1e-2,
            prune_baseline=True,
            alpha=0.1,
            cache_pending=False,
            max_iep=1,
            incremental_nehvi=False,
        )
        ref_point_expected = objective(objective_thresholds)
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        self.assertTrue(torch.equal(kwargs["X_baseline"], X_baseline))
        sampler_ = kwargs["sampler"]
        self.assertIsInstance(sampler_, IIDNormalSampler)
        self.assertEqual(sampler_.sample_shape, torch.Size([4]))
        self.assertEqual(kwargs["objective"], objective)
        cons_tfs_expected = get_outcome_constraint_transforms(
            outcome_constraints)
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        test_Y = torch.rand(1, 2)
        self.assertTrue(
            torch.equal(cons_tfs[0](test_Y), cons_tfs_expected[0](test_Y)))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        self.assertEqual(kwargs["eta"], 1e-2)
        self.assertTrue(kwargs["prune_baseline"])
        self.assertEqual(kwargs["alpha"], 0.1)
        self.assertFalse(kwargs["cache_pending"])
        self.assertEqual(kwargs["max_iep"], 1)
        self.assertFalse(kwargs["incremental_nehvi"])
Example #7
    def test_construct_inputs_qEHVI(self):
        c = get_acqf_input_constructor(qExpectedHypervolumeImprovement)
        objective_thresholds = torch.rand(2)

        # Test defaults
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 2)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"],
                              IdentityMCMultiOutputObjective)
        ref_point_expected = objective_thresholds
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(torch.equal(partitioning.ref_point,
                                    ref_point_expected))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([128]))
        self.assertIsNone(kwargs["X_pending"])
        self.assertIsNone(kwargs["constraints"])
        self.assertEqual(kwargs["eta"], 1e-3)

        # Test outcome constraints and custom inputs
        mean = torch.tensor([[1.0, 0.25], [0.5, 1.0]])
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        outcome_constraints = (torch.tensor([[0.0,
                                              1.0]]), torch.tensor([[0.5]]))
        X_pending = torch.rand(1, 2)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            outcome_constraints=outcome_constraints,
            X_pending=X_pending,
            alpha=0.05,
            eta=1e-2,
            qmc=False,
            mc_samples=64,
        )
        self.assertIsInstance(kwargs["objective"],
                              WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -ref_point_expected))
        Y_expected = mean[:1] * weights
        self.assertTrue(torch.equal(partitioning._neg_Y, -Y_expected))
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, IIDNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([64]))
        self.assertTrue(torch.equal(kwargs["X_pending"], X_pending))
        cons_tfs = kwargs["constraints"]
        self.assertEqual(len(cons_tfs), 1)
        cons_eval = cons_tfs[0](mean)
        cons_eval_expected = torch.tensor([-0.25, 0.5])
        self.assertTrue(torch.equal(cons_eval, cons_eval_expected))
        self.assertEqual(kwargs["eta"], 1e-2)

        # Test custom sampler
        custom_sampler = SobolQMCNormalSampler(num_samples=16, seed=1234)
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            sampler=custom_sampler,
        )
        sampler = kwargs["sampler"]
        self.assertIsInstance(sampler, SobolQMCNormalSampler)
        self.assertEqual(sampler.sample_shape, torch.Size([16]))
        self.assertEqual(sampler.seed, 1234)
Example #8
    def test_construct_inputs_EHVI(self):
        c = get_acqf_input_constructor(ExpectedHypervolumeImprovement)
        mock_model = mock.Mock()
        objective_thresholds = torch.rand(6)

        # test error on unsupported outcome constraints
        with self.assertRaises(NotImplementedError):
            c(
                model=mock_model,
                training_data=self.bd_td,
                objective_thresholds=objective_thresholds,
                outcome_constraints=mock.Mock(),
            )

        # test with Y_pmean supplied explicitly
        Y_pmean = torch.rand(3, 6)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        alpha_expected = get_default_partitioning_alpha(6)
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, alpha_expected)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -objective_thresholds))

        Y_pmean = torch.rand(3, 2)
        objective_thresholds = torch.rand(2)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            Y_pmean=Y_pmean,
        )
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))

        # test with custom objective
        weights = torch.rand(2)
        obj = WeightedMCMultiOutputObjective(weights=weights)
        kwargs = c(
            model=mock_model,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
            objective=obj,
            Y_pmean=Y_pmean,
            alpha=0.05,
        )
        self.assertEqual(kwargs["model"], mock_model)
        self.assertIsInstance(kwargs["objective"],
                              WeightedMCMultiOutputObjective)
        ref_point_expected = objective_thresholds * weights
        self.assertTrue(torch.equal(kwargs["ref_point"], ref_point_expected))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, NondominatedPartitioning)
        self.assertEqual(partitioning.alpha, 0.05)
        self.assertTrue(
            torch.equal(partitioning._neg_ref_point, -ref_point_expected))

        # Test without providing Y_pmean (computed from model)
        mean = torch.rand(1, 2)
        variance = torch.ones(1, 1)
        mm = MockModel(MockPosterior(mean=mean, variance=variance))
        kwargs = c(
            model=mm,
            training_data=self.bd_td,
            objective_thresholds=objective_thresholds,
        )
        self.assertIsInstance(kwargs["objective"],
                              IdentityAnalyticMultiOutputObjective)
        self.assertTrue(torch.equal(kwargs["ref_point"], objective_thresholds))
        partitioning = kwargs["partitioning"]
        self.assertIsInstance(partitioning, FastNondominatedPartitioning)
        self.assertTrue(
            torch.equal(partitioning.ref_point, objective_thresholds))
        self.assertTrue(torch.equal(partitioning._neg_Y, -mean))
Example #9
    def test_init(
        self,
        mock_botorch_acqf_class,
        mock_get_objective,
        mock_subset_model,
        mock_get_X,
    ):
        botorch_objective = WeightedMCMultiOutputObjective(
            weights=self.objective_weights, outcomes=[0, 1])
        mock_get_objective.return_value = botorch_objective
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        # Check `_get_X_pending_and_observed` kwargs
        mock_get_X.assert_called_with(
            Xs=[self.training_data.X, self.training_data.X],
            pending_observations=self.pending_observations,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
            bounds=self.bounds,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
        )
        # Call `subset_model` only when needed
        mock_subset_model.assert_called_with(
            acquisition.surrogate.model,
            objective_weights=self.objective_weights,
            outcome_constraints=self.outcome_constraints,
        )
        mock_subset_model.reset_mock()
        self.options[Keys.SUBSET_MODEL] = False
        acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )
        mock_subset_model.assert_not_called()
        # Check final `acqf` creation
        mock_botorch_acqf_class.assert_called_with(
            model=self.acquisition.surrogate.model,
            objective=botorch_objective,
            X_pending=torch.tensor([2.0]),
            ref_point=self.objective_thresholds.tolist(),
            partitioning=mock.ANY,
            constraints=mock.ANY,
            sampler=mock.ANY,
        )

        # qNoisyExpectedImprovement not supported.
        with self.assertRaisesRegex(
                UnsupportedError,
                "Only qExpectedHypervolumeImprovement is currently supported",
        ):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=self.objective_thresholds,
                botorch_acqf_class=qNoisyExpectedImprovement,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )

        with self.assertRaisesRegex(ValueError,
                                    "Objective Thresholds required"):
            MOOAcquisition(
                surrogate=self.surrogate,
                bounds=self.bounds,
                objective_weights=self.objective_weights,
                objective_thresholds=None,
                botorch_acqf_class=self.botorch_acqf_class,
                pending_observations=self.pending_observations,
                outcome_constraints=self.outcome_constraints,
                linear_constraints=self.linear_constraints,
                fixed_features=self.fixed_features,
                target_fidelities=self.target_fidelities,
                options=self.options,
            )