Example #1
    def infer_objective_thresholds(
        self,
        search_space: Optional[SearchSpace] = None,
        optimization_config: Optional[OptimizationConfig] = None,
        fixed_features: Optional[ObservationFeatures] = None,
    ) -> List[ObjectiveThreshold]:
        """Infer objective thresholds.

        This method is only applicable for Multi-Objective optimization problems.

        This method uses the model-estimated Pareto frontier over the in-sample points
        to infer absolute (not relativized) objective thresholds.

        This uses a heuristic that sets the objective threshold to be a scaled nadir
        point, where the nadir point is scaled back based on the range of each
        objective across the current in-sample Pareto frontier.
        """

        assert (
            self.is_moo_problem
        ), "Objective thresholds are only supported for multi-objective optimization."

        search_space = (search_space or self._model_space).clone()
        base_gen_args = self._get_transformed_gen_args(
            search_space=search_space,
            optimization_config=optimization_config,
            fixed_features=fixed_features,
        )
        # Get transformed args from ArrayModelbridge.
        array_model_gen_args = self._get_transformed_model_gen_args(
            search_space=base_gen_args.search_space,
            fixed_features=base_gen_args.fixed_features,
            pending_observations={},
            optimization_config=base_gen_args.optimization_config,
        )
        # Get transformed args from TorchModelbridge.
        obj_w, oc_c, l_c, pend_obs, _ = validate_and_apply_final_transform(
            objective_weights=array_model_gen_args.objective_weights,
            outcome_constraints=array_model_gen_args.outcome_constraints,
            pending_observations=None,
            linear_constraints=array_model_gen_args.linear_constraints,
            final_transform=self._array_to_tensor,
        )
        # Infer objective thresholds.
        model = checked_cast(MultiObjectiveBotorchModel, self.model)
        obj_thresholds_arr = infer_objective_thresholds(
            model=not_none(model.model),
            objective_weights=obj_w,
            bounds=array_model_gen_args.search_space_digest.bounds,
            outcome_constraints=oc_c,
            linear_constraints=l_c,
            fixed_features=array_model_gen_args.fixed_features,
            Xs=model.Xs,
        )
        return self._untransform_objective_thresholds(
            objective_thresholds=obj_thresholds_arr,
            objective_weights=obj_w,
            bounds=array_model_gen_args.search_space_digest.bounds,
            fixed_features=array_model_gen_args.fixed_features,
        )
Example #2
 def __init__(
     self,
     surrogate: Surrogate,
     search_space_digest: SearchSpaceDigest,
     objective_weights: Tensor,
     botorch_acqf_class: Type[AcquisitionFunction],
     options: Optional[Dict[str, Any]] = None,
     pending_observations: Optional[List[Tensor]] = None,
     outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     fixed_features: Optional[Dict[int, float]] = None,
     objective_thresholds: Optional[Tensor] = None,
 ) -> None:
     self.surrogate = surrogate
     self.options = options or {}
     X_pending, X_observed = _get_X_pending_and_observed(
         Xs=self.surrogate.training_data.Xs,
         objective_weights=objective_weights,
         bounds=search_space_digest.bounds,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
     )
     # store objective thresholds for all outcomes (including non-objectives)
     self._objective_thresholds = objective_thresholds
     full_objective_weights = objective_weights
     full_outcome_constraints = outcome_constraints
     # Subset model only to the outcomes we need for the optimization.
     if self.options.get(Keys.SUBSET_MODEL, True):
         subset_model_results = subset_model(
             model=self.surrogate.model,
             objective_weights=objective_weights,
             outcome_constraints=outcome_constraints,
             objective_thresholds=objective_thresholds,
         )
         model = subset_model_results.model
         objective_weights = subset_model_results.objective_weights
         outcome_constraints = subset_model_results.outcome_constraints
         objective_thresholds = subset_model_results.objective_thresholds
         subset_idcs = subset_model_results.indices
     else:
         model = self.surrogate.model
         subset_idcs = None
     # If objective weights suggest multiple objectives but objective
     # thresholds are not specified, infer them using the model that
     # has already been subset to avoid re-subsetting it within
     # `infer_objective_thresholds`.
     if (objective_weights.nonzero().numel() > 1  # pyre-ignore [16]
             and self._objective_thresholds is None):
         self._objective_thresholds = infer_objective_thresholds(
             model=model,
             objective_weights=full_objective_weights,
             outcome_constraints=full_outcome_constraints,
             X_observed=X_observed,
             subset_idcs=subset_idcs,
         )
         objective_thresholds = (
             not_none(self._objective_thresholds)[subset_idcs]
             if subset_idcs is not None else self._objective_thresholds)
     objective = self.get_botorch_objective(
         botorch_acqf_class=botorch_acqf_class,
         model=model,
         objective_weights=objective_weights,
         objective_thresholds=objective_thresholds,
         outcome_constraints=outcome_constraints,
         X_observed=X_observed,
     )
     model_deps = self.compute_model_dependencies(
         surrogate=surrogate,
         search_space_digest=search_space_digest,
         objective_weights=objective_weights,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
         options=self.options,
     )
     input_constructor_kwargs = {
         "X_baseline": X_observed,
         "X_pending": X_pending,
         "objective_thresholds": objective_thresholds,
         "outcome_constraints": outcome_constraints,
         **model_deps,
         **self.options,
     }
     input_constructor = get_acqf_input_constructor(botorch_acqf_class)
     acqf_inputs = input_constructor(
         model=model,
         training_data=self.surrogate.training_data,
         objective=objective,
         **input_constructor_kwargs,
     )
     self.acqf = botorch_acqf_class(**acqf_inputs)  # pyre-ignore [45]
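
For orientation, here is a rough, hypothetical sketch of what the `Keys.SUBSET_MODEL` branch accomplishes for the tensor arguments: keep only the outcomes that carry a nonzero objective weight or a nonzero outcome-constraint coefficient, and remap everything onto those indices. Ax's real `subset_model` additionally subsets the BoTorch model itself via `subset_output`; the helper below only illustrates the index bookkeeping.

from typing import Optional, Tuple

import torch


def subset_outcomes(
    objective_weights: torch.Tensor,
    outcome_constraints: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
    objective_thresholds: Optional[torch.Tensor] = None,
):
    # An outcome is kept if it has a nonzero weight or appears in a constraint.
    used = objective_weights != 0
    if outcome_constraints is not None:
        A, _ = outcome_constraints
        used |= (A != 0).any(dim=0)
    idcs = used.nonzero(as_tuple=True)[0]
    new_weights = objective_weights[idcs]
    new_constraints = (
        None if outcome_constraints is None
        else (outcome_constraints[0][:, idcs], outcome_constraints[1])
    )
    new_thresholds = (
        None if objective_thresholds is None else objective_thresholds[idcs]
    )
    return idcs, new_weights, new_constraints, new_thresholds


idcs, w, oc, t = subset_outcomes(torch.tensor([-1.0, -1.0, 0.0]))
print(idcs)  # tensor([0, 1]) -- the zero-weight third outcome is dropped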
Example #3
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,  # objective_directions
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        objective_thresholds: Optional[Tensor] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")
        if (objective_thresholds is not None and
                objective_weights.shape[0] != objective_thresholds.shape[0]):
            raise AxError(
                "Objective weights and thresholds most both contain an element for"
                " each modeled metric.")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = not_none(self.model)
        full_objective_thresholds = objective_thresholds
        full_objective_weights = objective_weights
        full_outcome_constraints = outcome_constraints
        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            subset_model_results = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                objective_thresholds=objective_thresholds,
            )
            model = subset_model_results.model
            objective_weights = subset_model_results.objective_weights
            outcome_constraints = subset_model_results.outcome_constraints
            objective_thresholds = subset_model_results.objective_thresholds
            idcs = subset_model_results.indices
        else:
            idcs = None
        if objective_thresholds is None:
            full_objective_thresholds = infer_objective_thresholds(
                model=model,
                X_observed=not_none(X_observed),
                objective_weights=full_objective_weights,
                outcome_constraints=full_outcome_constraints,
                subset_idcs=idcs,
            )
            # subset the objective thresholds
            objective_thresholds = (full_objective_thresholds
                                    if idcs is None else
                                    full_objective_thresholds[idcs].clone())

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        botorch_rounding_func = get_rounding_func(rounding_func)
        if acf_options.get("random_scalarization", False) or acf_options.get(
                "chebyshev_scalarization", False):
            # If using a list of acquisition functions, the algorithm to generate
            # that list is configured by acquisition_function_kwargs.
            objective_weights_list = [
                randomize_objective_weights(objective_weights, **acf_options)
                for _ in range(n)
            ]
            acquisition_function_list = [
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    **acf_options,
                ) for objective_weights in objective_weights_list
            ]
            acquisition_function_list = [
                checked_cast(AcquisitionFunction, acq_function)
                for acq_function in acquisition_function_list
            ]
            # Multiple acquisition functions require a sequential optimizer,
            # so always use scipy_optimizer_list here.
            # TODO(jej): Allow any optimizer.
            candidates, expected_acquisition_value = scipy_optimizer_list(
                acq_function_list=acquisition_function_list,
                bounds=bounds_,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        else:
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                objective_thresholds=objective_thresholds,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=acquisition_function,
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        gen_metadata = {
            "expected_acquisition_value": expected_acquisition_value.tolist(),
            "objective_thresholds": not_none(full_objective_thresholds).cpu(),
        }
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            gen_metadata,
            None,
        )
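
Both optimizer calls above pass the linear constraints through `_to_inequality_constraints`. As a hedged illustration (not the Ax source), the conversion from `A x <= b` rows to the `(indices, coefficients, rhs)` tuples that BoTorch's optimizers interpret as `sum(coefficients * x[indices]) >= rhs` amounts to negating each row:

from typing import List, Optional, Tuple

import torch


def to_inequality_constraints(
    linear_constraints: Optional[Tuple[torch.Tensor, torch.Tensor]],
) -> Optional[List[Tuple[torch.Tensor, torch.Tensor, float]]]:
    if linear_constraints is None:
        return None
    A, b = linear_constraints  # rows encode A x <= b
    constraints = []
    for j in range(A.shape[0]):
        idcs = A[j].nonzero(as_tuple=True)[0]
        # Flip the sign to express the row as -A[j] x >= -b[j].
        constraints.append((idcs, -A[j, idcs], -b[j].item()))
    return constraints


A = torch.tensor([[1.0, 0.0, 0.0]])
b = torch.tensor([[2.0]])
print(to_inequality_constraints((A, b)))  # [(tensor([0]), tensor([-1.]), -2.0)]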
Example #4
    def test_infer_objective_thresholds(self, cuda=False):
        for dtype in (torch.float, torch.double):

            tkwargs = {
                "device": torch.device("cuda") if cuda else torch.device("cpu"),
                "dtype": dtype,
            }
            Xs = [torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]], **tkwargs)]
            bounds = [(0.0, 1.0), (1.0, 4.0), (2.0, 5.0)]
            outcome_constraints = (
                torch.tensor([[1.0, 0.0, 0.0]], **tkwargs),
                torch.tensor([[10.0]], **tkwargs),
            )
            linear_constraints = (
                torch.tensor([1.0, 0.0, 0.0], **tkwargs),
                torch.tensor([2.0], **tkwargs),
            )
            objective_weights = torch.tensor([-1.0, -1.0, 0.0], **tkwargs)
            with ExitStack() as es:
                _mock_get_X_pending_and_observed = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults."
                        "_get_X_pending_and_observed",
                        wraps=_get_X_pending_and_observed,
                    ))
                _mock_infer_reference_point = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults.infer_reference_point",
                        wraps=infer_reference_point,
                    ))
                # the full (un-subset) model reports three outputs so that
                # subsetting is triggered
                _mock_num_outputs = es.enter_context(
                    mock.patch(
                        "botorch.utils.testing.MockModel.num_outputs",
                        new_callable=mock.PropertyMock,
                    ))
                _mock_num_outputs.return_value = 3
                # after subsetting, the model will only have two outputs
                model = MockModel(
                    MockPosterior(mean=torch.tensor(
                        [
                            [11.0, 2.0],
                            [9.0, 3.0],
                        ],
                        **tkwargs,
                    )))
                es.enter_context(
                    mock.patch.object(
                        model,
                        "subset_output",
                        return_value=model,
                    ))
                # test passing Xs
                obj_thresholds = infer_objective_thresholds(
                    model,
                    bounds=bounds,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    fixed_features={},
                    linear_constraints=linear_constraints,
                    Xs=Xs + Xs,
                )
                _mock_get_X_pending_and_observed.assert_called_once()
                ckwargs = _mock_get_X_pending_and_observed.call_args[1]
                actual_Xs = ckwargs["Xs"]
                for X in actual_Xs:
                    self.assertTrue(torch.equal(X, Xs[0]))
                self.assertEqual(ckwargs["bounds"], bounds)
                self.assertTrue(
                    torch.equal(ckwargs["objective_weights"],
                                objective_weights))
                oc = ckwargs["outcome_constraints"]
                self.assertTrue(torch.equal(oc[0], outcome_constraints[0]))
                self.assertTrue(torch.equal(oc[1], outcome_constraints[1]))
                self.assertEqual(ckwargs["fixed_features"], {})
                lc = ckwargs["linear_constraints"]
                self.assertTrue(torch.equal(lc[0], linear_constraints[0]))
                self.assertTrue(torch.equal(lc[1], linear_constraints[1]))
                _mock_infer_reference_point.assert_called_once()
                ckwargs = _mock_infer_reference_point.call_args[1]
                self.assertEqual(ckwargs["scale"], 0.1)
                self.assertTrue(
                    torch.equal(ckwargs["pareto_Y"],
                                torch.tensor([[-9.0, -3.0]], **tkwargs)))
                self.assertTrue(
                    torch.equal(obj_thresholds[:2],
                                torch.tensor([9.9, 3.3], **tkwargs)))
                self.assertTrue(np.isnan(obj_thresholds[2].item()))
            with ExitStack() as es:
                _mock_get_X_pending_and_observed = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults."
                        "_get_X_pending_and_observed",
                        wraps=_get_X_pending_and_observed,
                    ))
                _mock_infer_reference_point = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults.infer_reference_point",
                        wraps=infer_reference_point,
                    ))
                model = MockModel(
                    MockPosterior(mean=torch.tensor(
                        # after subsetting, there should only be two outcomes
                        [
                            [11.0, 2.0],
                            [9.0, 3.0],
                        ],
                        **tkwargs,
                    )))

                # test passing X_observed
                obj_thresholds = infer_objective_thresholds(
                    model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=Xs[0],
                )
                _mock_get_X_pending_and_observed.assert_not_called()
                self.assertTrue(
                    torch.equal(obj_thresholds[:2],
                                torch.tensor([9.9, 3.3], **tkwargs)))
                self.assertTrue(np.isnan(obj_thresholds[2].item()))
            # test that value error is raised if bounds are not supplied
            with self.assertRaises(ValueError):
                infer_objective_thresholds(
                    model,
                    objective_weights=objective_weights,
                    Xs=Xs + Xs,
                )
            # test that value error is raised if Xs are not supplied
            with self.assertRaises(ValueError):
                infer_objective_thresholds(
                    model,
                    bounds=bounds,
                    objective_weights=objective_weights,
                )
            # test subset_model without subset_idcs
            subset_model = MockModel(
                MockPosterior(mean=torch.tensor(
                    [
                        [11.0, 2.0],
                        [9.0, 3.0],
                    ],
                    **tkwargs,
                )))
            obj_thresholds = infer_objective_thresholds(
                subset_model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=Xs[0],
            )
            self.assertTrue(
                torch.equal(obj_thresholds[:2],
                            torch.tensor([9.9, 3.3], **tkwargs)))
            self.assertTrue(np.isnan(obj_thresholds[2].item()))
            # test passing subset_idcs
            subset_idcs = torch.tensor([0, 1],
                                       dtype=torch.long,
                                       device=tkwargs["device"])
            obj_thresholds = infer_objective_thresholds(
                subset_model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=Xs[0],
                subset_idcs=subset_idcs,
            )
            self.assertTrue(
                torch.equal(obj_thresholds[:2],
                            torch.tensor([9.9, 3.3], **tkwargs)))
            self.assertTrue(np.isnan(obj_thresholds[2].item()))
            # test without subsetting (e.g. if there are
            # 3 metrics for 2 objectives + 1 outcome constraint)
            outcome_constraints = (
                torch.tensor([[0.0, 0.0, 1.0]], **tkwargs),
                torch.tensor([[5.0]], **tkwargs),
            )
            with ExitStack() as es:
                _mock_get_X_pending_and_observed = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults."
                        "_get_X_pending_and_observed",
                        wraps=_get_X_pending_and_observed,
                    ))
                _mock_infer_reference_point = es.enter_context(
                    mock.patch(
                        "ax.models.torch.botorch_moo_defaults.infer_reference_point",
                        wraps=infer_reference_point,
                    ))
                model = MockModel(
                    MockPosterior(mean=torch.tensor(
                        [
                            [11.0, 2.0, 6.0],
                            [9.0, 3.0, 4.0],
                        ],
                        **tkwargs,
                    )))
                # test passing Xs
                obj_thresholds = infer_objective_thresholds(
                    model,
                    bounds=bounds,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    fixed_features={},
                    linear_constraints=linear_constraints,
                    Xs=Xs + Xs + Xs,
                )
                self.assertTrue(
                    torch.equal(obj_thresholds[:2],
                                torch.tensor([9.9, 3.3], **tkwargs)))
                self.assertTrue(np.isnan(obj_thresholds[2].item()))
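
The values asserted throughout the test (9.9 and 3.3, with scale 0.1) are consistent with the single-point-frontier behaviour: the lone Pareto point is pushed out by 10% of its own magnitude in the maximization direction and then mapped back through the negative objective weights. A quick, hypothetical arithmetic check:

import torch

scale = 0.1
pareto_Y = torch.tensor([[-9.0, -3.0]])  # maximization space (objective weights are -1)
# With a single frontier point there is no range to scale, so the point's own
# magnitude is used: ref = Y - scale * |Y|.
ref_point = pareto_Y[0] - scale * pareto_Y[0].abs()  # tensor([-9.9000, -3.3000])
thresholds = -ref_point  # back to the original (minimized) metric scale
print(thresholds)  # tensor([9.9000, 3.3000])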
Example #5
    def infer_objective_thresholds(
        self,
        search_space: Optional[SearchSpace] = None,
        optimization_config: Optional[OptimizationConfig] = None,
        fixed_features: Optional[ObservationFeatures] = None,
    ) -> List[ObjectiveThreshold]:
        """Infer objective thresholds.

        This method uses the model-estimated Pareto frontier over the in-sample points
        to infer absolute (not relativized) objective thresholds.

        This uses a heuristic that sets the objective threshold to be a scaled nadir
        point, where the nadir point is scaled back based on the range of each
        objective across the current in-sample Pareto frontier.
        """
        if search_space is None:
            search_space = self._model_space
        search_space = search_space.clone()
        base_gen_args = self._get_transformed_gen_args(
            search_space=search_space,
            optimization_config=optimization_config,
            fixed_features=fixed_features,
        )
        # get transformed args from ArrayModelbridge
        array_model_gen_args = self._get_transformed_model_gen_args(
            search_space=base_gen_args.search_space,
            fixed_features=base_gen_args.fixed_features,
            pending_observations={},
            optimization_config=base_gen_args.optimization_config,
        )
        # get transformed args from TorchModelbridge
        obj_w, oc_c, l_c, pend_obs, _ = validate_and_apply_final_transform(
            objective_weights=array_model_gen_args.objective_weights,
            outcome_constraints=array_model_gen_args.outcome_constraints,
            pending_observations=None,
            linear_constraints=array_model_gen_args.linear_constraints,
            final_transform=self._array_to_tensor,
        )
        # infer objective thresholds
        model = not_none(self.model)
        try:
            torch_model = model.model  # pyre-ignore [16]
            Xs = model.Xs  # pyre-ignore [16]
        except AttributeError:
            raise AxError(
                "infer_objective_thresholds requires a TorchModel with model "
                "and Xs attributes.")
        obj_thresholds_arr = infer_objective_thresholds(
            model=torch_model,
            objective_weights=obj_w,
            bounds=array_model_gen_args.search_space_digest.bounds,
            outcome_constraints=oc_c,
            linear_constraints=l_c,
            fixed_features=array_model_gen_args.fixed_features,
            Xs=Xs,
        )
        return self.untransform_objective_thresholds(
            objective_thresholds=obj_thresholds_arr,
            objective_weights=obj_w,
            bounds=array_model_gen_args.search_space_digest.bounds,
            fixed_features=array_model_gen_args.fixed_features,
        )