Example 1
    def __init__(
        self,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        objective_thresholds: Optional[Tensor],
        botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
        options: Optional[Dict[str, Any]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> None:
        botorch_acqf_class = not_none(botorch_acqf_class
                                      or self.default_botorch_acqf_class)
        if not issubclass(botorch_acqf_class, qExpectedHypervolumeImprovement):
            raise UnsupportedError(
                "Only qExpectedHypervolumeImprovement is currently supported as "
                f"a MOOAcquisition botorch_acqf_class. Got: {botorch_acqf_class}."
            )

        # Calculate `Y` and inject into options.
        # NOTE: Ideally we would do this in `compute_model_dependencies` and not need a
        # separate `__init__` for `MOOAcquisition`, but the obstacle is currently that
        # in that case `Y` would not be `subset` along with the model. This should be
        # revisited in the future.
        trd = self._extract_training_data(surrogate=surrogate)
        Ys = (
            [trd.Y] if isinstance(trd, TrainingData)
            else [i.Y for i in trd.values()]
        )
        options = options or {}

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            _, _, _, Ys = subset_model(
                model=surrogate.model,
                objective_weights=objective_weights,
                Ys=Ys,
            )

        # pyre-ignore [6]: pyre incorrectly infers that `Ys` is optional here.
        Y = torch.stack(Ys).transpose(0, 1).squeeze()
        options["Y"] = Y

        super().__init__(
            surrogate=surrogate,
            botorch_acqf_class=botorch_acqf_class,
            bounds=bounds,
            objective_weights=objective_weights,
            objective_thresholds=objective_thresholds,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            pending_observations=pending_observations,
            target_fidelities=target_fidelities,
            options=options,
        )
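A note on the `Y` computed above: the per-outcome training targets are stacked into a single `(n x m)` tensor before being injected into `options`. A minimal sketch of that reshaping with plain tensors (the `(n x 1)` column shape per outcome is an assumption inferred from this code, not from Ax documentation):

import torch

# Two outcomes, three training points each, stored as (n x 1) columns.
Ys = [torch.tensor([[1.0], [2.0], [3.0]]), torch.tensor([[4.0], [5.0], [6.0]])]
Y = torch.stack(Ys).transpose(0, 1).squeeze()  # (m, n, 1) -> (n, m, 1) -> (n, m)
assert Y.shape == (3, 2)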
Example 2
 def testSubsetModel(self):
     x = torch.zeros(1, 1)
     y = torch.zeros(1, 2)
     model = SingleTaskGP(x, y)
     self.assertEqual(model.num_outputs, 2)
     # basic test, can subset
     obj_weights = torch.tensor([1.0, 0.0])
     model_sub, obj_weights_sub, ocs_sub = subset_model(model, obj_weights)
     self.assertIsNone(ocs_sub)
     self.assertEqual(model_sub.num_outputs, 1)
     self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
     # basic test, cannot subset
     obj_weights = torch.tensor([1.0, 2.0])
     model_sub, obj_weights_sub, ocs_sub = subset_model(model, obj_weights)
     self.assertIsNone(ocs_sub)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     # test w/ outcome constraints, can subset
     obj_weights = torch.tensor([1.0, 0.0])
     ocs = (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0]))
     model_sub, obj_weights_sub, ocs_sub = subset_model(
         model, obj_weights, ocs)
     self.assertEqual(model_sub.num_outputs, 1)
     self.assertTrue(torch.equal(obj_weights_sub, torch.tensor([1.0])))
     self.assertTrue(torch.equal(ocs_sub[0], torch.tensor([[1.0]])))
     self.assertTrue(torch.equal(ocs_sub[1], torch.tensor([1.0])))
     # test w/ outcome constraints, cannot subset
     obj_weights = torch.tensor([1.0, 0.0])
     ocs = (torch.tensor([[0.0, 1.0]]), torch.tensor([1.0]))
     model_sub, obj_weights_sub, ocs_sub = subset_model(
         model, obj_weights, ocs)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     self.assertIs(ocs_sub, ocs)  # check identity
     # test unsupported
     yvar = torch.ones(1, 2)
     model = HeteroskedasticSingleTaskGP(x, y, yvar)
     model_sub, obj_weights_sub, ocs = subset_model(model, obj_weights)
     self.assertIsNone(ocs)
     self.assertIs(model_sub, model)  # check identity
     self.assertIs(obj_weights_sub, obj_weights)  # check identity
     # test error on size inconsistency
     obj_weights = torch.ones(3)
     with self.assertRaises(RuntimeError):
         subset_model(model, obj_weights)
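The test exercises the core subsetting rule: an outcome can be dropped when its objective weight is zero and no outcome constraint touches it. A minimal sketch of that index computation in plain PyTorch (`relevant_outcome_indices` is a hypothetical helper written for illustration, not part of Ax):

import torch
from torch import Tensor
from typing import Optional, Tuple

def relevant_outcome_indices(
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
) -> Tensor:
    # An outcome is kept if it has a nonzero objective weight or appears
    # with a nonzero coefficient in some row of the constraint matrix A.
    nonzero = objective_weights != 0
    if outcome_constraints is not None:
        A, _ = outcome_constraints
        nonzero = nonzero | torch.any(A != 0, dim=0)
    return nonzero.nonzero().view(-1)

# Mirrors the "can subset" constrained case in the test above.
idcs = relevant_outcome_indices(
    torch.tensor([1.0, 0.0]),
    (torch.tensor([[1.0, 0.0]]), torch.tensor([1.0])),
)
assert idcs.tolist() == [0]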
Example 3
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        r"""Generate new candidates.

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations:  A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            4-element tuple containing

            - (n x d) tensor of generated points.
            - n-tensor of weights for each point.
            - Dictionary of model-specific metadata for the given
                generation candidates.
            - Optional list of candidate metadata (here always ``None``).
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        # subset model only to the outcomes we need for the optimization
        model = not_none(self.model)
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        objective = get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )

        inequality_constraints = _to_inequality_constraints(linear_constraints)
        # TODO: update optimizers to handle inequality_constraints
        if inequality_constraints is not None:
            raise UnsupportedError(
                "Inequality constraints are not yet supported for KnowledgeGradient!"
            )

        # extract a few options
        n_fantasies = acf_options.get("num_fantasies", 64)
        qmc = acf_options.get("qmc", True)
        seed_inner = acf_options.get("seed_inner", None)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        # get current value
        current_value = self._get_current_value(
            model=model,
            bounds=bounds,
            X_observed=not_none(X_observed),
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            seed_inner=seed_inner,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
            qmc=qmc,
        )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        # get acquisition function
        acq_function = _instantiate_KG(
            model=model,
            objective=objective,
            qmc=qmc,
            n_fantasies=n_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            mc_samples=acf_options.get("mc_samples", 256),
            seed_inner=seed_inner,
            seed_outer=acf_options.get("seed_outer", None),
            X_pending=X_pending,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            current_value=current_value,
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        new_x = _optimize_and_get_candidates(
            acq_function=acq_function,
            bounds_=bounds_,
            n=n,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            optimizer_options=optimizer_options,
            rounding_func=rounding_func,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
        )

        return new_x, torch.ones(n, dtype=self.dtype), {}, None
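For reference, `model_gen_options` here is a two-level dictionary. A small sketch of the option plumbing this `gen` performs, using the defaults that appear in its body (the concrete values are illustrative):

model_gen_options = {
    "acquisition_function_kwargs": {"num_fantasies": 32, "qmc": True},
    "optimizer_kwargs": {"num_restarts": 20, "raw_samples": 512},
}
# The same extraction `gen` performs, with its documented defaults.
acf_options = model_gen_options.get("acquisition_function_kwargs", {})
optimizer_options = model_gen_options.get("optimizer_kwargs", {})
n_fantasies = acf_options.get("num_fantasies", 64)        # -> 32
num_restarts = optimizer_options.get("num_restarts", 40)  # -> 20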
Example 4
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,  # objective_directions
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        objective_thresholds: Optional[Tensor] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, Ys = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                Ys=self.Ys,
            )
        else:
            Ys = self.Ys

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        botorch_rounding_func = get_rounding_func(rounding_func)
        if acf_options.get("random_scalarization", False) or acf_options.get(
                "chebyshev_scalarization", False):
            # If using a list of acquisition functions, the algorithm to generate
            # that list is configured by acquisition_function_kwargs.
            objective_weights_list = [
                randomize_objective_weights(objective_weights, **acf_options)
                for _ in range(n)
            ]
            acquisition_function_list = [
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    Ys=Ys,  # Required for chebyshev scalarization calculations.
                    **acf_options,
                ) for objective_weights in objective_weights_list
            ]
            acquisition_function_list = [
                checked_cast(AcquisitionFunction, acq_function)
                for acq_function in acquisition_function_list
            ]
            # Multiple acquisition functions require a sequential optimizer,
            # so always use scipy_optimizer_list.
            # TODO(jej): Allow any optimizer.
            candidates, expected_acquisition_value = scipy_optimizer_list(
                acq_function_list=acquisition_function_list,
                bounds=bounds_,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        else:
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                objective_thresholds=objective_thresholds,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                Ys=Ys,  # Required for qEHVI calculations; use the subset Ys.
                **acf_options,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=checked_cast(AcquisitionFunction,
                                          acquisition_function),
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {"expected_acquisition_value": expected_acquisition_value.tolist()},
            None,
        )
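The `random_scalarization` branch above draws one weight vector per candidate via `randomize_objective_weights`. As a rough illustration of the idea only (this samples uniformly from the probability simplex; it is not necessarily Ax's exact scheme):

import torch

def random_simplex_weights(num_objectives: int) -> torch.Tensor:
    # Normalized exponentials are uniformly distributed on the simplex.
    w = torch.distributions.Exponential(1.0).sample((num_objectives,))
    return w / w.sum()

n = 4  # one randomized weight vector per requested candidate
objective_weights_list = [random_simplex_weights(2) for _ in range(n)]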
Example 5
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get(Keys.ACQF_KWARGS, {})
        optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        botorch_rounding_func = get_rounding_func(rounding_func)

        # The following logic works around the limitation of PyTorch's Sobol
        # sampler to at most 1111 dimensions.
        # TODO: Remove once https://github.com/pytorch/pytorch/issues/41489 is resolved.

        from botorch.exceptions.errors import UnsupportedError

        def make_and_optimize_acqf(
                override_qmc: bool = False) -> Tuple[Tensor, Tensor]:
            add_kwargs = {"qmc": False} if override_qmc else {}
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
                **add_kwargs,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=checked_cast(AcquisitionFunction,
                                          acquisition_function),
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
            return candidates, expected_acquisition_value

        try:
            candidates, expected_acquisition_value = make_and_optimize_acqf()
        except UnsupportedError as e:
            if "SobolQMCSampler only supports dimensions q * o <= 1111" in str(
                    e):
                # dimension too large for Sobol, let's use IID
                candidates, expected_acquisition_value = make_and_optimize_acqf(
                    override_qmc=True)
            else:
                raise e

        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {"expected_acquisition_value": expected_acquisition_value.tolist()},
            None,
        )
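For context on the workaround: `SobolQMCNormalSampler` draws a `q * o`-dimensional Sobol sequence, and the PyTorch versions this code targets cap the Sobol engine at 1111 dimensions (the linked issue tracks raising that limit). A quick check of when the IID fallback would trigger, with illustrative sizes:

q, o = 20, 64          # 20 candidates times 64 MC/fantasy outputs (illustrative)
sobol_dim = q * o      # 1280
use_iid_fallback = sobol_dim > 1111
assert use_iid_fallback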
Example 6
    def __init__(
        self,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
        options: Optional[Dict[str, Any]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> None:
        if not botorch_acqf_class and not self.default_botorch_acqf_class:
            raise ValueError(
                f"Acquisition class {self.__class__} does not specify a default "
                "BoTorch `AcquisitionFunction`, so `botorch_acqf_class` "
                "argument must be specified.")
        self._botorch_acqf_class = not_none(botorch_acqf_class
                                            or self.default_botorch_acqf_class)
        self.surrogate = surrogate
        self.options = options or {}
        trd = self._extract_training_data(surrogate=surrogate)
        Xs = (
            # Assumes 1-D objective_weights, which should be safe.
            [trd.X for _ in range(objective_weights.shape[0])]
            if isinstance(trd, TrainingData)
            else [i.X for i in trd.values()]
        )
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        # Subset model only to the outcomes we need for the optimization.
        if self.options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                self.surrogate.model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
        else:
            model = self.surrogate.model

        objective = self._get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )
        model_deps = self.compute_model_dependencies(
            surrogate=surrogate,
            bounds=bounds,
            objective_weights=objective_weights,
            pending_observations=pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=self.options,
        )
        X_baseline = X_observed
        overridden_X_baseline = model_deps.get(Keys.X_BASELINE)
        if overridden_X_baseline is not None:
            X_baseline = overridden_X_baseline
            model_deps.pop(Keys.X_BASELINE)
        self.acqf = self._botorch_acqf_class(  # pyre-ignore[28]: Some kwargs are
            # not expected in base `AcquisitionFunction` but are expected in
            # its subclasses.
            model=model,
            objective=objective,
            X_pending=X_pending,
            X_baseline=X_baseline,
            **self.options,
            **model_deps,
        )
Example 7
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get(Keys.ACQF_KWARGS, {})
        optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel"
            )
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        botorch_rounding_func = get_rounding_func(rounding_func)
        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )
        acquisition_function = checked_cast(AcquisitionFunction, acquisition_function)
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction, acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {"expected_acquisition_value": expected_acquisition_value.tolist()},
            None,
        )
Example 8
def infer_objective_thresholds(
    model: Model,
    objective_weights: Tensor,  # objective_directions
    bounds: Optional[List[Tuple[float, float]]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    subset_idcs: Optional[Tensor] = None,
    Xs: Optional[List[Tensor]] = None,
    X_observed: Optional[Tensor] = None,
) -> Tensor:
    """Infer objective thresholds.

    This method uses the model-estimated Pareto frontier over the in-sample points
    to infer absolute (not relativized) objective thresholds.

    This uses a heuristic that sets the objective threshold to be a scaled nadir
    point, where the nadir point is scaled back based on the range of each
    objective across the current in-sample Pareto frontier.

    See `botorch.utils.multi_objective.hypervolume.infer_reference_point` for
    details on the heuristic.

    Args:
        model: A fitted botorch Model.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights. These should not
            be subsetted.
        bounds: A list of (lower, upper) tuples for each column of X.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. These should not be subsetted.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value during generation.
        subset_idcs: The indices of the outcomes that are modeled by the
            provided model. If subset_idcs is None, this method infers
            whether the model has already been subset.
        Xs: A list of m (k_i x d) feature tensors X. Number of rows k_i can
            vary from i=1,...,m.
        X_observed: A `n x d`-dim tensor of in-sample points to use for
            determining the current in-sample Pareto frontier.

    Returns:
        A `m`-dim tensor of objective thresholds, where the objective
            threshold is `nan` if the outcome is not an objective.
    """
    if X_observed is None:
        if bounds is None:
            raise ValueError("bounds is required if X_observed is None.")
        elif Xs is None:
            raise ValueError("Xs is required if X_observed is None.")
        _, X_observed = _get_X_pending_and_observed(
            Xs=Xs,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )
    num_outcomes = objective_weights.shape[0]
    if subset_idcs is None:
        # check if only a subset of outcomes are modeled
        nonzero = objective_weights != 0
        if outcome_constraints is not None:
            A, _ = outcome_constraints
            nonzero = nonzero | torch.any(A != 0, dim=0)
        expected_subset_idcs = nonzero.nonzero().view(-1)
        if model.num_outputs > expected_subset_idcs.numel():
            # subset the model so that we only compute the posterior
            # over the relevant outcomes
            subset_model_results = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
            model = subset_model_results.model
            objective_weights = subset_model_results.objective_weights
            outcome_constraints = subset_model_results.outcome_constraints
            subset_idcs = subset_model_results.indices
        else:
            # model is already subsetted.
            subset_idcs = expected_subset_idcs
            # subset objective weights and outcome constraints
            objective_weights = objective_weights[subset_idcs]
            if outcome_constraints is not None:
                outcome_constraints = (
                    outcome_constraints[0][:, subset_idcs],
                    outcome_constraints[1],
                )
    else:
        objective_weights = objective_weights[subset_idcs]
        if outcome_constraints is not None:
            outcome_constraints = (
                outcome_constraints[0][:, subset_idcs],
                outcome_constraints[1],
            )
    with torch.no_grad():
        pred = not_none(model).posterior(not_none(X_observed)).mean
    if outcome_constraints is not None:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
        # pyre-ignore [16]
        feas = torch.stack([c(pred) <= 0 for c in cons_tfs],
                           dim=-1).all(dim=-1)
        pred = pred[feas]
    if pred.shape[0] == 0:
        raise AxError("There are no feasible observed points.")
    obj_mask = objective_weights.nonzero().view(-1)
    obj_weights_subset = objective_weights[obj_mask]
    obj = pred[..., obj_mask] * obj_weights_subset
    pareto_obj = obj[is_non_dominated(obj)]
    objective_thresholds = infer_reference_point(
        pareto_Y=pareto_obj,
        scale=0.1,
    )
    # multiply by objective weights to return objective thresholds in the
    # unweighted space
    objective_thresholds = objective_thresholds * obj_weights_subset
    full_objective_thresholds = torch.full(
        (num_outcomes, ),
        float("nan"),
        dtype=objective_weights.dtype,
        device=objective_weights.device,
    )
    obj_idcs = subset_idcs[obj_mask]
    full_objective_thresholds[obj_idcs] = objective_thresholds.clone()
    return full_objective_thresholds
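The heuristic delegated to `infer_reference_point` amounts to pushing the nadir of the model-estimated Pareto front outward by a fraction of each objective's range. A simplified sketch of that idea (BoTorch's actual implementation also handles degenerate fronts; `scaled_nadir` is a hypothetical name used only here):

import torch

def scaled_nadir(pareto_Y: torch.Tensor, scale: float = 0.1) -> torch.Tensor:
    # Nadir of the (maximization) Pareto front, pushed outward by a
    # fraction of each objective's observed range.
    ideal = pareto_Y.max(dim=0).values
    nadir = pareto_Y.min(dim=0).values
    return nadir - scale * (ideal - nadir)

pareto_Y = torch.tensor([[1.0, 4.0], [2.0, 3.0], [3.0, 1.0]])
print(scaled_nadir(pareto_Y))  # tensor([0.8000, 0.7000])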
Example 9
 def __init__(
     self,
     surrogate: Surrogate,
     search_space_digest: SearchSpaceDigest,
     objective_weights: Tensor,
     botorch_acqf_class: Type[AcquisitionFunction],
     options: Optional[Dict[str, Any]] = None,
     pending_observations: Optional[List[Tensor]] = None,
     outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
     fixed_features: Optional[Dict[int, float]] = None,
     objective_thresholds: Optional[Tensor] = None,
 ) -> None:
     self.surrogate = surrogate
     self.options = options or {}
     X_pending, X_observed = _get_X_pending_and_observed(
         Xs=self.surrogate.training_data.Xs,
         objective_weights=objective_weights,
         bounds=search_space_digest.bounds,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
     )
     # store objective thresholds for all outcomes (including non-objectives)
     self._objective_thresholds = objective_thresholds
     full_objective_weights = objective_weights
     full_outcome_constraints = outcome_constraints
     # Subset model only to the outcomes we need for the optimization.
     if self.options.get(Keys.SUBSET_MODEL, True):
         subset_model_results = subset_model(
             model=self.surrogate.model,
             objective_weights=objective_weights,
             outcome_constraints=outcome_constraints,
             objective_thresholds=objective_thresholds,
         )
         model = subset_model_results.model
         objective_weights = subset_model_results.objective_weights
         outcome_constraints = subset_model_results.outcome_constraints
         objective_thresholds = subset_model_results.objective_thresholds
         subset_idcs = subset_model_results.indices
     else:
         model = self.surrogate.model
         subset_idcs = None
     # If objective weights suggest multiple objectives but objective
     # thresholds are not specified, infer them using the model that
     # has already been subset to avoid re-subsetting it within
     # `infer_objective_thresholds`.
     if (objective_weights.nonzero().numel() > 1  # pyre-ignore [16]
             and self._objective_thresholds is None):
         self._objective_thresholds = infer_objective_thresholds(
             model=model,
             objective_weights=full_objective_weights,
             outcome_constraints=full_outcome_constraints,
             X_observed=X_observed,
             subset_idcs=subset_idcs,
         )
         objective_thresholds = (
             not_none(self._objective_thresholds)[subset_idcs]
             if subset_idcs is not None else self._objective_thresholds)
     objective = self.get_botorch_objective(
         botorch_acqf_class=botorch_acqf_class,
         model=model,
         objective_weights=objective_weights,
         objective_thresholds=objective_thresholds,
         outcome_constraints=outcome_constraints,
         X_observed=X_observed,
     )
     model_deps = self.compute_model_dependencies(
         surrogate=surrogate,
         search_space_digest=search_space_digest,
         objective_weights=objective_weights,
         pending_observations=pending_observations,
         outcome_constraints=outcome_constraints,
         linear_constraints=linear_constraints,
         fixed_features=fixed_features,
         options=self.options,
     )
     input_constructor_kwargs = {
         "X_baseline": X_observed,
         "X_pending": X_pending,
         "objective_thresholds": objective_thresholds,
         "outcome_constraints": outcome_constraints,
         **model_deps,
         **self.options,
     }
     input_constructor = get_acqf_input_constructor(botorch_acqf_class)
     acqf_inputs = input_constructor(
         model=model,
         training_data=self.surrogate.training_data,
         objective=objective,
         **input_constructor_kwargs,
     )
     self.acqf = botorch_acqf_class(**acqf_inputs)  # pyre-ignore [45]
Example 10
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,  # objective_directions
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        objective_thresholds: Optional[Tensor] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata,
               Optional[List[TCandidateMetadata]]]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")
        if (objective_thresholds is not None and
                objective_weights.shape[0] != objective_thresholds.shape[0]):
            raise AxError(
                "Objective weights and thresholds most both contain an element for"
                " each modeled metric.")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = not_none(self.model)
        full_objective_thresholds = objective_thresholds
        full_objective_weights = objective_weights
        full_outcome_constraints = outcome_constraints
        # subset model only to the outcomes we need for the optimization
        if options.get(Keys.SUBSET_MODEL, True):
            subset_model_results = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                objective_thresholds=objective_thresholds,
            )
            model = subset_model_results.model
            objective_weights = subset_model_results.objective_weights
            outcome_constraints = subset_model_results.outcome_constraints
            objective_thresholds = subset_model_results.objective_thresholds
            idcs = subset_model_results.indices
        else:
            idcs = None
        if objective_thresholds is None:
            full_objective_thresholds = infer_objective_thresholds(
                model=model,
                X_observed=not_none(X_observed),
                objective_weights=full_objective_weights,
                outcome_constraints=full_outcome_constraints,
                subset_idcs=idcs,
            )
            # subset the objective thresholds
            objective_thresholds = (full_objective_thresholds
                                    if idcs is None else
                                    full_objective_thresholds[idcs].clone())

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        botorch_rounding_func = get_rounding_func(rounding_func)
        if acf_options.get("random_scalarization", False) or acf_options.get(
                "chebyshev_scalarization", False):
            # If using a list of acquisition functions, the algorithm to generate
            # that list is configured by acquisition_function_kwargs.
            objective_weights_list = [
                randomize_objective_weights(objective_weights, **acf_options)
                for _ in range(n)
            ]
            acquisition_function_list = [
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=objective_weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    **acf_options,
                ) for objective_weights in objective_weights_list
            ]
            acquisition_function_list = [
                checked_cast(AcquisitionFunction, acq_function)
                for acq_function in acquisition_function_list
            ]
            # Multiple acquisition functions require a sequential optimizer,
            # so always use scipy_optimizer_list.
            # TODO(jej): Allow any optimizer.
            candidates, expected_acquisition_value = scipy_optimizer_list(
                acq_function_list=acquisition_function_list,
                bounds=bounds_,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        else:
            acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                objective_thresholds=objective_thresholds,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
            )
            acquisition_function = checked_cast(AcquisitionFunction,
                                                acquisition_function)
            # pyre-ignore: [28]
            candidates, expected_acquisition_value = self.acqf_optimizer(
                acq_function=checked_cast(AcquisitionFunction,
                                          acquisition_function),
                bounds=bounds_,
                n=n,
                inequality_constraints=_to_inequality_constraints(
                    linear_constraints=linear_constraints),
                fixed_features=fixed_features,
                rounding_func=botorch_rounding_func,
                **optimizer_options,
            )
        gen_metadata = {
            "expected_acquisition_value": expected_acquisition_value.tolist(),
            "objective_thresholds": not_none(full_objective_thresholds).cpu(),
        }
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            gen_metadata,
            None,
        )
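The bookkeeping between the `full_*` and subset quantities can be seen in isolation: thresholds are inferred in the subset outcome space and then have to line up with the original outcome order, with `nan` for non-objectives. A small sketch of that index mapping (tensors are illustrative):

import torch

full = torch.full((3,), float("nan"))
subset_idcs = torch.tensor([0, 2])     # outcomes kept by subset_model
thresholds = torch.tensor([1.0, 2.0])  # inferred in the subset space
full[subset_idcs] = thresholds
assert torch.equal(full.isnan(), torch.tensor([False, True, False]))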
Example 11
    def gen(
        self,
        n: int,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata]:
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        if target_fidelities:
            raise NotImplementedError(
                "target_fidelities not implemented for base BotorchModel")

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        if linear_constraints is not None:
            A, b = linear_constraints
            inequality_constraints = []
            k, d = A.shape
            for i in range(k):
                indices = A[i, :].nonzero().view(-1)
                coefficients = -A[i, indices]
                rhs = -b[i, 0]
                inequality_constraints.append((indices, coefficients, rhs))
        else:
            inequality_constraints = None

        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )

        botorch_rounding_func = get_rounding_func(rounding_func)
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction,
                                      acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return (
            candidates.detach().cpu(),
            torch.ones(n, dtype=self.dtype),
            {"expected_acquisition_value": expected_acquisition_value.tolist()},
        )
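The inline conversion above moves from Ax's `A x <= b` convention to the `(indices, coefficients, rhs)` triples BoTorch optimizers expect, which encode `sum(coefficients * x[indices]) >= rhs`; hence the sign flips. A worked example with illustrative values:

import torch

# Ax convention: A x <= b. One constraint: x0 + 2 * x1 <= 3.
A = torch.tensor([[1.0, 2.0]])
b = torch.tensor([[3.0]])

i = 0
indices = A[i, :].nonzero().view(-1)  # tensor([0, 1])
coefficients = -A[i, indices]         # tensor([-1., -2.])
rhs = -b[i, 0]                        # tensor(-3.)

x = torch.tensor([1.0, 0.5])          # feasible: 1 + 2 * 0.5 = 2 <= 3
assert (coefficients * x[indices]).sum() >= rhs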
Example 12
    def __init__(
        self,
        surrogate: Surrogate,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
        options: Optional[Dict[str, Any]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> None:
        if not botorch_acqf_class and not self.default_botorch_acqf_class:
            raise ValueError(
                f"Acquisition class {self.__class__} does not specify a default "
                "BoTorch `AcquisitionFunction`, so `botorch_acqf_class` "
                "argument must be specified.")
        botorch_acqf_class = not_none(botorch_acqf_class
                                      or self.default_botorch_acqf_class)
        self.surrogate = surrogate
        self.options = options or {}
        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=[self.surrogate.training_data.X],
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        # Subset model only to the outcomes we need for the optimization.
        if self.options.get(Keys.SUBSET_MODEL, True):
            model, objective_weights, outcome_constraints, _ = subset_model(
                self.surrogate.model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
        else:
            model = self.surrogate.model

        objective = get_botorch_objective(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            use_scalarized_objective=issubclass(botorch_acqf_class,
                                                AnalyticAcquisitionFunction),
        )
        # NOTE: Computing model dependencies might be handled entirely on
        # BoTorch side.
        model_deps = self.compute_model_dependencies(
            surrogate=surrogate,
            bounds=bounds,
            objective_weights=objective_weights,
            pending_observations=pending_observations,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            options=self.options,
        )
        data_deps = self.compute_data_dependencies(
            training_data=self.surrogate.training_data)
        # pyre-ignore[28]: Some kwargs are not expected in base `Model`
        # but are expected in its subclasses.
        self.acqf = botorch_acqf_class(
            model=model,
            objective=objective,
            X_pending=X_pending,
            X_baseline=X_observed,
            **self.options,
            **model_deps,
            **data_deps,
        )
Example 13
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata]:
        """
        Generate new candidates.

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations:  A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            3-element tuple containing

            - (n x d) tensor of generated points.
            - n-tensor of weights for each point.
            - Dictionary of model-specific metadata for the given
                generation candidates.
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        objective = _get_objective(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )

        # get the acquisition function
        n_fantasies = acf_options.get("num_fantasies", 64)
        qmc = acf_options.get("qmc", True)
        seed_inner = acf_options.get("seed_inner", None)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        inequality_constraints = _to_inequality_constraints(linear_constraints)
        # TODO: update optimizers to handle inequality_constraints
        if inequality_constraints is not None:
            raise UnsupportedError(
                "Inequality constraints are not yet supported for KnowledgeGradient!"
            )

        # get current value
        best_point_acqf, non_fixed_idcs = self._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,  # pyre-ignore: [6]
            seed_inner=seed_inner,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            qmc=qmc,
        )

        # solution from previous iteration
        recommended_point = self.best_point(
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
        )
        # pyre-ignore: [16]
        recommended_point = recommended_point.detach().unsqueeze(0)
        # Extract acquisition value (TODO: Make this less painful and repetitive)
        if non_fixed_idcs is not None:
            recommended_point = recommended_point[..., non_fixed_idcs]
        current_value = best_point_acqf(recommended_point).max()

        acq_function = _instantiate_KG(
            model=model,  # pyre-ignore [6]
            objective=objective,
            qmc=qmc,
            n_fantasies=n_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            mc_samples=acf_options.get("mc_samples", 256),
            seed_inner=seed_inner,
            seed_outer=acf_options.get("seed_outer", None),
            X_pending=X_pending,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            current_value=current_value,
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        batch_initial_conditions = gen_one_shot_kg_initial_conditions(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "frac_random": optimizer_options.get("frac_random", 0.1),
                "num_inner_restarts": num_restarts,
                "raw_inner_samples": raw_samples,
            },
        )

        botorch_rounding_func = get_rounding_func(rounding_func)

        candidates, _ = optimize_acqf(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            post_processing_func=botorch_rounding_func,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "batch_limit": optimizer_options.get("batch_limit", 8),
                "maxiter": optimizer_options.get("maxiter", 200),
                "method": "L-BFGS-B",
                "nonnegative": optimizer_options.get("nonnegative", False),
            },
            batch_initial_conditions=batch_initial_conditions,
        )
        new_x = candidates.detach().cpu()
        return new_x, torch.ones(n, dtype=self.dtype), {}
Example 14
    def _get_best_point_acqf(
        self,
        X_observed: Tensor,
        objective_weights: Tensor,
        mc_samples: int = 512,
        fixed_features: Optional[Dict[int, float]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        seed_inner: Optional[int] = None,
        qmc: bool = True,
        **kwargs: Any,
    ) -> Tuple[AcquisitionFunction, Optional[List[int]]]:
        model = self.model

        # subset model only to the outcomes we need for the optimization
        if kwargs.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        fixed_features = fixed_features or {}
        target_fidelities = target_fidelities or {}
        objective = _get_objective(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )
        if isinstance(objective, ScalarizedObjective):
            acq_function = PosteriorMean(
                model=model,
                objective=objective  # pyre-ignore: [6]
            )
        elif isinstance(objective, MCAcquisitionObjective):
            if qmc:
                sampler = SobolQMCNormalSampler(num_samples=mc_samples,
                                                seed=seed_inner)
            else:
                sampler = IIDNormalSampler(num_samples=mc_samples,
                                           seed=seed_inner)
            acq_function = qSimpleRegret(
                model=model,
                sampler=sampler,
                objective=objective  # pyre-ignore [6]
            )
        else:
            raise UnsupportedError(
                f"Unknown objective type: {objective.__class__}"  # pragma: nocover
            )

        if self.fidelity_features:
            # we need to optimize at the target fidelities
            if any(f in self.fidelity_features for f in fixed_features):
                raise RuntimeError(
                    "Fixed features cannot also be fidelity features")
            elif not set(self.fidelity_features) == set(target_fidelities):
                raise RuntimeError(
                    "Must provide a target fidelity for every fidelity feature"
                )
            # make sure to not modify fixed_features in-place
            fixed_features = {**fixed_features, **target_fidelities}
        elif target_fidelities:
            raise RuntimeError(
                "Must specify fidelity_features in fit() when using target fidelities"
            )

        if fixed_features:
            acq_function = FixedFeatureAcquisitionFunction(
                acq_function=acq_function,
                d=X_observed.size(-1),
                columns=list(fixed_features.keys()),
                values=list(fixed_features.values()),
            )
            non_fixed_idcs = [
                i for i in range(self.Xs[0].size(-1))
                if i not in fixed_features
            ]
        else:
            non_fixed_idcs = None

        return acq_function, non_fixed_idcs
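Two small details at the end are worth illustrating: the merge `{**fixed_features, **target_fidelities}` avoids mutating the caller's dict, and `non_fixed_idcs` recovers the columns that remain free after fixing. A sketch (the dimension and indices are illustrative):

fixed_features = {0: 0.5}     # feature 0 pinned by the caller
target_fidelities = {2: 1.0}  # fidelity feature 2 at its target
# Merge without mutating the caller's dict, as the method above does.
merged = {**fixed_features, **target_fidelities}
d = 4  # total input dimension (illustrative)
non_fixed_idcs = [i for i in range(d) if i not in merged]
assert non_fixed_idcs == [1, 3]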