Code example #1
File: botorch_defaults.py  Project: AdrianaMusic/Ax
def recommend_best_out_of_sample_point(
    model: TorchModel,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    model_gen_options: Optional[TConfig] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Optional[Tensor]:
    """
    Identify the current best point by optimizing the posterior mean of the model.
    This is "out-of-sample" because it considers un-observed designs as well.

    Return None if no such point can be identified.

    Args:
        model: A TorchModel.
        bounds: A list of (lower, upper) tuples for each column of X.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value in the best point.
        model_gen_options: A config dictionary that can contain
            model-specific options.
        target_fidelities: A map {feature_index: value} of fidelity feature
            column indices to their respective target fidelities. Used for
            multi-fidelity optimization.

    Returns:
        A d-array of the best point, or None if no feasible point exists.
    """
    options = model_gen_options or {}
    fixed_features = fixed_features or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    X_observed = get_observed(
        Xs=model.Xs,  # pyre-ignore: [16]
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
    )

    if hasattr(model, "_get_best_point_acqf"):
        acq_function, non_fixed_idcs = model._get_best_point_acqf(  # pyre-ignore: [16]
            X_observed=X_observed,
            objective_weights=objective_weights,
            mc_samples=acf_options.get("mc_samples", 512),
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            outcome_constraints=outcome_constraints,
            seed_inner=acf_options.get("seed_inner", None),
            qmc=acf_options.get("qmc", True),
        )
    else:
        raise RuntimeError("The model should implement _get_best_point_acqf.")

    inequality_constraints = _to_inequality_constraints(linear_constraints)
    # TODO: update optimizers to handle inequality_constraints
    # (including transforming constraints b/c of fixed features)
    if inequality_constraints is not None:
        raise UnsupportedError("Inequality constraints are not supported!")

    return_best_only = optimizer_options.get("return_best_only", True)
    bounds_ = torch.tensor(bounds, dtype=model.dtype, device=model.device)
    bounds_ = bounds_.transpose(-1, -2)
    if non_fixed_idcs is not None:
        bounds_ = bounds_[..., non_fixed_idcs]

    candidates, _ = optimize_acqf(
        acq_function=acq_function,
        bounds=bounds_,
        q=1,
        num_restarts=optimizer_options.get("num_restarts", 60),
        raw_samples=optimizer_options.get("raw_samples", 1024),
        inequality_constraints=inequality_constraints,
        fixed_features=None,  # handled inside the acquisition function
        options={
            "batch_limit": optimizer_options.get("batch_limit", 8),
            "maxiter": optimizer_options.get("maxiter", 200),
            "nonnegative": optimizer_options.get("nonnegative", False),
            "method": "L-BFGS-B",
        },
        return_best_only=return_best_only,
    )
    rec_point = candidates.detach().cpu()
    if isinstance(acq_function, FixedFeatureAcquisitionFunction):
        rec_point = acq_function._construct_X_full(rec_point)
    if return_best_only:
        rec_point = rec_point.view(-1)
    return rec_point
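The helper above ultimately delegates to `optimize_acqf` on a best-point acquisition function. A minimal, self-contained sketch of that same pattern, assuming a toy `SingleTaskGP` with `PosteriorMean` (the data and settings below are made up, not taken from the project above):

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import PosteriorMean
from botorch.optim import optimize_acqf

# toy training data (assumption for illustration only)
train_X = torch.rand(20, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)  # hyperparameter fitting omitted for brevity

bounds = torch.tensor([[0.0] * 3, [1.0] * 3], dtype=torch.double)  # 2 x d
best_x, best_val = optimize_acqf(
    acq_function=PosteriorMean(model),  # "out-of-sample" best point: maximize the posterior mean
    bounds=bounds,
    q=1,
    num_restarts=20,
    raw_samples=256,
)
print(best_x.view(-1), best_val)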
Code example #2
File: test_optimize.py  Project: woooodbond/botorch
    def test_optimize_acqf_joint(
        self, mock_gen_candidates, mock_gen_batch_initial_conditions
    ):
        q = 3
        num_restarts = 2
        raw_samples = 10
        options = {}
        mock_acq_function = MockAcquisitionFunction()
        cnt = 1
        for dtype in (torch.float, torch.double):
            mock_gen_batch_initial_conditions.return_value = torch.zeros(
                num_restarts, q, 3, device=self.device, dtype=dtype
            )
            base_cand = torch.ones(1, q, 3, device=self.device, dtype=dtype)
            mock_candidates = torch.cat(
                [i * base_cand for i in range(num_restarts)], dim=0
            )
            mock_acq_values = num_restarts - torch.arange(
                num_restarts, device=self.device, dtype=dtype
            )
            mock_gen_candidates.return_value = (mock_candidates, mock_acq_values)
            bounds = torch.stack(
                [
                    torch.zeros(3, device=self.device, dtype=dtype),
                    4 * torch.ones(3, device=self.device, dtype=dtype),
                ]
            )
            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
            )
            self.assertTrue(torch.equal(candidates, mock_candidates[0]))
            self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))

            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
                return_best_only=False,
                batch_initial_conditions=torch.zeros(
                    num_restarts, q, 3, device=self.device, dtype=dtype
                ),
            )
            self.assertTrue(torch.equal(candidates, mock_candidates))
            self.assertTrue(torch.equal(acq_vals, mock_acq_values))
            self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)
            cnt += 1

        # test OneShotAcquisitionFunction
        mock_acq_function = MockOneShotAcquisitionFunction()
        candidates, acq_vals = optimize_acqf(
            acq_function=mock_acq_function,
            bounds=bounds,
            q=q,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options=options,
        )
        self.assertTrue(
            torch.equal(
                candidates, mock_acq_function.extract_candidates(mock_candidates[0])
            )
        )
        self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
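The shapes this test checks can also be seen with a real acquisition function. A rough sketch (the toy GP below is an assumption, not part of the test): with the default `return_best_only=True`, `optimize_acqf` returns a `q x d` candidate and a scalar value, while `return_best_only=False` returns all `num_restarts` restarts.

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import UpperConfidenceBound
from botorch.optim import optimize_acqf

# toy model (assumption for illustration)
model = SingleTaskGP(torch.rand(8, 3, dtype=torch.double),
                     torch.rand(8, 1, dtype=torch.double))
acqf = UpperConfidenceBound(model, beta=2.0)
bounds = torch.stack([torch.zeros(3, dtype=torch.double),
                      4 * torch.ones(3, dtype=torch.double)])

best_x, _ = optimize_acqf(acqf, bounds=bounds, q=1, num_restarts=2, raw_samples=10)
all_cands, all_vals = optimize_acqf(
    acqf, bounds=bounds, q=1, num_restarts=2, raw_samples=10, return_best_only=False
)
print(best_x.shape, all_cands.shape, all_vals.shape)  # (1, 3), (2, 1, 3), (2,)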
Code example #3
def gen_value_function_initial_conditions(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    num_restarts: int,
    raw_samples: int,
    current_model: Model,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
) -> Tensor:
    r"""Generate a batch of smart initializations for optimizing
    the value function of qKnowledgeGradient.

    This function generates initial conditions for optimizing the inner problem of
    KG, i.e. its value function, using the maximizer of the posterior objective.
    Intuitively, the maximizer of the fantasized posterior will often be close to a
    maximizer of the current posterior. This function uses that fact to generate the
    initial conditions for the fantasy points. Specifically, a fraction of `1 -
    frac_random` (see options) of raw samples is generated by sampling from the set of
    maximizers of the posterior objective (obtained via random restart optimization)
    according to a softmax transformation of their respective values. This means that
    this initialization strategy internally solves an acquisition function
    maximization problem. The remaining raw samples are generated using
    `draw_sobol_samples`. All raw samples are then evaluated, and the initial
    conditions are selected according to the standard initialization strategy in
    'initialize_q_batch' individually for each inner problem.

    Args:
        acq_function: The value function instance to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of
            task features.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic.
        current_model: The model of the KG acquisition function that was used to
            generate the fantasy model of the value function.
        options: Options for initial condition generation. These contain all
            settings for the standard heuristic initialization from
            `gen_batch_initial_conditions`. In addition, they contain
            `frac_random` (the fraction of fully random fantasy points),
            `num_inner_restarts` and `raw_inner_samples` (the number of random
            restarts and raw samples for solving the posterior objective
            maximization problem, respectively) and `eta` (temperature parameter
            for sampling heuristic from posterior objective maximizers).

    Returns:
        A `num_restarts x batch_shape x q x d` tensor that can be used as initial
        conditions for `optimize_acqf()`. Here `batch_shape` is the batch shape
        of the value function model.

    Example:
        >>> fant_X = torch.rand(5, 1, 2)
        >>> fantasy_model = model.fantasize(fant_X, SobolQMCNormalSampler(16))
        >>> value_function = PosteriorMean(fantasy_model)
        >>> bounds = torch.tensor([[0., 0.], [1., 1.]])
        >>> Xinit = gen_value_function_initial_conditions(
        >>>     value_function, bounds, num_restarts=10, raw_samples=512,
        >>>     options={"frac_random": 0.25},
        >>> )
    """
    options = options or {}
    seed: Optional[int] = options.get("seed")
    frac_random: float = options.get("frac_random", 0.6)
    if not 0 < frac_random < 1:
        raise ValueError(
            f"frac_random must take on values in (0,1). Value: {frac_random}")

    # compute maximizer of the current value function
    value_function = _get_value_function(
        model=current_model,
        objective=acq_function.objective,
        sampler=getattr(acq_function, "sampler", None),
        project=getattr(acq_function, "project", None),
    )
    from botorch.optim.optimize import optimize_acqf

    fantasy_cands, fantasy_vals = optimize_acqf(
        acq_function=value_function,
        bounds=bounds,
        q=1,
        num_restarts=options.get("num_inner_restarts", 20),
        raw_samples=options.get("raw_inner_samples", 1024),
        return_best_only=False,
        options={
            k: v
            for k, v in options.items()
            if k not in ("frac_random", "num_inner_restarts",
                         "raw_inner_samples", "eta")
        },
    )

    batch_shape = acq_function.model.batch_shape
    # sampling from the optimizers
    n_value = int((1 - frac_random) * raw_samples)  # number of non-random ICs
    if n_value > 0:
        eta = options.get("eta", 2.0)
        weights = torch.exp(eta * standardize(fantasy_vals))
        idx = batched_multinomial(
            weights=weights.expand(*batch_shape, -1),
            num_samples=n_value,
            replacement=True,
        ).permute(-1, *range(len(batch_shape)))
        resampled = fantasy_cands[idx]
    else:
        resampled = torch.empty(0,
                                *batch_shape,
                                1,
                                bounds.shape[-1],
                                dtype=bounds.dtype,
                                device=bounds.device)
    # add qMC samples
    randomized = draw_sobol_samples(bounds=bounds,
                                    n=raw_samples - n_value,
                                    q=1,
                                    batch_shape=batch_shape,
                                    seed=seed)
    # full set of raw samples
    X_rnd = torch.cat([resampled, randomized], dim=0)

    # evaluate the raw samples
    with torch.no_grad():
        Y_rnd = acq_function(X_rnd)

    # select the restart points using the heuristic
    return initialize_q_batch(X=X_rnd,
                              Y=Y_rnd,
                              n=num_restarts,
                              eta=options.get("eta", 2.0))
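The resampling step described in the docstring (reusing posterior maximizers with probability proportional to a softmax of their values) can be illustrated in plain torch; the values below are made up:

import torch

fantasy_vals = torch.tensor([0.1, 0.5, 1.2, 0.9])  # made-up values of the restart maximizers
eta = 2.0
z = (fantasy_vals - fantasy_vals.mean()) / fantasy_vals.std()  # standardize the values
weights = torch.exp(eta * z)  # softmax-style weights (unnormalized; multinomial normalizes)
idx = torch.multinomial(weights, num_samples=6, replacement=True)
print(idx)  # indices of the maximizers reused as non-random initial conditions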
Code example #4
def alebo_acqf_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]],
    fixed_features: Optional[Dict[int, float]],
    rounding_func: Optional[Callable[[Tensor], Tensor]],
    raw_samples: int,
    num_restarts: int,
    B: Tensor,
) -> Tuple[Tensor, Tensor]:
    """
    Optimize the acquisition function for ALEBO.

    We are optimizing over a polytope within the subspace, and so begin each
    random restart of the acquisition function optimization with points that
    lie within that polytope.
    """
    candidate_list, acq_value_list = [], []
    candidates = torch.tensor([], device=B.device, dtype=B.dtype)
    try:
        base_X_pending = acq_function.X_pending  # pyre-ignore
        acq_has_X_pend = True
    except AttributeError:
        base_X_pending = None
        acq_has_X_pend = False
        assert n == 1
    for i in range(n):
        # Generate initial points for optimization inside embedding
        m_init = ALEBOInitializer(B.cpu().numpy(), nsamp=10 * raw_samples)
        Xrnd_npy, _ = m_init.gen(n=raw_samples, bounds=[(-1.0, 1.0)] * B.shape[1])

        Xrnd = torch.tensor(Xrnd_npy, dtype=B.dtype, device=B.device).unsqueeze(1)
        Yrnd = torch.matmul(Xrnd, B.t())  # Project down to the embedding
        with gpytorch.settings.max_cholesky_size(2000):
            with torch.no_grad():
                alpha = acq_function(Yrnd)

            Yinit = initialize_q_batch_nonneg(X=Yrnd, Y=alpha, n=num_restarts)

            # Optimize the acquisition function, separately for each random restart.
            candidate, acq_value = optimize_acqf(
                acq_function=acq_function,
                bounds=[None, None],  # pyre-ignore
                q=1,
                num_restarts=num_restarts,
                raw_samples=0,
                options={"method": "SLSQP", "batch_limit": 1},
                inequality_constraints=inequality_constraints,
                batch_initial_conditions=Yinit,
                sequential=False,
            )
            candidate_list.append(candidate)
            acq_value_list.append(acq_value)
            candidates = torch.cat(candidate_list, dim=-2)
            if acq_has_X_pend:
                acq_function.set_X_pending(
                    torch.cat([base_X_pending, candidates], dim=-2)
                    if base_X_pending is not None
                    else candidates
                )
        logger.info(f"Generated sequential candidate {i+1} of {n}")
    if acq_has_X_pend:
        acq_function.set_X_pending(base_X_pending)
    return candidates, torch.stack(acq_value_list)
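The pattern used above, scoring random points inside the feasible region, keeping the promising ones with `initialize_q_batch_nonneg`, and handing them to `optimize_acqf` as explicit `batch_initial_conditions`, also works without the ALEBO-specific machinery. A rough sketch with a toy GP (everything below is an assumption, not from the project above):

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import UpperConfidenceBound
from botorch.optim import optimize_acqf
from botorch.optim.initializers import initialize_q_batch_nonneg

model = SingleTaskGP(torch.rand(10, 2, dtype=torch.double),
                     torch.rand(10, 1, dtype=torch.double))  # fitting omitted for brevity
acqf = UpperConfidenceBound(model, beta=2.0)
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)

X_rnd = torch.rand(100, 1, 2, dtype=torch.double)  # raw starting points, shape b x q x d
with torch.no_grad():
    Y_rnd = acqf(X_rnd)
X_init = initialize_q_batch_nonneg(X=X_rnd, Y=Y_rnd, n=5)  # keep 5 promising restarts

candidate, acq_value = optimize_acqf(
    acq_function=acqf, bounds=bounds, q=1, num_restarts=5, raw_samples=0,
    batch_initial_conditions=X_init,  # skips the internal Sobol initialization
)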
Code example #5
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata, List[TCandidateMetadata]]:
        if linear_constraints is not None or outcome_constraints is not None:
            raise UnsupportedError(
                "Constraints are not yet supported by max-value entropy search!"
            )

        if len(objective_weights) > 1:
            raise UnsupportedError(
                "Models with multiple outcomes are not yet supported by MES!"
            )

        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        # get the acquisition function
        num_fantasies = acf_options.get("num_fantasies", 16)
        num_mv_samples = acf_options.get("num_mv_samples", 10)
        num_y_samples = acf_options.get("num_y_samples", 128)
        candidate_size = acf_options.get("candidate_size", 1000)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        # generate the discrete points in the design space to sample max values
        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        candidate_set = torch.rand(
            candidate_size, bounds_.size(1), dtype=self.dtype, device=self.device
        )
        candidate_set = bounds_[0] + (bounds_[1] - bounds_[0]) * candidate_set

        acq_function = _instantiate_MES(
            model=model,  # pyre-ignore [6]
            candidate_set=candidate_set,
            num_fantasies=num_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            num_mv_samples=num_mv_samples,
            num_y_samples=num_y_samples,
            X_pending=X_pending,
            maximize=True if objective_weights[0] == 1 else False,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        botorch_rounding_func = get_rounding_func(rounding_func)
        candidates, _ = optimize_acqf(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            inequality_constraints=None,
            fixed_features=fixed_features,
            post_processing_func=botorch_rounding_func,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "batch_limit": optimizer_options.get("batch_limit", 8),
                "maxiter": optimizer_options.get("maxiter", 200),
                "method": "L-BFGS-B",
                "nonnegative": optimizer_options.get("nonnegative", False),
            },
            sequential=True,
        )
        new_x = candidates.detach().cpu()
        # pyre-fixme[7]: Expected `Tuple[Tensor, Tensor, Dict[str, typing.Any],
        #  List[Optional[Dict[str, typing.Any]]]]` but got `Tuple[Tensor, typing.Any,
        #  Dict[str, typing.Any], None]`.
        return new_x, torch.ones(n, dtype=self.dtype), {}, None
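A condensed sketch of the core steps in `gen` above, written directly against BoTorch (the toy model and all settings are assumptions): draw a discrete candidate set inside the bounds, instantiate a max-value entropy search acquisition function, and optimize it with `optimize_acqf`.

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
from botorch.optim import optimize_acqf

model = SingleTaskGP(torch.rand(16, 2, dtype=torch.double),
                     torch.rand(16, 1, dtype=torch.double))  # fitting omitted for brevity
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)

# discrete points used to sample max values, scaled into the bounds
candidate_set = bounds[0] + (bounds[1] - bounds[0]) * torch.rand(1000, 2, dtype=torch.double)
mes = qMaxValueEntropy(model=model, candidate_set=candidate_set)

new_x, _ = optimize_acqf(
    acq_function=mes,
    bounds=bounds,
    q=1,
    num_restarts=10,
    raw_samples=256,
    options={"batch_limit": 8, "maxiter": 200},
)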
Code example #6
def gen_one_shot_kg_initial_conditions(
    acq_function: qKnowledgeGradient,
    bounds: Tensor,
    q: int,
    num_restarts: int,
    raw_samples: int,
    options: Optional[Dict[str, Union[bool, float, int]]] = None,
) -> Optional[Tensor]:
    r"""Generate a batch of smart initializations for qKnowledgeGradient.

    This function generates initial conditions for optimizing one-shot KG using
    the maximizer of the posterior objective. Intuitively, the maximizer of the
    fantasized posterior will often be close to a maximizer of the current
    posterior. This function uses that fact to generate the initial conditions
    for the fantasy points. Specifically, a fraction of `1 - frac_random` (see
    options) is generated by sampling from the set of maximizers of the
    posterior objective (obtained via random restart optimization) according to
    a softmax transformation of their respective values. This means that this
    initialization strategy internally solves an acquisition function
    maximization problem. The remaining `frac_random` fantasy points as well as
    all `q` candidate points are chosen according to the standard initialization
    strategy in `gen_batch_initial_conditions`.

    Args:
        acq_function: The qKnowledgeGradient instance to be optimized.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of
            task features.
        q: The number of candidates to consider.
        num_restarts: The number of starting points for multistart acquisition
            function optimization.
        raw_samples: The number of raw samples to consider in the initialization
            heuristic.
        options: Options for initial condition generation. These contain all
            settings for the standard heuristic initialization from
            `gen_batch_initial_conditions`. In addition, they contain
            `frac_random` (the fraction of fully random fantasy points),
            `num_inner_restarts` and `raw_inner_samples` (the number of random
            restarts and raw samples for solving the posterior objective
            maximization problem, respectively) and `eta` (temperature parameter
            for sampling heuristic from posterior objective maximizers).

    Returns:
        A `num_restarts x q' x d` tensor that can be used as initial conditions
        for `optimize_acqf()`. Here `q' = q + num_fantasies` is the total number
        of points (candidate points plus fantasy points).

    Example:
        >>> qKG = qKnowledgeGradient(model, num_fantasies=64)
        >>> bounds = torch.tensor([[0., 0.], [1., 1.]])
        >>> Xinit = gen_one_shot_kg_initial_conditions(
        >>>     qKG, bounds, q=3, num_restarts=10, raw_samples=512,
        >>>     options={"frac_random": 0.25},
        >>> )
    """
    options = options or {}
    frac_random: float = options.get("frac_random", 0.1)
    if not 0 < frac_random < 1:
        raise ValueError(
            f"frac_random must take on values in (0,1). Value: {frac_random}")
    q_aug = acq_function.get_augmented_q_batch_size(q=q)

    # TODO: Avoid unnecessary computation by not generating all candidates
    ics = gen_batch_initial_conditions(
        acq_function=acq_function,
        bounds=bounds,
        q=q_aug,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options=options,
    )

    # compute maximizer of the value function
    value_function = _get_value_function(
        model=acq_function.model,
        objective=acq_function.objective,
        sampler=acq_function.inner_sampler,
        project=getattr(acq_function, "project", None),
    )
    from botorch.optim.optimize import optimize_acqf

    fantasy_cands, fantasy_vals = optimize_acqf(
        acq_function=value_function,
        bounds=bounds,
        q=1,
        num_restarts=options.get("num_inner_restarts", 20),
        raw_samples=options.get("raw_inner_samples", 1024),
        return_best_only=False,
    )

    # sampling from the optimizers
    n_value = int((1 - frac_random) * (q_aug - q))  # number of non-random ICs
    eta = options.get("eta", 2.0)
    weights = torch.exp(eta * standardize(fantasy_vals))
    idx = torch.multinomial(weights, num_restarts * n_value, replacement=True)

    # set the respective initial conditions to the sampled optimizers
    ics[..., -n_value:, :] = fantasy_cands[idx,
                                           0].view(num_restarts, n_value, -1)
    return ics
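A sketch of how these initial conditions are typically consumed (the toy model and settings are assumptions): build a `qKnowledgeGradient`, generate the smart one-shot initializations, and pass them to `optimize_acqf` via `batch_initial_conditions`.

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import qKnowledgeGradient
from botorch.optim import optimize_acqf
from botorch.optim.initializers import gen_one_shot_kg_initial_conditions

model = SingleTaskGP(torch.rand(10, 2, dtype=torch.double),
                     torch.rand(10, 1, dtype=torch.double))  # fitting omitted for brevity
qKG = qKnowledgeGradient(model, num_fantasies=16)
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)

Xinit = gen_one_shot_kg_initial_conditions(
    acq_function=qKG, bounds=bounds, q=2, num_restarts=10, raw_samples=128,
    options={"frac_random": 0.25},
)  # shape: num_restarts x (q + num_fantasies) x d
new_x, _ = optimize_acqf(
    acq_function=qKG, bounds=bounds, q=2, num_restarts=10, raw_samples=128,
    batch_initial_conditions=Xinit,
)
# For one-shot acquisition functions optimize_acqf returns the extracted `q` candidates
# (fantasy points stripped), as exercised in the OneShotAcquisitionFunction test above.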
Code example #7
File: acquisition.py  Project: kjanoudi/Ax
    def optimize(
        self,
        n: int,
        search_space_digest: SearchSpaceDigest,
        inequality_constraints: Optional[List[Tuple[Tensor, Tensor,
                                                    float]]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        optimizer_options: Optional[Dict[str, Any]] = None,
    ) -> Tuple[Tensor, Tensor]:
        """Generate a set of candidates via multi-start optimization. Obtains
        candidates and their associated acquisition function values.

        Args:
            n: The number of candidates to generate.
            search_space_digest: A ``SearchSpaceDigest`` object containing search space
                properties, e.g. ``bounds`` for optimization.
            inequality_constraints: A list of tuples (indices, coefficients, rhs),
                with each tuple encoding an inequality constraint of the form
                ``sum_i (X[indices[i]] * coefficients[i]) >= rhs``.
            fixed_features: A map `{feature_index: value}` for features that
                should be fixed to a particular value during generation.
            rounding_func: A function that post-processes an optimization
                result appropriately (i.e., according to `round-trip`
                transformations).
            optimizer_options: Options for the optimizer function, e.g. ``sequential``
                or ``raw_samples``.
        """
        # NOTE: Could make use of `optimizer_class` when it's added to BoTorch
        # instead of calling `optimize_acqf` or `optimize_acqf_discrete` etc.

        optimizer_options_with_defaults = {
            **get_default_optimizer_options(acqf_class=self.botorch_acqf_class),
            **(optimizer_options or {}),
        }
        ssd = search_space_digest
        tkwargs = {"dtype": self.dtype, "device": self.device}
        # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for 2nd param but
        #  got `Union[Type[torch._dtype], torch.device]`.
        bounds = torch.tensor(ssd.bounds, **tkwargs).transpose(0, 1)
        discrete_features = sorted(ssd.ordinal_features +
                                   ssd.categorical_features)

        if fixed_features is not None:
            for i in fixed_features:
                if not 0 <= i < len(ssd.feature_names):
                    raise ValueError(f"Invalid fixed_feature index: {i}")

        # 1. Handle the fully continuous search space.
        if not discrete_features:
            return optimize_acqf(
                acq_function=self.acqf,
                bounds=bounds,
                q=n,
                inequality_constraints=inequality_constraints,
                fixed_features=fixed_features,
                post_processing_func=rounding_func,
                **optimizer_options_with_defaults,
            )

        # 2. Handle search spaces with discrete features.
        discrete_choices = mk_discrete_choices(ssd=ssd,
                                               fixed_features=fixed_features)

        # 2a. Handle the fully discrete search space.
        if len(discrete_choices) == len(ssd.feature_names):
            # For now we just enumerate all possible discrete combinations. This is not
            # scalable and only works for a reasonably small number of choices.
            all_choices = (discrete_choices[i]
                           for i in range(len(discrete_choices)))
            # pyre-fixme[6]: Expected `Optional[Type[torch._dtype]]` for 2nd param
            #  but got `Union[Type[torch._dtype], torch.device]`.
            all_choices = torch.tensor(list(itertools.product(*all_choices)),
                                       **tkwargs)

            return optimize_acqf_discrete(
                acq_function=self.acqf,
                q=n,
                choices=all_choices,
                **optimizer_options_with_defaults,
            )

        # 2b. Handle mixed search spaces that have discrete and continuous features.
        return optimize_acqf_mixed(
            acq_function=self.acqf,
            bounds=bounds,
            q=n,
            # For now we just enumerate all possible discrete combinations. This is not
            # scalable and only works for a reasonably small number of choices. A
            # slowdown warning is logged in `enumerate_discrete_combinations` if needed.
            fixed_features_list=enumerate_discrete_combinations(
                discrete_choices=discrete_choices),
            inequality_constraints=inequality_constraints,
            post_processing_func=rounding_func,
            **optimizer_options_with_defaults,
        )
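A sketch of the mixed-space branch (the feature layout and choices below are assumptions): enumerate the discrete combinations as a `fixed_features_list` and let `optimize_acqf_mixed` optimize the remaining continuous dimensions for each combination.

import itertools
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import UpperConfidenceBound
from botorch.optim import optimize_acqf_mixed

# feature 0 is continuous in [0, 1]; feature 1 is ordinal with choices {0, 1, 2}
model = SingleTaskGP(torch.rand(12, 2, dtype=torch.double),
                     torch.rand(12, 1, dtype=torch.double))  # fitting omitted for brevity
acqf = UpperConfidenceBound(model, beta=2.0)
bounds = torch.tensor([[0.0, 0.0], [1.0, 2.0]], dtype=torch.double)

discrete_choices = {1: [0.0, 1.0, 2.0]}
fixed_features_list = [
    dict(zip(discrete_choices.keys(), combo))
    for combo in itertools.product(*discrete_choices.values())
]  # [{1: 0.0}, {1: 1.0}, {1: 2.0}]

candidate, acq_val = optimize_acqf_mixed(
    acq_function=acqf, bounds=bounds, q=1, num_restarts=10, raw_samples=64,
    fixed_features_list=fixed_features_list,
)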
Code example #8
    def test_optimize_acqf_joint(self, mock_gen_candidates,
                                 mock_gen_batch_initial_conditions):
        q = 3
        num_restarts = 2
        raw_samples = 10
        options = {}
        mock_acq_function = MockAcquisitionFunction()
        cnt = 0
        for dtype in (torch.float, torch.double):
            mock_gen_batch_initial_conditions.return_value = torch.zeros(
                num_restarts, q, 3, device=self.device, dtype=dtype)
            base_cand = torch.arange(3, device=self.device,
                                     dtype=dtype).expand(1, q, 3)
            mock_candidates = torch.cat(
                [i * base_cand for i in range(num_restarts)], dim=0)
            mock_acq_values = num_restarts - torch.arange(
                num_restarts, device=self.device, dtype=dtype)
            mock_gen_candidates.return_value = (mock_candidates,
                                                mock_acq_values)
            bounds = torch.stack([
                torch.zeros(3, device=self.device, dtype=dtype),
                4 * torch.ones(3, device=self.device, dtype=dtype),
            ])
            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
            )
            self.assertTrue(torch.equal(candidates, mock_candidates[0]))
            self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
            cnt += 1
            self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)

            # test generation with provided initial conditions
            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
                return_best_only=False,
                batch_initial_conditions=torch.zeros(num_restarts,
                                                     q,
                                                     3,
                                                     device=self.device,
                                                     dtype=dtype),
            )
            self.assertTrue(torch.equal(candidates, mock_candidates))
            self.assertTrue(torch.equal(acq_vals, mock_acq_values))
            self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)

            # test fixed features
            fixed_features = {0: 0.1}
            mock_candidates[:, 0] = 0.1
            mock_gen_candidates.return_value = (mock_candidates,
                                                mock_acq_values)
            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
                fixed_features=fixed_features,
            )
            self.assertEqual(
                mock_gen_candidates.call_args[1]["fixed_features"],
                fixed_features)
            self.assertTrue(torch.equal(candidates, mock_candidates[0]))
            cnt += 1
            self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)

            # test trivial case when all features are fixed
            candidates, acq_vals = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=q,
                num_restarts=num_restarts,
                raw_samples=raw_samples,
                options=options,
                fixed_features={
                    0: 0.1,
                    1: 0.2,
                    2: 0.3
                },
            )
            self.assertTrue(
                torch.equal(
                    candidates,
                    torch.tensor([0.1, 0.2, 0.3],
                                 device=self.device,
                                 dtype=dtype).expand(3, 3),
                ))
            self.assertEqual(mock_gen_batch_initial_conditions.call_count, cnt)

        # test OneShotAcquisitionFunction
        mock_acq_function = MockOneShotAcquisitionFunction()
        candidates, acq_vals = optimize_acqf(
            acq_function=mock_acq_function,
            bounds=bounds,
            q=q,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options=options,
        )
        self.assertTrue(
            torch.equal(
                candidates,
                mock_acq_function.extract_candidates(mock_candidates[0])))
        self.assertTrue(torch.equal(acq_vals, mock_acq_values[0]))
Code example #9
    def test_optimize_acqf_nonlinear_constraints(self):
        num_restarts = 2
        for dtype in (torch.float, torch.double):
            tkwargs = {"device": self.device, "dtype": dtype}
            mock_acq_function = SquaredAcquisitionFunction()
            bounds = torch.stack(
                [torch.zeros(3, **tkwargs), 4 * torch.ones(3, **tkwargs)]
            )

            # Make sure we find the global optimum [4, 4, 4] without constraints
            with torch.random.fork_rng():
                torch.manual_seed(0)
                candidates, acq_value = optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    num_restarts=num_restarts,
                    sequential=True,
                    raw_samples=16,
                )
            self.assertTrue(torch.allclose(candidates, 4 * torch.ones(3, **tkwargs)))

            # Constrain the sum to be <= 4 in which case the solution is a
            # permutation of [4, 0, 0]
            def nlc1(x):
                return 4 - x.sum(dim=-1)

            batch_initial_conditions = torch.tensor([[[0.5, 0.5, 3]]], **tkwargs)
            candidates, acq_value = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=1,
                nonlinear_inequality_constraints=[nlc1],
                batch_initial_conditions=batch_initial_conditions,
                num_restarts=1,
            )
            self.assertTrue(
                torch.allclose(
                    torch.sort(candidates).values,
                    torch.tensor([[0, 0, 4]], **tkwargs),
                )
            )
            self.assertTrue(
                torch.allclose(acq_value, torch.tensor([4], **tkwargs), atol=1e-3)
            )

            # Make sure we return the initial solution if SLSQP fails to return
            # a feasible point.
            with mock.patch("botorch.generation.gen.minimize") as mock_minimize:
                mock_minimize.return_value = OptimizeResult(x=np.array([4, 4, 4]))
                candidates, acq_value = optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=[nlc1],
                    batch_initial_conditions=batch_initial_conditions,
                    num_restarts=1,
                )
                self.assertTrue(torch.allclose(candidates, batch_initial_conditions))

            # Constrain all variables to be >= 1. The global optimum is 2.45 and
            # is attained by some permutation of [1, 1, 2]
            def nlc2(x):
                return x[..., 0] - 1

            def nlc3(x):
                return x[..., 1] - 1

            def nlc4(x):
                return x[..., 2] - 1

            with torch.random.fork_rng():
                torch.manual_seed(0)
                batch_initial_conditions = 1 + 0.33 * torch.rand(
                    num_restarts, 1, 3, **tkwargs
                )
            candidates, acq_value = optimize_acqf(
                acq_function=mock_acq_function,
                bounds=bounds,
                q=1,
                nonlinear_inequality_constraints=[nlc1, nlc2, nlc3, nlc4],
                batch_initial_conditions=batch_initial_conditions,
                num_restarts=num_restarts,
            )
            self.assertTrue(
                torch.allclose(
                    torch.sort(candidates).values,
                    torch.tensor([[1, 1, 2]], **tkwargs),
                )
            )
            self.assertTrue(
                torch.allclose(acq_value, torch.tensor(2.45, **tkwargs), atol=1e-3)
            )

            # Make sure fixed features aren't supported
            with self.assertRaisesRegex(
                NotImplementedError,
                "Fixed features are not supported when non-linear inequality "
                "constraints are given.",
            ):
                optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=[nlc1, nlc2, nlc3, nlc4],
                    batch_initial_conditions=batch_initial_conditions,
                    num_restarts=num_restarts,
                    fixed_features={0: 0.1},
                )

            # Constraints must be passed in as lists
            with self.assertRaisesRegex(
                ValueError,
                "`nonlinear_inequality_constraints` must be a list of callables, "
                "got <class 'function'>.",
            ):
                optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=nlc1,
                    num_restarts=num_restarts,
                    batch_initial_conditions=batch_initial_conditions,
                )

            # batch_initial_conditions must be given
            with self.assertRaisesRegex(
                NotImplementedError,
                "`batch_initial_conditions` must be given if there are non-linear "
                "inequality constraints.",
            ):
                optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=[nlc1],
                    num_restarts=num_restarts,
                )

            # batch_initial_conditions must be feasible
            with self.assertRaisesRegex(
                ValueError,
                "`batch_initial_conditions` must satisfy the non-linear "
                "inequality constraints.",
            ):
                optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=[nlc1],
                    num_restarts=num_restarts,
                    batch_initial_conditions=4 * torch.ones(1, 1, 3, **tkwargs),
                )
            # Explicitly setting batch_limit to be >1 should raise
            with self.assertRaisesRegex(
                ValueError,
                "`batch_limit` must be 1 when non-linear inequality constraints "
                "are given.",
            ):
                optimize_acqf(
                    acq_function=mock_acq_function,
                    bounds=bounds,
                    q=1,
                    nonlinear_inequality_constraints=[nlc1],
                    batch_initial_conditions=torch.rand(5, 1, 3, **tkwargs),
                    num_restarts=5,
                    options={"batch_limit": 5},
                )
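Outside the test harness, the same non-linear constraint convention can be used as follows: a list of callables with `c(x) >= 0` on feasible points, plus explicit, feasible `batch_initial_conditions`. The toy GP and constraint below are assumptions, and the exact constraint API may differ across BoTorch versions.

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import PosteriorMean
from botorch.optim import optimize_acqf

model = SingleTaskGP(torch.rand(10, 3, dtype=torch.double),
                     torch.rand(10, 1, dtype=torch.double))  # fitting omitted for brevity
bounds = torch.stack([torch.zeros(3, dtype=torch.double),
                      4 * torch.ones(3, dtype=torch.double)])

def sum_le_4(x):  # feasible iff x.sum() <= 4, i.e. the constraint value is >= 0
    return 4 - x.sum(dim=-1)

candidates, acq_value = optimize_acqf(
    acq_function=PosteriorMean(model),
    bounds=bounds,
    q=1,
    nonlinear_inequality_constraints=[sum_le_4],
    batch_initial_conditions=torch.tensor([[[0.5, 0.5, 1.0]]], dtype=torch.double),  # feasible start
    num_restarts=1,
)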
Code example #10
File: botorch_mes.py  Project: xiecong/Ax
    def best_point(
        self,
        bounds: List[Tuple[float, float]],
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        model_gen_options: Optional[TConfig] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Optional[Tensor]:
        """
        Identify the current best point, satisfying the constraints in the same
        format as for `gen`.

        Return None if no such point can be identified.

        Args:
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value in the best point.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            d-tensor of the best point.
        """
        if linear_constraints is not None or outcome_constraints is not None:
            raise UnsupportedError(
                "Constraints are not yet supported by max-value entropy search!"
            )

        options = model_gen_options or {}
        fixed_features = fixed_features or {}
        optimizer_options = options.get("optimizer_kwargs", {})

        X_observed = get_observed(
            Xs=self.Xs,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )

        acq_function, non_fixed_idcs = self._get_best_point_acqf(
            X_observed=X_observed,  # pyre-ignore: [6]
            objective_weights=objective_weights,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
        )

        return_best_only = optimizer_options.get("return_best_only", True)
        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)
        if non_fixed_idcs is not None:
            bounds_ = bounds_[..., non_fixed_idcs]

        candidates, _ = optimize_acqf(
            acq_function=acq_function,
            bounds=bounds_,
            q=1,
            num_restarts=optimizer_options.get("num_restarts", 60),
            raw_samples=optimizer_options.get("raw_samples", 1024),
            inequality_constraints=None,
            fixed_features=None,  # handled inside the acquisition function
            options={
                "batch_limit": optimizer_options.get("batch_limit", 8),
                "maxiter": optimizer_options.get("maxiter", 200),
                "nonnegative": optimizer_options.get("nonnegative", False),
                "method": "L-BFGS-B",
            },
            return_best_only=return_best_only,
        )
        rec_point = candidates.detach().cpu()
        if isinstance(acq_function, FixedFeatureAcquisitionFunction):
            rec_point = acq_function._construct_X_full(rec_point)
        if return_best_only:
            rec_point = rec_point.view(-1)
        return rec_point
Code example #11
def optimize_objective(
    model: Model,
    bounds: Tensor,
    q: int,
    objective: Optional[MCAcquisitionObjective] = None,
    posterior_transform: Optional[PosteriorTransform] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
    qmc: bool = True,
    mc_samples: int = 512,
    seed_inner: Optional[int] = None,
    optimizer_options: Optional[Dict[str, Any]] = None,
    post_processing_func: Optional[Callable[[Tensor], Tensor]] = None,
    batch_initial_conditions: Optional[Tensor] = None,
    sequential: bool = False,
    **ignore,
) -> Tuple[Tensor, Tensor]:
    r"""Optimize an objective under the given model.

    Args:
        model: The model to be used in the objective.
        bounds: A `2 x d` tensor of lower and upper bounds for each column of `X`.
        q: The cardinality of input sets on which the objective is to be evaluated.
        objective: The objective to optimize.
        posterior_transform: The posterior transform to be used in the
            acquisition function.
        linear_constraints: A tuple of (A, b). Given `k` linear constraints on a
            `d`-dimensional space, `A` is `k x d` and `b` is `k x 1` such that
            `A x <= b`. (Not used by single task models).
        fixed_features: A dictionary of feature assignments `{feature_index: value}` to
            hold fixed during generation.
        target_fidelities: A dictionary mapping input feature indices to fidelity
            values. Defaults to `{-1: 1.0}`.
        qmc: Toggle for enabling (qmc=True) or disabling (qmc=False) the use of
            quasi-Monte Carlo sampling.
        mc_samples: Integer number of samples used to estimate Monte Carlo objectives.
        seed_inner: Integer seed used to initialize the sampler passed to MCObjective.
        optimizer_options: Table used to lookup keyword arguments for the optimizer.
        post_processing_func: A function that post-processes an optimization
            result appropriately (i.e. according to `round-trip` transformations).
        batch_initial_conditions: A Tensor of initial values for the optimizer.
        sequential: If False, uses joint optimization, otherwise uses sequential
            optimization.

    Returns:
        A tuple containing the best input locations and corresponding objective values.
    """
    if optimizer_options is None:
        optimizer_options = {}

    if objective is not None:
        sampler_cls = SobolQMCNormalSampler if qmc else IIDNormalSampler
        acq_function = qSimpleRegret(
            model=model,
            objective=objective,
            posterior_transform=posterior_transform,
            sampler=sampler_cls(num_samples=mc_samples, seed=seed_inner),
        )
    else:
        acq_function = PosteriorMean(model=model,
                                     posterior_transform=posterior_transform)

    if fixed_features:
        acq_function = FixedFeatureAcquisitionFunction(
            acq_function=acq_function,
            d=bounds.shape[-1],
            columns=list(fixed_features.keys()),
            values=list(fixed_features.values()),
        )
        free_feature_dims = sorted(
            set(range(bounds.shape[-1])) - set(fixed_features.keys())
        )
        free_feature_bounds = bounds[:, free_feature_dims]  # (2, d' <= d)
    else:
        free_feature_bounds = bounds

    if linear_constraints is None:
        inequality_constraints = None
    else:
        A, b = linear_constraints
        inequality_constraints = []
        k, d = A.shape
        for i in range(k):
            indices = A[i, :].nonzero(as_tuple=False).squeeze()
            coefficients = -A[i, indices]
            rhs = -b[i, 0]
            inequality_constraints.append((indices, coefficients, rhs))

    return optimize_acqf(
        acq_function=acq_function,
        bounds=free_feature_bounds,
        q=q,
        num_restarts=optimizer_options.get("num_restarts", 60),
        raw_samples=optimizer_options.get("raw_samples", 1024),
        options={
            "batch_limit": optimizer_options.get("batch_limit", 8),
            "maxiter": optimizer_options.get("maxiter", 200),
            "nonnegative": optimizer_options.get("nonnegative", False),
            "method": optimizer_options.get("method", "L-BFGS-B"),
        },
        inequality_constraints=inequality_constraints,
        fixed_features=None,  # handled inside the acquisition function
        post_processing_func=post_processing_func,
        batch_initial_conditions=batch_initial_conditions,
        return_best_only=True,
        sequential=sequential,
    )
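The constraint conversion above takes each row of `A x <= b` and rewrites it in the `(indices, coefficients, rhs)` form that `optimize_acqf` expects, i.e. `sum_i(x[indices[i]] * coefficients[i]) >= rhs`, hence the sign flips. A small standalone sketch with made-up `A` and `b`:

import torch

A = torch.tensor([[1.0, 1.0, 0.0],
                  [0.0, 2.0, -1.0]])  # k x d
b = torch.tensor([[1.0], [0.5]])      # k x 1

inequality_constraints = []
for i in range(A.shape[0]):
    indices = A[i].nonzero(as_tuple=False).squeeze(-1)
    inequality_constraints.append((indices, -A[i, indices], -b[i, 0]))

# e.g. row 0 becomes: -x[0] - x[1] >= -1.0, which is equivalent to x[0] + x[1] <= 1.0
print(inequality_constraints)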
Code example #12
File: botorch_defaults.py  Project: bletham/Ax
def scipy_optimizer(
    acq_function: AcquisitionFunction,
    bounds: Tensor,
    n: int,
    inequality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    equality_constraints: Optional[List[Tuple[Tensor, Tensor, float]]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    **kwargs: Any,
) -> Tuple[Tensor, Tensor]:
    r"""Optimizer using scipy's minimize module on a numpy-adpator.

    Args:
        acq_function: A botorch AcquisitionFunction.
        bounds: A `2 x d`-dim tensor, where `bounds[0]` (`bounds[1]`) are the
            lower (upper) bounds of the feasible hyperrectangle.
        n: The number of candidates to generate.
        inequality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an inequality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) >= rhs`
        equality_constraints: A list of tuples (indices, coefficients, rhs),
            with each tuple encoding an equality constraint of the form
            `\sum_i (X[indices[i]] * coefficients[i]) == rhs`
        fixed_features: A map {feature_index: value} for features that should
            be fixed to a particular value during generation.
        rounding_func: A function that rounds an optimization result
            appropriately (i.e., according to `round-trip` transformations).

    Returns:
        2-element tuple containing

        - A `n x d`-dim tensor of generated candidates.
        - In the case of joint optimization, a scalar tensor containing
          the joint acquisition value of the `n` points. In the case of
          sequential optimization, a `n`-dim tensor of conditional acquisition
          values, where `i`-th element is the expected acquisition value
          conditional on having observed candidates `0,1,...,i-1`.
    """
    num_restarts: int = kwargs.get("num_restarts", 20)
    raw_samples: int = kwargs.get("num_raw_samples", 50 * num_restarts)

    if kwargs.get("joint_optimization", False):
        sequential = False
    else:
        sequential = True
        # use SLSQP by default for small problems since it yields faster wall times
        if "method" not in kwargs:
            kwargs["method"] = "SLSQP"
    X, expected_acquisition_value = optimize_acqf(
        acq_function=acq_function,
        bounds=bounds,
        q=n,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options=kwargs,
        inequality_constraints=inequality_constraints,
        equality_constraints=equality_constraints,
        fixed_features=fixed_features,
        sequential=sequential,
        post_processing_func=rounding_func,
    )
    return X, expected_acquisition_value
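A sketch of the sequential/joint distinction this wrapper toggles (the toy model and `q=2` are assumptions): with `sequential=True`, `optimize_acqf` generates the `q` candidates one at a time and returns a `q`-dim tensor of conditional acquisition values, whereas joint optimization returns a single joint value.

import torch
from botorch.models import SingleTaskGP
from botorch.acquisition import qExpectedImprovement
from botorch.optim import optimize_acqf

train_Y = torch.rand(10, 1, dtype=torch.double)
model = SingleTaskGP(torch.rand(10, 2, dtype=torch.double), train_Y)  # fitting omitted
acqf = qExpectedImprovement(model, best_f=train_Y.max())
bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]], dtype=torch.double)

X_seq, vals_seq = optimize_acqf(
    acq_function=acqf, bounds=bounds, q=2, num_restarts=10, raw_samples=64,
    sequential=True, options={"method": "SLSQP"},
)
X_joint, val_joint = optimize_acqf(
    acq_function=acqf, bounds=bounds, q=2, num_restarts=10, raw_samples=64,
)
print(vals_seq.shape, val_joint.shape)  # torch.Size([2]) vs torch.Size([])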
Code example #13
    def gen(
        self,
        n: int,
        bounds: List,
        objective_weights: Tensor,
        outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
        fixed_features: Optional[Dict[int, float]] = None,
        pending_observations: Optional[List[Tensor]] = None,
        model_gen_options: Optional[TConfig] = None,
        rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
        target_fidelities: Optional[Dict[int, float]] = None,
    ) -> Tuple[Tensor, Tensor, TGenMetadata]:
        """
        Generate new candidates.

        Args:
            n: Number of candidates to generate.
            bounds: A list of (lower, upper) tuples for each column of X.
            objective_weights: The objective is to maximize a weighted sum of
                the columns of f(x). These are the weights.
            outcome_constraints: A tuple of (A, b). For k outcome constraints
                and m outputs at f(x), A is (k x m) and b is (k x 1) such that
                A f(x) <= b.
            linear_constraints: A tuple of (A, b). For k linear constraints on
                d-dimensional x, A is (k x d) and b is (k x 1) such that
                A x <= b.
            fixed_features: A map {feature_index: value} for features that
                should be fixed to a particular value during generation.
            pending_observations:  A list of m (k_i x d) feature tensors X
                for m outcomes and k_i pending observations for outcome i.
            model_gen_options: A config dictionary that can contain
                model-specific options.
            rounding_func: A function that rounds an optimization result
                appropriately (i.e., according to `round-trip` transformations).
            target_fidelities: A map {feature_index: value} of fidelity feature
                column indices to their respective target fidelities. Used for
                multi-fidelity optimization.

        Returns:
            3-element tuple containing

            - (n x d) tensor of generated points.
            - n-tensor of weights for each point.
            - Dictionary of model-specific metadata for the given
                generation candidates.
        """
        options = model_gen_options or {}
        acf_options = options.get("acquisition_function_kwargs", {})
        optimizer_options = options.get("optimizer_kwargs", {})

        X_pending, X_observed = _get_X_pending_and_observed(
            Xs=self.Xs,
            pending_observations=pending_observations,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )

        model = self.model

        # subset model only to the outcomes we need for the optimization
        if options.get("subset_model", True):
            model, objective_weights, outcome_constraints = subset_model(
                model=model,  # pyre-ignore [6]
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )

        objective = _get_objective(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
        )

        # get the acquisition function
        n_fantasies = acf_options.get("num_fantasies", 64)
        qmc = acf_options.get("qmc", True)
        seed_inner = acf_options.get("seed_inner", None)
        num_restarts = optimizer_options.get("num_restarts", 40)
        raw_samples = optimizer_options.get("raw_samples", 1024)

        inequality_constraints = _to_inequality_constraints(linear_constraints)
        # TODO: update optimizers to handle inequality_constraints
        if inequality_constraints is not None:
            raise UnsupportedError(
                "Inequality constraints are not yet supported for KnowledgeGradient!"
            )

        # get current value
        best_point_acqf, non_fixed_idcs = self._get_best_point_acqf(
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,  # pyre-ignore: [6]
            seed_inner=seed_inner,
            fixed_features=fixed_features,
            target_fidelities=target_fidelities,
            qmc=qmc,
        )

        # solution from previous iteration
        recommended_point = self.best_point(
            bounds=bounds,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
        )
        recommended_point = recommended_point.detach().unsqueeze(
            0)  # pyre-ignore: [16]
        # Extract acquisition value (TODO: Make this less painful and repetitive)
        if non_fixed_idcs is not None:
            recommended_point = recommended_point[..., non_fixed_idcs]
        current_value = best_point_acqf(recommended_point).max()

        acq_function = _instantiate_KG(
            model=model,  # pyre-ignore [6]
            objective=objective,
            qmc=qmc,
            n_fantasies=n_fantasies,
            num_trace_observations=options.get("num_trace_observations", 0),
            mc_samples=acf_options.get("mc_samples", 256),
            seed_inner=seed_inner,
            seed_outer=acf_options.get("seed_outer", None),
            X_pending=X_pending,
            target_fidelities=target_fidelities,
            fidelity_weights=options.get("fidelity_weights"),
            current_value=current_value,
            cost_intercept=self.cost_intercept,
        )

        # optimize and get new points
        bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
        bounds_ = bounds_.transpose(0, 1)

        batch_initial_conditions = gen_one_shot_kg_initial_conditions(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "frac_random": optimizer_options.get("frac_random", 0.1),
                "num_inner_restarts": num_restarts,
                "raw_inner_samples": raw_samples,
            },
        )

        botorch_rounding_func = get_rounding_func(rounding_func)

        candidates, _ = optimize_acqf(
            acq_function=acq_function,
            bounds=bounds_,
            q=n,
            inequality_constraints=inequality_constraints,
            fixed_features=fixed_features,
            post_processing_func=botorch_rounding_func,
            num_restarts=num_restarts,
            raw_samples=raw_samples,
            options={
                "batch_limit": optimizer_options.get("batch_limit", 8),
                "maxiter": optimizer_options.get("maxiter", 200),
                "method": "L-BFGS-B",
                "nonnegative": optimizer_options.get("nonnegative", False),
            },
            batch_initial_conditions=batch_initial_conditions,
        )
        new_x = candidates.detach().cpu()
        return new_x, torch.ones(n, dtype=self.dtype), {}