Example #1
def test_get_NEI_with_chebyshev_and_missing_Ys_error(self):
    # Requesting Chebyshev scalarization without supplying Ys should fail fast.
    model = MultiObjectiveBotorchModel()
    x = torch.zeros(2, 2)
    weights = torch.ones(2)
    with self.assertRaisesRegex(
        ValueError, "Chebyshev Scalarization requires Ys argument"
    ):
        get_NEI(
            model=model,
            X_observed=x,
            objective_weights=weights,
            chebyshev_scalarization=True,
        )
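
The guard this test exercises could look something like the sketch below. It is a hypothetical reconstruction for illustration only, not the actual get_NEI source; the only assumed names are the argument names visible in the test and in the error message.

# Hypothetical sketch of the validation exercised by the test above;
# not the real get_NEI implementation.
def _require_ys_for_chebyshev(chebyshev_scalarization: bool, Ys=None) -> None:
    if chebyshev_scalarization and Ys is None:
        raise ValueError("Chebyshev Scalarization requires Ys argument")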
Example #2
from typing import Optional, Tuple, Union

import gpytorch
from botorch.acquisition import AcquisitionFunction, ExpectedImprovement
from botorch.models import ModelListGP
from torch import Tensor

# ALEBOGP and get_NEI are Ax helpers; their imports are omitted in this excerpt.


def ei_or_nei(
    model: Union[ALEBOGP, ModelListGP],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]],
    X_observed: Tensor,
    X_pending: Optional[Tensor],
    q: int,
    noiseless: bool,
) -> AcquisitionFunction:
    """Use analytic EI if appropriate, otherwise Monte Carlo NEI.

    Analytic EI can be used if there is a single outcome, no constraints, no
    pending points, no batch (q=1), and noiseless evaluations.

    Args:
        model: GP.
        objective_weights: Weights on each outcome for the objective.
        outcome_constraints: Outcome constraints.
        X_observed: Observed points for NEI.
        X_pending: Pending points.
        q: Batch size.
        noiseless: True if evaluations are noiseless.

    Returns:
        An AcquisitionFunction, either analytic EI or MC NEI.
    """
    if (
        len(objective_weights) == 1
        and outcome_constraints is None
        and X_pending is None
        and q == 1
        and noiseless
    ):
        maximize = objective_weights[0] > 0
        if maximize:
            best_f = model.train_targets.max()
        else:
            best_f = model.train_targets.min()
        return ExpectedImprovement(model=model, best_f=best_f, maximize=maximize)
    else:
        with gpytorch.settings.max_cholesky_size(2000):
            acq = get_NEI(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
            )
        return acq
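
A minimal usage sketch follows, under an assumed setup that is not part of the source: a single noiseless outcome with no constraints, no pending points, and q=1 satisfies every condition for the analytic branch, so the helper returns ExpectedImprovement; changing any of those inputs falls through to get_NEI. The SingleTaskGP and the synthetic training data below are illustrative stand-ins for the fitted ALEBOGP that Ax would normally pass in.

# Illustrative usage only; the model class and data are assumptions, not from the source.
import torch
from botorch.models import SingleTaskGP

train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)  # single outcome
gp = SingleTaskGP(train_X, train_Y)

acqf = ei_or_nei(
    model=gp,  # stand-in for a fitted ALEBOGP
    objective_weights=torch.tensor([1.0], dtype=torch.double),
    outcome_constraints=None,
    X_observed=train_X,
    X_pending=None,
    q=1,
    noiseless=True,
)
# All analytic-EI conditions hold, so acqf is an ExpectedImprovement instance.

In the fallback branch, raising gpytorch's max_cholesky_size to 2000 keeps the Monte Carlo NEI computation on exact Cholesky decompositions for larger kernel matrices instead of switching to iterative solves.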