Example #1
 def test_squeeze_last_dim(self):
     Y = torch.rand(2, 1, 1)
     with warnings.catch_warnings(record=True) as ws:
         Y_squeezed = squeeze_last_dim(Y=Y)
         self.assertTrue(
             any(issubclass(w.category, DeprecationWarning) for w in ws))
     self.assertTrue(torch.equal(Y_squeezed, Y.squeeze(-1)))
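A minimal behavioural sketch of what the assertion above pins down, assuming squeeze_last_dim simply removes a trailing singleton dimension (the deprecation warning checked in this test suggests callers should prefer Tensor.squeeze(-1) directly):

import torch

Y = torch.rand(2, 1, 1)        # shape (2, 1, 1)
Y_squeezed = Y.squeeze(-1)     # drop the trailing singleton dimension -> shape (2, 1)
assert Y_squeezed.shape == (2, 1)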
Example #2
def get_PosteriorMean(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a PosteriorMean acquisition function.

    Note: If no OutcomeConstraints are given, the acquisition function reduces to
    maximizing the (weighted) posterior mean. This requires
    {optimizer_kwargs: {joint_optimization: True}} or an optimizer that does not
    assume pending point support.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).

    Returns:
        PosteriorMean: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        obj_tf = get_chebyshev_scalarization(
            weights=objective_weights,
            Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)),
        )
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_tf)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=obj_tf,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
    acq_func = qSimpleRegret(model, objective=objective)
    return acq_func
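A minimal usage sketch for get_PosteriorMean as defined above, assuming a BoTorch SingleTaskGP surrogate; the training data, weights, and constraint matrices are purely illustrative:

import torch
from botorch.models import SingleTaskGP

# Hypothetical data: 10 points in 2 dimensions with 2 outcomes.
train_X = torch.rand(10, 2)
train_Y = torch.rand(10, 2)
model = SingleTaskGP(train_X, train_Y)  # hyperparameter fitting omitted for brevity

# Maximize the first outcome only.
objective_weights = torch.tensor([1.0, 0.0])

# One outcome constraint f_2(x) <= 0.5, i.e. A f(x) <= b with A of shape (1 x 2)
# and b of shape (1 x 1).
A = torch.tensor([[0.0, 1.0]])
b = torch.tensor([[0.5]])

acqf = get_PosteriorMean(
    model=model,
    objective_weights=objective_weights,
    outcome_constraints=(A, b),
    X_observed=train_X,
)
# Per the body above, acqf is a qSimpleRegret instance wrapping a
# ConstrainedMCObjective, and can be handed to any BoTorch optimizer.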
Example #3
 def test_squeeze_last_dim(self):
     Y = torch.rand(2, 1, 1)
     Y_squeezed = squeeze_last_dim(Y=Y)
     self.assertTrue(torch.equal(Y_squeezed, Y.squeeze(-1)))
Example #4
def get_EHVI(
    model: Model,
    objective_weights: Tensor,
    objective_thresholds: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qExpectedHyperVolumeImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        objective_thresholds: A tensor containing thresholds forming a reference
            point from which to calculate Pareto frontier hypervolume. Points
            that do not dominate the objective_thresholds contribute nothing to
            hypervolume.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).

    Returns:
        qExpectedHypervolumeImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    (
        objective,
        objective_thresholds,
    ) = get_weighted_mc_objective_and_objective_thresholds(
        objective_weights=objective_weights, objective_thresholds=objective_thresholds
    )
    if "Ys" not in kwargs:
        raise ValueError("Expected Hypervolume Improvement requires Ys argument")
    Y_tensor = squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1))
    # For EHVI acquisition functions we pass the constraint transform directly.
    if outcome_constraints is None:
        cons_tfs = None
    else:
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)

    return get_acquisition_function(
        acquisition_function_name="qEHVI",
        model=model,
        # TODO (jej): Fix pyre error below by restructuring class hierarchy.
        # pyre-fixme[6]: Expected `botorch.acquisition.objective.
        #  MCAcquisitionObjective` for 3rd parameter `objective` to call
        #  `get_acquisition_function` but got `IdentityMCMultiOutputObjective`.
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        constraints=cons_tfs,
        mc_samples=kwargs.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES),
        qmc=kwargs.get("qmc", True),
        seed=torch.randint(1, 10000, (1,)).item(),
        ref_point=objective_thresholds.tolist(),
        Y=Y_tensor,
    )
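A minimal usage sketch for get_EHVI on a two-objective problem, again assuming a SingleTaskGP surrogate; data, weights, and thresholds are purely illustrative:

import torch
from botorch.models import SingleTaskGP

# Hypothetical data: 8 points in 3 dimensions with 2 outcomes, both maximized.
train_X = torch.rand(8, 3)
train_Y = torch.rand(8, 2)
model = SingleTaskGP(train_X, train_Y)  # hyperparameter fitting omitted for brevity

objective_weights = torch.tensor([1.0, 1.0])
objective_thresholds = torch.tensor([0.1, 0.1])  # reference point for the hypervolume

# `Ys` mirrors how the function consumes it above: a list of per-outcome (n x 1)
# observation tensors that get stacked, transposed, and squeezed into an (n x m) matrix.
Ys = [train_Y[:, 0:1], train_Y[:, 1:2]]

acqf = get_EHVI(
    model=model,
    objective_weights=objective_weights,
    objective_thresholds=objective_thresholds,
    X_observed=train_X,
    Ys=Ys,
)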