Code example #1
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # Parse random_scalarization params
    objective_weights = _extract_random_scalarization_settings(
        objective_weights, outcome_constraints, **kwargs)
    # construct Objective module
    if outcome_constraints is None:
        objective = LinearMCObjective(weights=objective_weights)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        X_observed = torch.as_tensor(X_observed)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=obj_tf,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        seed=torch.randint(1, 10000, (1, )).item(),
    )
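A rough sketch of how get_NEI above might be called follows. The two-outcome SingleTaskGP, the random training tensors, and the constraint "second outcome <= 0.5" are assumptions made for illustration and are not part of the snippet; whether it runs as-is depends on the Ax version the snippet was taken from.

import torch
from botorch.models import SingleTaskGP

# Illustrative data: 10 observed points in a 3-dimensional search space,
# with two modeled outcomes (an objective and a constrained metric).
train_X = torch.rand(10, 3, dtype=torch.double)
train_Y = torch.randn(10, 2, dtype=torch.double)
model = SingleTaskGP(train_X, train_Y)

# Maximize the first outcome subject to the second being at most 0.5,
# i.e. A f(x) <= b with A = [[0, 1]] and b = [[0.5]].
acqf = get_NEI(
    model=model,
    objective_weights=torch.tensor([1.0, 0.0], dtype=torch.double),
    outcome_constraints=(
        torch.tensor([[0.0, 1.0]], dtype=torch.double),
        torch.tensor([[0.5]], dtype=torch.double),
    ),
    X_observed=train_X,
)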
Code example #2
def get_PosteriorMean(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a PosteriorMean acquisition function.

    Note: If no OutcomeConstraints given, return an analytic acquisition
    function. This requires {optimizer_kwargs: {joint_optimization: True}} or an
    optimizer that does not assume pending point support.

    Args:
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).

    Returns:
        qSimpleRegret: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        if "Ys" not in kwargs:
            raise ValueError("Chebyshev scalarization requires Ys argument")
        obj_tf = get_chebyshev_scalarization(
            weights=objective_weights,
            Y=torch.stack(kwargs.get("Ys")).transpose(0, 1).squeeze(-1),
        )
    else:
        obj_tf = get_objective_weights_transform(objective_weights)

    def obj_fn(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
        return obj_tf(samples)

    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_fn)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_fn)
        objective = ConstrainedMCObjective(objective=obj_fn,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    # Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
    acq_func = qSimpleRegret(model, objective=objective)
    return acq_func
Code example #3
    def test_BroadcastEvaluation(self):
        k, t = 3, 4
        mc_samples, b, q = 6, 4, 5
        for dtype in (torch.float, torch.double):
            A_ = torch.randn(k, t, dtype=dtype, device=self.device)
            b_ = torch.randn(k, 1, dtype=dtype, device=self.device)
            Y = torch.randn(mc_samples, b, q, t, dtype=dtype, device=self.device)
            ocs = get_outcome_constraint_transforms((A_, b_))
            self.assertEqual(len(ocs), k)
            self.assertEqual(ocs[0](Y).shape, torch.Size([mc_samples, b, q]))
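The broadcast test above pins down the contract of get_outcome_constraint_transforms: given (A, b) with A of shape (k x t) and b of shape (k x 1), it returns k callables, each mapping a `... x t` tensor of outcomes to a `...`-shaped tensor of constraint slacks, with non-positive values indicating feasibility. A minimal standalone sketch of that behavior, assuming the import path used in recent BoTorch releases:

import torch
from botorch.utils.constraints import get_outcome_constraint_transforms

# k = 2 constraints on t = 3 outcomes: y_0 <= 0.5 and y_1 + y_2 <= 1.0.
A = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 1.0]])
b = torch.tensor([[0.5], [1.0]])
ocs = get_outcome_constraint_transforms((A, b))

Y = torch.randn(6, 4, 5, 3)    # mc_samples x batch x q x t, as in the test
print(len(ocs))                # 2
print(ocs[0](Y).shape)         # torch.Size([6, 4, 5]) -- the outcome dim is reduced
print((ocs[0](Y) <= 0).any())  # non-positive slack means y_0 <= 0.5 holds there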
Code example #4
    def test_BasicEvaluation(self):
        for dtype in (torch.float, torch.double):
            A = self.A.to(dtype=dtype, device=self.device)
            b = self.b.to(dtype=dtype, device=self.device)
            Ys = self.Ys.to(dtype=dtype, device=self.device)
            results = self.results.to(dtype=dtype, device=self.device)
            ocs = get_outcome_constraint_transforms((A, b))
            self.assertEqual(len(ocs), 2)
            for i in (0, 1):
                for j in (0, 1):
                    self.assertTrue(torch.equal(ocs[j](Ys[:, i]), results[:, i, j]))
Code example #5
File: test_constraints.py Project: saschwan/botorch
    def test_BroadcastEvaluation(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        k, t = 3, 4
        mc_samples, b, q = 6, 4, 5
        for dtype in (torch.float, torch.double):
            A_ = torch.randn(k, t, dtype=dtype, device=device)
            b_ = torch.randn(k, 1, dtype=dtype, device=device)
            Y = torch.randn(mc_samples, b, q, t, dtype=dtype, device=device)
            ocs = get_outcome_constraint_transforms((A_, b_))
            self.assertEqual(len(ocs), k)
            self.assertEqual(ocs[0](Y).shape, torch.Size([mc_samples, b, q]))
Code example #6
File: test_moo_acquisition.py Project: fahriwm/Ax
    def setUp(self):
        self.botorch_model_class = SingleTaskGP
        self.surrogate = Surrogate(
            botorch_model_class=self.botorch_model_class)
        self.X = torch.tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]])
        self.Y = torch.tensor([[3.0, 4.0, 2.0], [4.0, 3.0, 1.0]])
        self.Yvar = torch.tensor([[0.0, 2.0, 1.0], [2.0, 0.0, 1.0]])
        self.training_data = TrainingData(X=self.X, Y=self.Y, Yvar=self.Yvar)
        self.fidelity_features = [2]
        self.surrogate.construct(training_data=self.training_data)

        self.bounds = [(0.0, 10.0), (0.0, 10.0), (0.0, 10.0)]
        self.botorch_acqf_class = DummyACQFClass
        self.objective_weights = torch.tensor([1.0, -1.0, 0.0])
        self.objective_thresholds = torch.tensor([2.0, 1.0, float("nan")])
        self.pending_observations = [
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
            torch.tensor([[1.0, 3.0, 4.0]]),
        ]
        self.outcome_constraints = (
            torch.tensor([[1.0, 0.5, 0.5]]),
            torch.tensor([[0.5]]),
        )
        self.con_tfs = get_outcome_constraint_transforms(
            self.outcome_constraints)
        self.linear_constraints = None
        self.fixed_features = {1: 2.0}
        self.target_fidelities = {2: 1.0}
        self.options = {}
        self.acquisition = MOOAcquisition(
            surrogate=self.surrogate,
            bounds=self.bounds,
            objective_weights=self.objective_weights,
            objective_thresholds=self.objective_thresholds,
            botorch_acqf_class=self.botorch_acqf_class,
            pending_observations=self.pending_observations,
            outcome_constraints=self.outcome_constraints,
            linear_constraints=self.linear_constraints,
            fixed_features=self.fixed_features,
            target_fidelities=self.target_fidelities,
            options=self.options,
        )

        self.inequality_constraints = [(torch.tensor([0, 1]),
                                        torch.tensor([-1.0, 1.0]), 1)]
        self.rounding_func = lambda x: x
        self.optimizer_options = {
            Keys.NUM_RESTARTS: 40,
            Keys.RAW_SAMPLES: 1024
        }
Code example #7
File: test_constraints.py Project: saschwan/botorch
    def test_BasicEvaluation(self, cuda=False):
        device = torch.device("cuda") if cuda else torch.device("cpu")
        for dtype in (torch.float, torch.double):
            A = self.A.to(dtype=dtype, device=device)
            b = self.b.to(dtype=dtype, device=device)
            Ys = self.Ys.to(dtype=dtype, device=device)
            results = self.results.to(dtype=dtype, device=device)
            ocs = get_outcome_constraint_transforms((A, b))
            self.assertEqual(len(ocs), 2)
            for i in (0, 1):
                for j in (0, 1):
                    print(f"Actual: {ocs[j](Ys[:, i])}")
                    print(f"Expected: {results[:, i, j]}")
                    self.assertTrue(torch.equal(ocs[j](Ys[:, i]), results[:, i, j]))
Code example #8
File: moo_acquisition.py Project: jeffersonp317/Ax
    def _instantiate_acqf(
        self,
        model: Model,
        objective: AcquisitionObjective,
        model_dependent_kwargs: Dict[str, Any],
        objective_thresholds: Optional[Tensor] = None,
        X_pending: Optional[Tensor] = None,
        X_baseline: Optional[Tensor] = None,
    ) -> None:
        # Extract model dependent kwargs
        outcome_constraints = model_dependent_kwargs.pop("outcome_constraints")
        # Replicate `get_EHVI` transformation code
        X_observed = X_baseline
        if X_observed is None:
            raise ValueError("There are no feasible observed points.")
        if objective_thresholds is None:
            raise ValueError("Objective Thresholds required")
        with torch.no_grad():
            Y = model.posterior(X_observed).mean

        # For EHVI acquisition functions we pass the constraint transform directly.
        if outcome_constraints is None:
            cons_tfs = None
        else:
            cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
        num_objectives = objective_thresholds.shape[0]

        mc_samples = self.options.get("mc_samples", DEFAULT_EHVI_MC_SAMPLES)
        qmc = self.options.get("qmc", True)
        alpha = self.options.get(
            "alpha",
            get_default_partitioning_alpha(num_objectives=num_objectives),
        )
        # This selects the objectives (a subset of the outcomes) and sets each
        # objective threshold to have the proper optimization direction.
        ref_point = objective(objective_thresholds).tolist()

        # initialize the sampler
        seed = int(torch.randint(1, 10000, (1,)).item())
        if qmc:
            sampler = SobolQMCNormalSampler(num_samples=mc_samples, seed=seed)
        else:
            sampler = IIDNormalSampler(
                num_samples=mc_samples, seed=seed
            )  # pragma: nocover
        if not ref_point:
            raise ValueError(
                "`ref_point` must be specified in kwargs for qEHVI"
            )  # pragma: nocover
        # get feasible points
        if cons_tfs is not None:
            # pyre-ignore [16]: `Tensor` has no attribute `all`.
            feas = torch.stack([c(Y) <= 0 for c in cons_tfs], dim=-1).all(dim=-1)
            Y = Y[feas]
        obj = objective(Y)
        partitioning = NondominatedPartitioning(
            ref_point=torch.as_tensor(ref_point, dtype=Y.dtype, device=Y.device),
            Y=obj,
            alpha=alpha,
        )
        self.acqf = self._botorch_acqf_class(  # pyre-ignore[28]: Some kwargs are
            # not expected in base `AcquisitionFunction` but are expected in
            # its subclasses.
            model=model,
            ref_point=ref_point,
            partitioning=partitioning,
            sampler=sampler,
            objective=objective,
            constraints=cons_tfs,
            X_pending=X_pending,
        )
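The feasibility filter in the middle of _instantiate_acqf is the usual way to combine the constraint transforms with EHVI-style acquisition functions: each transform is evaluated on the posterior means of the observed points, and only points whose slack is non-positive for every constraint enter the hypervolume partitioning. A small hedged sketch of that step in isolation (the tensors are illustrative, not taken from the snippet):

import torch
from botorch.utils.constraints import get_outcome_constraint_transforms

# Posterior means at three observed points for m = 3 outcomes (made-up values).
Y = torch.tensor([[0.2, 0.9, 1.5],
                  [0.7, 0.1, 0.3],
                  [0.4, 0.4, 0.2]])
# A single outcome constraint: y_0 <= 0.5.
cons_tfs = get_outcome_constraint_transforms(
    (torch.tensor([[1.0, 0.0, 0.0]]), torch.tensor([[0.5]]))
)

# Same idiom as the snippet: keep rows where every constraint slack is <= 0.
feas = torch.stack([c(Y) <= 0 for c in cons_tfs], dim=-1).all(dim=-1)
Y_feasible = Y[feas]
print(Y_feasible)  # rows 0 and 2 survive; row 1 violates y_0 <= 0.5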
Code example #9
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a qNoisyExpectedImprovement acquisition function.

    Args:
        model: The underlying model which the acquisition function uses
            to estimate acquisition values of candidates.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        if "Ys" not in kwargs:
            raise ValueError("Chebyshev Scalarization requires Ys argument")
        Y_tensor = torch.cat(kwargs.get("Ys"), dim=-1)
        obj_tf = get_chebyshev_scalarization(weights=objective_weights,
                                             Y=Y_tensor)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)
    if outcome_constraints is None:
        objective = GenericMCObjective(objective=obj_tf)
    else:
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed,
                                       model=model,
                                       objective=obj_tf)
        objective = ConstrainedMCObjective(objective=obj_tf,
                                           constraints=con_tfs or [],
                                           infeasible_cost=inf_cost)
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1, )).item(),
    )
Code example #10
File: botorch_defaults.py Project: facebook/Ax
def _get_acquisition_func(
    model: Model,
    acquisition_function_name: str,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    mc_objective: Type[GenericMCObjective] = GenericMCObjective,
    constrained_mc_objective: Optional[
        Type[ConstrainedMCObjective]
    ] = ConstrainedMCObjective,
    mc_objective_kwargs: Optional[Dict] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiates a acquisition function.

    Args:
        model: The underlying model which the acqusition function uses
            to estimate acquisition values of candidates.
        acquisition_function_name: Name of the acquisition function.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        X_observed: A tensor containing points observed for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        X_pending: A tensor containing points whose evaluation is pending (i.e.
            that have been submitted for evaluation) present for all objective
            outcomes and outcomes that appear in the outcome constraints (if
            there are any).
        mc_objective: GenericMCObjective class, used for constructing an
            MC-objective. If constructing a penalized MC-objective, pass in
            PenalizedMCObjective together with mc_objective_kwargs.
        constrained_mc_objective: ConstrainedMCObjective class, used when
            applying constraints on the outcomes.
        mc_objective_kwargs: kwargs for constructing MC-objective.
            For GenericMCObjective, leave it as None. For PenalizedMCObjective,
            it needs to be specified in the format of kwargs.
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI (default: True).
        chebyshev_scalarization: Use augmented Chebyshev scalarization.

    Returns:
        The instantiated acquisition function.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # construct Objective module
    if kwargs.get("chebyshev_scalarization", False):
        with torch.no_grad():
            Y = model.posterior(X_observed).mean
        obj_tf = get_chebyshev_scalarization(weights=objective_weights, Y=Y)
    else:
        obj_tf = get_objective_weights_transform(objective_weights)

    def objective(samples: Tensor, X: Optional[Tensor] = None) -> Tensor:
        return obj_tf(samples)

    if outcome_constraints is None:
        mc_objective_kwargs = {} if mc_objective_kwargs is None else mc_objective_kwargs
        objective = mc_objective(objective=objective, **mc_objective_kwargs)
    else:
        if constrained_mc_objective is None:
            raise ValueError(
                "constrained_mc_objective cannot be set to None "
                "when applying outcome constraints."
            )
        if issubclass(mc_objective, PenalizedMCObjective):
            raise RuntimeError(
                "Outcome constraints are not supported for PenalizedMCObjective."
            )
        con_tfs = get_outcome_constraint_transforms(outcome_constraints)
        inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=objective)
        objective = constrained_mc_objective(
            objective=objective, constraints=con_tfs or [], infeasible_cost=inf_cost
        )
    return get_acquisition_function(
        acquisition_function_name=acquisition_function_name,
        model=model,
        objective=objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        # pyre-fixme[6]: Expected `Optional[int]` for 9th param but got
        #  `Union[float, int]`.
        seed=torch.randint(1, 10000, (1,)).item(),
        marginalize_dim=kwargs.get("marginalize_dim"),
    )
Code example #11
    def fit_albo_objective(self) -> Tuple[AlboMCObjective, Dict[str, Any]]:
        r"""Inner loop of the augmented Lagrangian algorithm.

        Args:
            model: A BoTorch model, fitted to observed data
            objective_callable: A callable transformation from model outputs to the objective
            constraints_callable_list: A list of callable transformations from model outputs to
                constraints, with negative values implying feasibility
            sampler: An MCSampler instance for Monte Carlo acquisition

        Returns:
            albo_objective: augmented objective with fitted Lagrange multipliers
            trace: optimization trace
        """
        objective_callable = get_objective_weights_transform(
            self.objective_weights)
        constraints_callable_list = get_outcome_constraint_transforms(
            self.outcome_constraints)
        penalty_rate = self.init_penalty_rate
        num_mults = self.outcome_constraints[0].shape[0]
        if self.init_mults is not None:
            assert num_mults == self.init_mults.shape[-1]
            mults = self.init_mults
        else:
            mults = torch.Tensor(
                [self._default_init_mult for _ in range(num_mults)])

        x_trace = torch.zeros_like(self.bounds[0].unsqueeze(0))
        mults_trace = mults.unsqueeze(0)
        output_means = torch.zeros((1, self.model.num_outputs), dtype=float)
        output_variances = torch.zeros((1, self.model.num_outputs),
                                       dtype=float)

        for i in range(self.num_iter):
            self._execute_callbacks("on_iter_start", locals())

            # 1. Optimize the augmented objective with fixed multipliers to find the next point for multipliers update
            albo_objective = self.albo_objective_constructor(
                objective=objective_callable,
                constraints=constraints_callable_list,
                penalty_rate=penalty_rate,
                lagrange_mults=mults)

            # Using predictive mean for inner loop optimization
            acq_function = qSimpleRegret(model=self.model,
                                         objective=albo_objective,
                                         sampler=self.sampler)

            x, val = optimize_acqf(acq_function=acq_function,
                                   bounds=self.bounds,
                                   q=1,
                                   num_restarts=self.num_restarts,
                                   raw_samples=self.raw_samples)

            # 2. Compute the Lagrange multiplier update at the optimum of the augmented objective
            posterior = self.model.posterior(x.unsqueeze(0))
            samples = self.sampler(posterior)
            mults_next, mults_stds_next = albo_objective.get_mults_update(
                samples)

            # 3. Possibly apply heuristics here, i.e. clamp mults before the update and increase the penalty rate
            mults = mults_next  # currently not using any heuristics
            penalty_rate = self.init_penalty_rate  # seems to work just fine with a constant penalty rate

            # 4. Write trace of inner-loop optimization for debugging
            x_trace = torch.cat([x_trace, x], dim=0)
            mults_trace = torch.cat([mults_trace, mults.unsqueeze(0)], dim=0)
            output_means = torch.cat(
                [output_means,
                 posterior.mean.detach().squeeze(dim=0)], dim=0)
            output_variances = torch.cat(
                [output_variances,
                 posterior.variance.detach().squeeze(dim=0)],
                dim=0)

            # 5. Check stopping condition for inner loop (not implemented)
            self._execute_callbacks("on_iter_end", locals())
            continue

        # Construct final objective
        albo_objective = self.albo_objective_constructor(
            objective=objective_callable,
            constraints=constraints_callable_list,
            penalty_rate=penalty_rate,
            lagrange_mults=mults)

        trace = {
            'x': x_trace,
            'mults': mults_trace,
            'output': {
                'mean': output_means,
                'variance': output_variances
            }
        }

        return albo_objective, trace
Code example #12
File: test_constraints.py Project: saschwan/botorch
    def test_None(self):
        self.assertIsNone(get_outcome_constraint_transforms(None))