def get_botorch_objective(
    model: Model,
    objective_weights: Tensor,
    use_scalarized_objective: bool = True,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    X_observed: Optional[Tensor] = None,
) -> AcquisitionObjective:
    """Construct the BoTorch ``AcquisitionObjective`` for the given settings.

    Args:
        model: A BoTorch Model.
        objective_weights: Weights for the (maximized) linear combination of
            the columns of f(x).
        use_scalarized_objective: If True (default), prefer a
            ``ScalarizedObjective``. NOTE: ignored whenever
            ``outcome_constraints`` are supplied.
        outcome_constraints: A tuple (A, b). For k outcome constraints and m
            outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models.)
        objective_thresholds: Thresholds forming a reference point from which
            to compute Pareto-frontier hypervolume; points that do not
            dominate these thresholds contribute nothing to hypervolume.
        X_observed: Observed points that are feasible and appear in the
            objective or the constraints. None if there are no such points.

    Returns:
        One of ``WeightedMCMultiOutputObjective`` (when
        ``objective_thresholds`` is given), ``ConstrainedMCObjective`` (when
        ``outcome_constraints`` is given), ``ScalarizedObjective``, or
        ``LinearMCObjective``.

    Raises:
        UnsupportedError: If ``X_observed`` is None.
    """
    if objective_thresholds is not None:
        # Multi-objective case: restrict to outcomes with nonzero weight.
        active_idcs = objective_weights.nonzero().view(-1)
        return WeightedMCMultiOutputObjective(
            weights=objective_weights[active_idcs],
            outcomes=active_idcs.tolist(),
        )
    if X_observed is None:
        raise UnsupportedError(
            "X_observed is required to construct a BoTorch Objective.")
    if not outcome_constraints:
        # Unconstrained: analytic scalarization when requested, else MC-based.
        if use_scalarized_objective:
            return ScalarizedObjective(weights=objective_weights)
        return LinearMCObjective(weights=objective_weights)
    if use_scalarized_objective:
        logger.warning(
            "Currently cannot use ScalarizedObjective when there are outcome "
            "constraints. Ignoring (default) kwarg `use_scalarized_objective`"
            "= True. Creating ConstrainedMCObjective.")
    weighted_obj = get_objective_weights_transform(objective_weights)
    constraint_tfs = get_outcome_constraint_transforms(outcome_constraints)
    infeasible_cost = get_infeasible_cost(
        X=X_observed, model=model, objective=weighted_obj
    )
    # The objective callable must accept (samples, X); the weight transform
    # only consumes the samples.
    return ConstrainedMCObjective(
        objective=lambda samples, X=None: weighted_obj(samples),
        constraints=constraint_tfs or [],
        infeasible_cost=infeasible_cost,
    )
def get_NEI(
    model: Model,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    X_observed: Optional[Tensor] = None,
    X_pending: Optional[Tensor] = None,
    **kwargs: Any,
) -> AcquisitionFunction:
    r"""Instantiate a qNoisyExpectedImprovement acquisition function.

    Args:
        model: A fitted BoTorch Model.
        objective_weights: Weights for the (maximized) linear combination of
            the columns of f(x).
        outcome_constraints: A tuple (A, b). For k outcome constraints and m
            outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models.)
        X_observed: Points observed for all objective outcomes and all
            outcomes appearing in the outcome constraints (if any).
        X_pending: Points whose evaluation is pending (i.e. submitted for
            evaluation) for all objective outcomes and all outcomes appearing
            in the outcome constraints (if any).
        mc_samples: The number of MC samples to use (default: 512).
        qmc: If True, use qMC instead of MC (default: True).
        prune_baseline: If True, prune the baseline points for NEI
            (default: True).

    Returns:
        qNoisyExpectedImprovement: The instantiated acquisition function.

    Raises:
        ValueError: If ``X_observed`` is None.
    """
    if X_observed is None:
        raise ValueError("There are no feasible observed points.")
    # Fold any random-scalarization settings from kwargs into the weights.
    objective_weights = _extract_random_scalarization_settings(
        objective_weights, outcome_constraints, **kwargs
    )
    # Assemble the MC objective: plain linear when unconstrained, otherwise a
    # feasibility-weighted constrained objective.
    if outcome_constraints is None:
        acq_objective = LinearMCObjective(weights=objective_weights)
    else:
        weighted_obj = get_objective_weights_transform(objective_weights)
        constraint_tfs = get_outcome_constraint_transforms(outcome_constraints)
        X_observed = torch.as_tensor(X_observed)
        acq_objective = ConstrainedMCObjective(
            objective=weighted_obj,
            constraints=constraint_tfs or [],
            infeasible_cost=get_infeasible_cost(
                X=X_observed, model=model, objective=weighted_obj
            ),
        )
    return get_acquisition_function(
        acquisition_function_name="qNEI",
        model=model,
        objective=acq_objective,
        X_observed=X_observed,
        X_pending=X_pending,
        prune_baseline=kwargs.get("prune_baseline", True),
        mc_samples=kwargs.get("mc_samples", 512),
        qmc=kwargs.get("qmc", True),
        seed=torch.randint(1, 10000, (1,)).item(),
    )
return Z[..., 0] # def constraint_callable(Z): # return Z[..., 1] def constraint_callable_list(num_constraints, num_objs=1): return [lambda Z: Z[..., i + num_objs] for i in range(num_constraints)] # define a feasibility-weighted objective for optimization constrained_obj = ConstrainedMCObjective( objective=obj_callable, constraints=constraint_callable_list(problem.num_constraints, 1), infeasible_cost= MIN_OBJ_UPPER_BOUND, # CAUTION if objective to be minimized can return positive value!!! # (or objective to be maximized can return nagetive value) ) def optimize_acqf_and_get_observation(acq_func, obj_func, time_list, global_start_time): """Optimizes the acquisition function, and returns a new candidate and observation.""" # optimize candidates, _ = optimize_acqf( acq_function=acq_func, bounds=standard_bounds, q=1, num_restarts=10, raw_samples=512, # used for intialization heuristic