def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata]:
    """Generate ``n`` candidates by optimizing the acquisition function.

    Constructs the acquisition function via ``self.acqf_constructor`` and
    optimizes it with ``self.acqf_optimizer``, returning the candidates,
    uniform weights, and the expected acquisition value as metadata.

    Raises:
        NotImplementedError: If ``target_fidelities`` is provided; this
            base implementation does not support multi-fidelity generation.
    """
    options = model_gen_options or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    if target_fidelities:
        raise NotImplementedError(
            "target_fidelities not implemented for base BotorchModel"
        )

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
        model=self.model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
        X_pending=X_pending,
        **acf_options,
    )

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)

    # Convert A x <= b into the (indices, coefficients, rhs) triples that
    # BoTorch uses for inequality constraints (note the sign flip: BoTorch
    # encodes coefficients @ x[indices] >= rhs).
    if linear_constraints is None:
        inequality_constraints = None
    else:
        A, b = linear_constraints
        inequality_constraints = []
        num_constraints = A.shape[0]
        for row in range(num_constraints):
            indices = A[row, :].nonzero().view(-1)
            coefficients = -A[row, indices]
            rhs = -b[row, 0]
            inequality_constraints.append((indices, coefficients, rhs))

    botorch_rounding_func = get_rounding_func(rounding_func)
    # pyre-ignore: [28]
    candidates, expected_acquisition_value = self.acqf_optimizer(
        acq_function=checked_cast(AcquisitionFunction, acquisition_function),
        bounds=bounds_,
        n=n,
        inequality_constraints=inequality_constraints,
        fixed_features=fixed_features,
        rounding_func=botorch_rounding_func,
        **optimizer_options,
    )
    return (
        candidates.detach().cpu(),
        torch.ones(n, dtype=self.dtype),
        {"expected_acquisition_value": expected_acquisition_value.tolist()},
    )
def gen(
    self,
    n: int,
    bounds: List,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
    r"""Generate new candidates via Knowledge Gradient.

    Args:
        n: Number of candidates to generate.
        bounds: A list of (lower, upper) tuples for each column of X.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value during generation.
        pending_observations: A list of m (k_i x d) feature tensors X
            for m outcomes and k_i pending observations for outcome i.
        model_gen_options: A config dictionary that can contain
            model-specific options.
        rounding_func: A function that rounds an optimization result
            appropriately (i.e., according to `round-trip`
            transformations).
        target_fidelities: A map {feature_index: value} of fidelity feature
            column indices to their respective target fidelities. Used for
            multi-fidelity optimization.

    Returns:
        4-element tuple containing

        - (n x d) tensor of generated points.
        - n-tensor of weights for each point.
        - Dictionary of model-specific metadata for the given generation
          candidates.
        - Optional list of per-candidate metadata (always ``None`` for
          this model).
    """
    options = model_gen_options or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    # Subset model only to the outcomes we need for the optimization.
    model = not_none(self.model)
    if options.get("subset_model", True):
        subset_model_results = subset_model(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )
        model = subset_model_results.model
        objective_weights = subset_model_results.objective_weights
        outcome_constraints = subset_model_results.outcome_constraints

    objective = get_botorch_objective(
        model=model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
    )

    inequality_constraints = _to_inequality_constraints(linear_constraints)
    # TODO: update optimizers to handle inequality_constraints
    if inequality_constraints is not None:
        raise UnsupportedError(
            "Inequality constraints are not yet supported for KnowledgeGradient!"
        )

    # Extract a few options.
    n_fantasies = acf_options.get("num_fantasies", 64)
    qmc = acf_options.get("qmc", True)
    seed_inner = acf_options.get("seed_inner", None)
    num_restarts = optimizer_options.get("num_restarts", 40)
    raw_samples = optimizer_options.get("raw_samples", 1024)

    # Get the incumbent value (needed so KG measures *improvement*).
    current_value = self._get_current_value(
        model=model,
        bounds=bounds,
        X_observed=not_none(X_observed),
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        seed_inner=seed_inner,
        fixed_features=fixed_features,
        model_gen_options=model_gen_options,
        target_fidelities=target_fidelities,
        qmc=qmc,
    )

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)

    # Get acquisition function.
    acq_function = _instantiate_KG(
        model=model,
        objective=objective,
        qmc=qmc,
        n_fantasies=n_fantasies,
        num_trace_observations=options.get("num_trace_observations", 0),
        mc_samples=acf_options.get("mc_samples", 256),
        seed_inner=seed_inner,
        seed_outer=acf_options.get("seed_outer", None),
        X_pending=X_pending,
        target_fidelities=target_fidelities,
        fidelity_weights=options.get("fidelity_weights"),
        current_value=current_value,
        cost_intercept=self.cost_intercept,
    )

    # Optimize and get new points.
    new_x = _optimize_and_get_candidates(
        acq_function=acq_function,
        bounds_=bounds_,
        n=n,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        optimizer_options=optimizer_options,
        rounding_func=rounding_func,
        inequality_constraints=inequality_constraints,
        fixed_features=fixed_features,
    )

    return new_x, torch.ones(n, dtype=self.dtype), {}, None
def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
    """Generate ``n`` candidates.

    When random or Chebyshev scalarization is requested via
    ``acquisition_function_kwargs``, one acquisition function is built per
    candidate from a randomized set of objective weights and the list is
    optimized sequentially; otherwise a single acquisition function is
    constructed and optimized.

    Raises:
        NotImplementedError: If ``target_fidelities`` is provided; this
            base implementation does not support multi-fidelity generation.
    """
    options = model_gen_options or {}
    acf_options = options.get(Keys.ACQF_KWARGS, {})
    optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

    if target_fidelities:
        raise NotImplementedError(
            "target_fidelities not implemented for base BotorchModel"
        )

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    model = self.model
    # Subset model only to the outcomes we need for the optimization.
    if options.get(Keys.SUBSET_MODEL, True):
        model, objective_weights, outcome_constraints = subset_model(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)
    botorch_rounding_func = get_rounding_func(rounding_func)

    use_scalarization = acf_options.get(
        "random_scalarization", False
    ) or acf_options.get("chebyshev_scalarization", False)

    if use_scalarization:
        # TODO (jej): Move into MultiObjectiveBotorch
        # If using a list of acquisition functions, the algorithm to generate
        # that list is configured by acquisition_function_kwargs.
        weights_per_candidate = [
            randomize_objective_weights(objective_weights, **acf_options)
            for _ in range(n)
        ]
        acquisition_function_list = [
            checked_cast(
                AcquisitionFunction,
                self.acqf_constructor(  # pyre-ignore: [28]
                    model=model,
                    objective_weights=weights,
                    outcome_constraints=outcome_constraints,
                    X_observed=X_observed,
                    X_pending=X_pending,
                    # Required for chebyshev scalarization calculations.
                    Ys=self.Ys,
                    **acf_options,
                ),
            )
            for weights in weights_per_candidate
        ]
        # Multiple acquisition functions require a sequential optimizer;
        # always use scipy_optimizer_list.
        # TODO(jej): Allow any optimizer.
        candidates, expected_acquisition_value = scipy_optimizer_list(
            acq_function_list=acquisition_function_list,
            bounds=bounds_,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
    else:
        acquisition_function = checked_cast(
            AcquisitionFunction,
            self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
            ),
        )
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=acquisition_function,
            bounds=bounds_,
            n=n,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )

    return (
        candidates.detach().cpu(),
        torch.ones(n, dtype=self.dtype),
        {"expected_acquisition_value": expected_acquisition_value.tolist()},
        None,
    )
def __init__(
    self,
    surrogate: Surrogate,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
    options: Optional[Dict[str, Any]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> None:
    """Construct the underlying BoTorch ``AcquisitionFunction``.

    Resolves the acquisition class (falling back to
    ``self.default_botorch_acqf_class``), optionally subsets the surrogate
    model to the outcomes needed for optimization, builds the objective,
    gathers model/data dependencies, and instantiates ``self.acqf``.

    Raises:
        ValueError: If neither ``botorch_acqf_class`` nor a class-level
            default acquisition class is available.
    """
    if not botorch_acqf_class and not self.default_botorch_acqf_class:
        raise ValueError(
            f"Acquisition class {self.__class__} does not specify a default "
            "BoTorch `AcquisitionFunction`, so `botorch_acqf_class` "
            "argument must be specified."
        )
    botorch_acqf_class = not_none(
        botorch_acqf_class or self.default_botorch_acqf_class
    )

    self.surrogate = surrogate
    self.options = options or {}

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=[self.surrogate.training_data.X],
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    # Subset model only to the outcomes we need for the optimization.
    if self.options.get(Keys.SUBSET_MODEL, True):
        model, objective_weights, outcome_constraints, _ = subset_model(
            self.surrogate.model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )
    else:
        model = self.surrogate.model

    objective = get_botorch_objective(
        model=model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
        # Analytic acquisition functions require a scalarized objective.
        use_scalarized_objective=issubclass(
            botorch_acqf_class, AnalyticAcquisitionFunction
        ),
    )

    # NOTE: Computing model dependencies might be handled entirely on
    # BoTorch side.
    model_deps = self.compute_model_dependencies(
        surrogate=surrogate,
        bounds=bounds,
        objective_weights=objective_weights,
        pending_observations=pending_observations,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
        target_fidelities=target_fidelities,
        options=self.options,
    )
    data_deps = self.compute_data_dependencies(
        training_data=self.surrogate.training_data
    )

    # pyre-ignore[28]: Some kwargs are not expected in base `Model`
    # but are expected in its subclasses.
    self.acqf = botorch_acqf_class(
        model=model,
        objective=objective,
        X_pending=X_pending,
        X_baseline=X_observed,
        **self.options,
        **model_deps,
        **data_deps,
    )
def gen(
    self,
    n: int,
    bounds: List,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata]:
    """Generate new candidates via Knowledge Gradient.

    Args:
        n: Number of candidates to generate.
        bounds: A list of (lower, upper) tuples for each column of X.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value during generation.
        pending_observations: A list of m (k_i x d) feature tensors X
            for m outcomes and k_i pending observations for outcome i.
        model_gen_options: A config dictionary that can contain
            model-specific options.
        rounding_func: A function that rounds an optimization result
            appropriately (i.e., according to `round-trip`
            transformations).
        target_fidelities: A map {feature_index: value} of fidelity feature
            column indices to their respective target fidelities. Used for
            multi-fidelity optimization.

    Returns:
        3-element tuple containing

        - (n x d) tensor of generated points.
        - n-tensor of weights for each point.
        - Dictionary of model-specific metadata for the given generation
          candidates.
    """
    options = model_gen_options or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    model = self.model
    # Subset model only to the outcomes we need for the optimization.
    if options.get("subset_model", True):
        model, objective_weights, outcome_constraints = subset_model(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )

    objective = _get_objective(
        model=model,  # pyre-ignore [6]
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
    )

    # Acquisition-function and optimizer options, with defaults.
    n_fantasies = acf_options.get("num_fantasies", 64)
    qmc = acf_options.get("qmc", True)
    seed_inner = acf_options.get("seed_inner", None)
    num_restarts = optimizer_options.get("num_restarts", 40)
    raw_samples = optimizer_options.get("raw_samples", 1024)

    inequality_constraints = _to_inequality_constraints(linear_constraints)
    # TODO: update optimizers to handle inequality_constraints
    if inequality_constraints is not None:
        raise UnsupportedError(
            "Inequality constraints are not yet supported for KnowledgeGradient!"
        )

    # Incumbent ("current") value: evaluate the best-point acquisition at
    # the recommendation from the previous iteration.
    best_point_acqf, non_fixed_idcs = self._get_best_point_acqf(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,  # pyre-ignore: [6]
        seed_inner=seed_inner,
        fixed_features=fixed_features,
        target_fidelities=target_fidelities,
        qmc=qmc,
    )

    # Solution from previous iteration.
    recommended_point = self.best_point(
        bounds=bounds,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
        model_gen_options=model_gen_options,
        target_fidelities=target_fidelities,
    )
    # pyre-ignore: [16]
    recommended_point = recommended_point.detach().unsqueeze(0)
    # Extract acquisition value (TODO: Make this less painful and repetitive).
    if non_fixed_idcs is not None:
        recommended_point = recommended_point[..., non_fixed_idcs]
    current_value = best_point_acqf(recommended_point).max()

    acq_function = _instantiate_KG(
        model=model,  # pyre-ignore [6]
        objective=objective,
        qmc=qmc,
        n_fantasies=n_fantasies,
        num_trace_observations=options.get("num_trace_observations", 0),
        mc_samples=acf_options.get("mc_samples", 256),
        seed_inner=seed_inner,
        seed_outer=acf_options.get("seed_outer", None),
        X_pending=X_pending,
        target_fidelities=target_fidelities,
        fidelity_weights=options.get("fidelity_weights"),
        current_value=current_value,
        cost_intercept=self.cost_intercept,
    )

    # Optimize and get new points; BoTorch expects a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)

    batch_initial_conditions = gen_one_shot_kg_initial_conditions(
        acq_function=acq_function,
        bounds=bounds_,
        q=n,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options={
            "frac_random": optimizer_options.get("frac_random", 0.1),
            "num_inner_restarts": num_restarts,
            "raw_inner_samples": raw_samples,
        },
    )

    botorch_rounding_func = get_rounding_func(rounding_func)
    candidates, _ = optimize_acqf(
        acq_function=acq_function,
        bounds=bounds_,
        q=n,
        inequality_constraints=inequality_constraints,
        fixed_features=fixed_features,
        post_processing_func=botorch_rounding_func,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options={
            "batch_limit": optimizer_options.get("batch_limit", 8),
            "maxiter": optimizer_options.get("maxiter", 200),
            "method": "L-BFGS-B",
            "nonnegative": optimizer_options.get("nonnegative", False),
        },
        batch_initial_conditions=batch_initial_conditions,
    )

    new_x = candidates.detach().cpu()
    return new_x, torch.ones(n, dtype=self.dtype), {}
def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
) -> Tuple[Tensor, Tensor]:
    """Generate new candidates.

    An initialized acquisition function can be passed in as
    model_gen_options["acquisition_function"].

    Args:
        n: Number of candidates to generate.
        bounds: A list of (lower, upper) tuples for each column of X.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. (Not used by single task models)
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value during generation.
        pending_observations: A list of m (k_i x d) feature tensors X
            for m outcomes and k_i pending observations for outcome i.
        model_gen_options: A config dictionary that can contain
            model-specific options.
        rounding_func: A function that rounds an optimization result
            appropriately (i.e., according to `round-trip`
            transformations).

    Returns:
        Tensor: `n x d`-dim Tensor of generated points.
        Tensor: `n`-dim Tensor of weights for each point.
    """
    options = model_gen_options or {}
    # FIX: this key was previously misspelled "acqiusition_function_kwargs",
    # so kwargs passed under the documented (and sibling-consistent) key
    # "acquisition_function_kwargs" were silently ignored.
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
        model=self.model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
        X_pending=X_pending,
        **acf_options,
    )

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)

    # Convert A x <= b into BoTorch's (indices, coefficients, rhs) triples.
    if linear_constraints is not None:
        A, b = linear_constraints
        inequality_constraints = []
        k, d = A.shape
        for i in range(k):
            # FIX: use view(-1) instead of squeeze() — squeeze() collapses a
            # single-nonzero row to a 0-d tensor (matches the corrected
            # sibling implementation).
            indices = A[i, :].nonzero().view(-1)
            coefficients = -A[i, indices]
            rhs = -b[i, 0]
            inequality_constraints.append((indices, coefficients, rhs))
    else:
        inequality_constraints = None

    candidates = self.acqf_optimizer(  # pyre-ignore: [28]
        acq_function=checked_cast(AcquisitionFunction, acquisition_function),
        bounds=bounds_,
        n=n,
        inequality_constraints=inequality_constraints,
        fixed_features=fixed_features,
        rounding_func=rounding_func,
        **optimizer_options,
    )
    return candidates.detach().cpu(), torch.ones(n, dtype=self.dtype)
def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
    """Generate ``n`` candidates by optimizing the acquisition function.

    Optionally subsets the model to the outcomes relevant to the
    optimization, then constructs and optimizes the acquisition function,
    returning candidates, uniform weights, gen metadata with the expected
    acquisition value, and ``None`` for per-candidate metadata.

    Raises:
        NotImplementedError: If ``target_fidelities`` is provided; this
            base implementation does not support multi-fidelity generation.
    """
    options = model_gen_options or {}
    acf_options = options.get(Keys.ACQF_KWARGS, {})
    optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

    if target_fidelities:
        raise NotImplementedError(
            "target_fidelities not implemented for base BotorchModel"
        )

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    model = self.model
    # Subset model only to the outcomes we need for the optimization.
    if options.get(Keys.SUBSET_MODEL, True):
        model, objective_weights, outcome_constraints = subset_model(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)

    acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
        model=model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
        X_pending=X_pending,
        **acf_options,
    )
    botorch_rounding_func = get_rounding_func(rounding_func)

    # pyre-ignore: [28]
    candidates, expected_acquisition_value = self.acqf_optimizer(
        acq_function=checked_cast(AcquisitionFunction, acquisition_function),
        bounds=bounds_,
        n=n,
        inequality_constraints=_to_inequality_constraints(
            linear_constraints=linear_constraints
        ),
        fixed_features=fixed_features,
        rounding_func=botorch_rounding_func,
        **optimizer_options,
    )
    return (
        candidates.detach().cpu(),
        torch.ones(n, dtype=self.dtype),
        {"expected_acquisition_value": expected_acquisition_value.tolist()},
        None,
    )
def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
    """Generate ``n`` candidates, falling back to IID sampling if QMC fails.

    Builds and optimizes the acquisition function; if the attempt raises
    BoTorch's ``UnsupportedError`` because the Sobol sampler's dimension
    limit is exceeded, retries once with ``qmc=False``.

    Raises:
        NotImplementedError: If ``target_fidelities`` is provided; this
            base implementation does not support multi-fidelity generation.
    """
    options = model_gen_options or {}
    acf_options = options.get(Keys.ACQF_KWARGS, {})
    optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})

    if target_fidelities:
        raise NotImplementedError(
            "target_fidelities not implemented for base BotorchModel"
        )

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    model = self.model
    # Subset model only to the outcomes we need for the optimization.
    if options.get(Keys.SUBSET_MODEL, True):
        subset_model_results = subset_model(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )
        model = subset_model_results.model
        objective_weights = subset_model_results.objective_weights
        outcome_constraints = subset_model_results.outcome_constraints

    # BoTorch optimizers expect a 2 x d bounds tensor.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)
    botorch_rounding_func = get_rounding_func(rounding_func)

    # The following logic works around the limitation of PyTorch's Sobol
    # sampler to <1111 dimensions.
    # TODO: Remove once https://github.com/pytorch/pytorch/issues/41489 is
    # resolved.
    from botorch.exceptions.errors import UnsupportedError

    def make_and_optimize_acqf(
        override_qmc: bool = False,
    ) -> Tuple[Tensor, Tensor]:
        # Optionally force IID sampling by overriding qmc.
        add_kwargs = {"qmc": False} if override_qmc else {}
        acqf = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
            **add_kwargs,
        )
        acqf = checked_cast(AcquisitionFunction, acqf)
        # pyre-ignore: [28]
        cands, acqf_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction, acqf),
            bounds=bounds_,
            n=n,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
        return cands, acqf_value

    try:
        candidates, expected_acquisition_value = make_and_optimize_acqf()
    except UnsupportedError as e:
        if "SobolQMCSampler only supports dimensions q * o <= 1111" in str(e):
            # Dimension too large for Sobol; fall back to IID sampling.
            candidates, expected_acquisition_value = make_and_optimize_acqf(
                override_qmc=True
            )
        else:
            raise e

    return (
        candidates.detach().cpu(),
        torch.ones(n, dtype=self.dtype),
        {"expected_acquisition_value": expected_acquisition_value.tolist()},
        None,
    )
def gen(
    self,
    n: int,
    bounds: List,
    objective_weights: Tensor,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, List[TCandidateMetadata]]:
    """Generate ``n`` candidates via max-value entropy search (MES).

    Samples a discrete candidate set over the design space for max-value
    sampling, instantiates the MES acquisition function, and optimizes it
    sequentially.

    Raises:
        UnsupportedError: If linear or outcome constraints are supplied, or
            if the model has multiple outcomes — neither is supported by
            MES yet.
    """
    if linear_constraints is not None or outcome_constraints is not None:
        raise UnsupportedError(
            "Constraints are not yet supported by max-value entropy search!"
        )

    if len(objective_weights) > 1:
        raise UnsupportedError(
            "Models with multiple outcomes are not yet supported by MES!"
        )

    options = model_gen_options or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})

    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )

    model = self.model
    # Subset model only to the outcomes we need for the optimization.
    if options.get("subset_model", True):
        model, objective_weights, outcome_constraints, _ = subset_model(
            model=model,  # pyre-ignore [6]
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )

    # Acquisition-function and optimizer options, with defaults.
    num_fantasies = acf_options.get("num_fantasies", 16)
    num_mv_samples = acf_options.get("num_mv_samples", 10)
    num_y_samples = acf_options.get("num_y_samples", 128)
    candidate_size = acf_options.get("candidate_size", 1000)
    num_restarts = optimizer_options.get("num_restarts", 40)
    raw_samples = optimizer_options.get("raw_samples", 1024)

    # Generate the discrete points in the design space to sample max values.
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)
    # FIX: draw the candidate set with the model's dtype/device. The
    # previous bare torch.rand(...) defaulted to float32 on CPU, which
    # errors on device mismatch (or silently promotes dtype) in the affine
    # transform below when `bounds_` is float64 / on an accelerator.
    candidate_set = torch.rand(
        candidate_size, bounds_.size(1), dtype=self.dtype, device=self.device
    )
    candidate_set = bounds_[0] + (bounds_[1] - bounds_[0]) * candidate_set

    acq_function = _instantiate_MES(
        model=model,  # pyre-ignore [6]
        candidate_set=candidate_set,
        num_fantasies=num_fantasies,
        num_trace_observations=options.get("num_trace_observations", 0),
        num_mv_samples=num_mv_samples,
        num_y_samples=num_y_samples,
        X_pending=X_pending,
        # A weight of +1 means we maximize the (single) outcome.
        maximize=bool(objective_weights[0] == 1),
        target_fidelities=target_fidelities,
        fidelity_weights=options.get("fidelity_weights"),
        cost_intercept=self.cost_intercept,
    )

    # Optimize and get new points.
    botorch_rounding_func = get_rounding_func(rounding_func)
    candidates, _ = optimize_acqf(
        acq_function=acq_function,
        bounds=bounds_,
        q=n,
        inequality_constraints=None,
        fixed_features=fixed_features,
        post_processing_func=botorch_rounding_func,
        num_restarts=num_restarts,
        raw_samples=raw_samples,
        options={
            "batch_limit": optimizer_options.get("batch_limit", 8),
            "maxiter": optimizer_options.get("maxiter", 200),
            "method": "L-BFGS-B",
            "nonnegative": optimizer_options.get("nonnegative", False),
        },
        sequential=True,
    )
    new_x = candidates.detach().cpu()
    # pyre-fixme[7]: Expected `Tuple[Tensor, Tensor, Dict[str, typing.Any],
    # List[Optional[Dict[str, typing.Any]]]]` but got `Tuple[Tensor,
    # typing.Any, Dict[str, typing.Any], None]`.
    return new_x, torch.ones(n, dtype=self.dtype), {}, None
def __init__(
    self,
    surrogate: Surrogate,
    search_space_digest: SearchSpaceDigest,
    objective_weights: Tensor,
    botorch_acqf_class: Type[AcquisitionFunction],
    options: Optional[Dict[str, Any]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    objective_thresholds: Optional[Tensor] = None,
) -> None:
    """Construct the BoTorch acquisition function for this surrogate.

    Splits observed from pending points, optionally subsets the model to the
    outcomes relevant to the optimization, infers missing objective
    thresholds for multi-objective problems, and finally instantiates
    ``botorch_acqf_class`` via its registered input constructor.

    Args:
        surrogate: The fitted ``Surrogate`` whose model is optimized over.
        search_space_digest: Digest carrying the search-space bounds used
            to filter observed points.
        objective_weights: Weights on the modeled outcomes; the objective
            is a weighted sum of the outcome columns.
        botorch_acqf_class: The BoTorch ``AcquisitionFunction`` class to
            instantiate.
        options: Optional acquisition options; ``Keys.SUBSET_MODEL``
            (default True) controls model subsetting, and all entries are
            also forwarded to the acquisition input constructor.
        pending_observations: Pending points, one tensor per outcome.
        outcome_constraints: A tuple (A, b) such that A f(x) <= b.
        linear_constraints: A tuple (A, b) such that A x <= b.
        fixed_features: A map {feature_index: value} of fixed features.
        objective_thresholds: Optional thresholds for all outcomes
            (including non-objectives); inferred when absent and multiple
            objectives are present.
    """
    self.surrogate = surrogate
    self.options = options or {}
    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.surrogate.training_data.Xs,
        objective_weights=objective_weights,
        bounds=search_space_digest.bounds,
        pending_observations=pending_observations,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    # Store objective thresholds for all outcomes (including non-objectives).
    self._objective_thresholds = objective_thresholds
    # Keep the full (un-subset) weights/constraints: threshold inference
    # below expects them in the original outcome indexing.
    self._full_objective_weights = objective_weights
    full_outcome_constraints = outcome_constraints
    # Subset model only to the outcomes we need for the optimization.
    if self.options.get(Keys.SUBSET_MODEL, True):
        subset_model_results = subset_model(
            model=self.surrogate.model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
        model = subset_model_results.model
        objective_weights = subset_model_results.objective_weights
        outcome_constraints = subset_model_results.outcome_constraints
        objective_thresholds = subset_model_results.objective_thresholds
        subset_idcs = subset_model_results.indices
    else:
        model = self.surrogate.model
        subset_idcs = None
    # If objective weights suggest multiple objectives but objective
    # thresholds are not specified, infer them using the model that
    # has already been subset to avoid re-subsetting it within
    # `infer_objective_thresholds`.
    if (
        objective_weights.nonzero().numel() > 1
        and self._objective_thresholds is None
    ):
        self._objective_thresholds = infer_objective_thresholds(
            model=model,
            objective_weights=self._full_objective_weights,
            outcome_constraints=full_outcome_constraints,
            X_observed=X_observed,
            subset_idcs=subset_idcs,
        )
        # Re-index inferred thresholds into the subset outcome space.
        objective_thresholds = (
            not_none(self._objective_thresholds)[subset_idcs]
            if subset_idcs is not None
            else self._objective_thresholds
        )
    objective, posterior_transform = self.get_botorch_objective_and_transform(
        botorch_acqf_class=botorch_acqf_class,
        model=model,
        objective_weights=objective_weights,
        objective_thresholds=objective_thresholds,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
    )
    # Subclass hook: extra constructor kwargs (e.g. cost/fidelity inputs).
    model_deps = self.compute_model_dependencies(
        surrogate=surrogate,
        search_space_digest=search_space_digest,
        objective_weights=objective_weights,
        pending_observations=pending_observations,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
        options=self.options,
    )
    input_constructor_kwargs = {
        "X_baseline": X_observed,
        "X_pending": X_pending,
        "objective_thresholds": objective_thresholds,
        "outcome_constraints": outcome_constraints,
        **model_deps,
        **self.options,
    }
    # Dispatch to the input constructor registered for this acqf class;
    # it selects the subset of kwargs the class actually accepts.
    input_constructor = get_acqf_input_constructor(botorch_acqf_class)
    acqf_inputs = input_constructor(
        model=model,
        training_data=self.surrogate.training_data,
        objective=objective,
        posterior_transform=posterior_transform,
        **input_constructor_kwargs,
    )
    self.acqf = botorch_acqf_class(**acqf_inputs)  # pyre-ignore [45]
def infer_objective_thresholds(
    model: Model,
    objective_weights: Tensor,  # objective_directions
    bounds: Optional[List[Tuple[float, float]]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    subset_idcs: Optional[Tensor] = None,
    Xs: Optional[List[Tensor]] = None,
    X_observed: Optional[Tensor] = None,
) -> Tensor:
    """Infer objective thresholds.

    This method uses the model-estimated Pareto frontier over the in-sample
    points to infer absolute (not relativized) objective thresholds.

    This uses a heuristic that sets the objective threshold to be a scaled
    nadir point, where the nadir point is scaled back based on the range of
    each objective across the current in-sample Pareto frontier. See
    `botorch.utils.multi_objective.hypervolume.infer_reference_point` for
    details on the heuristic.

    Args:
        model: A fitted botorch Model.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights. These should not
            be subsetted.
        bounds: A list of (lower, upper) tuples for each column of X.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b. These should not be subsetted.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value during generation.
        subset_idcs: The indices of the outcomes that are modeled by the
            provided model. If subset_idcs is None, this method infers
            whether the model is subsetted (and subsets it if needed).
        Xs: A list of m (k_i x d) feature tensors X. Number of rows k_i
            can vary from i=1,...,m.
        X_observed: A `n x d`-dim tensor of in-sample points to use for
            determining the current in-sample Pareto frontier.

    Returns:
        A `m`-dim tensor of objective thresholds, where the objective
            threshold is `nan` if the outcome is not an objective.

    Raises:
        ValueError: If `X_observed` is None and `bounds` or `Xs` is
            missing.
        AxError: If no observed point is feasible under the outcome
            constraints.
    """
    if X_observed is None:
        # Derive the in-sample points from the raw training inputs.
        if bounds is None:
            raise ValueError("bounds is required if X_observed is None.")
        elif Xs is None:
            raise ValueError("Xs is required if X_observed is None.")
        _, X_observed = _get_X_pending_and_observed(
            Xs=Xs,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            bounds=bounds,
            linear_constraints=linear_constraints,
            fixed_features=fixed_features,
        )
    num_outcomes = objective_weights.shape[0]
    if subset_idcs is None:
        # check if only a subset of outcomes are modeled
        # Relevant outcomes: nonzero objective weight or appearing in a
        # nonzero column of an outcome-constraint row.
        nonzero = objective_weights != 0
        if outcome_constraints is not None:
            A, _ = outcome_constraints
            nonzero = nonzero | torch.any(A != 0, dim=0)
        expected_subset_idcs = nonzero.nonzero().view(-1)
        # pyre-ignore [16]
        if model.num_outputs > expected_subset_idcs.numel():
            # subset the model so that we only compute the posterior
            # over the relevant outcomes
            subset_model_results = subset_model(
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
            )
            model = subset_model_results.model
            objective_weights = subset_model_results.objective_weights
            outcome_constraints = subset_model_results.outcome_constraints
            subset_idcs = subset_model_results.indices
        else:
            # model is already subsetted.
            subset_idcs = expected_subset_idcs
            # subset objective weights and outcome constraints
            objective_weights = objective_weights[subset_idcs]
            if outcome_constraints is not None:
                outcome_constraints = (
                    outcome_constraints[0][:, subset_idcs],
                    outcome_constraints[1],
                )
    else:
        # Caller supplied the model's outcome indices; align the weights
        # and constraints with that subset.
        objective_weights = objective_weights[subset_idcs]
        if outcome_constraints is not None:
            outcome_constraints = (
                outcome_constraints[0][:, subset_idcs],
                outcome_constraints[1],
            )
    with torch.no_grad():
        # Posterior mean predictions at the observed points.
        pred = not_none(model).posterior(not_none(X_observed)).mean
    if outcome_constraints is not None:
        # Keep only points whose predictions satisfy all constraints.
        cons_tfs = get_outcome_constraint_transforms(outcome_constraints)
        # pyre-ignore [16]
        feas = torch.stack([c(pred) <= 0 for c in cons_tfs], dim=-1).all(dim=-1)
        pred = pred[feas]
    if pred.shape[0] == 0:
        raise AxError("There are no feasible observed points.")
    # Restrict to objective outcomes and flip signs so every objective is
    # maximized before computing the Pareto frontier.
    obj_mask = objective_weights.nonzero().view(-1)
    obj_weights_subset = objective_weights[obj_mask]
    obj = pred[..., obj_mask] * obj_weights_subset
    pareto_obj = obj[is_non_dominated(obj)]
    objective_thresholds = infer_reference_point(
        pareto_Y=pareto_obj,
        scale=0.1,
    )
    # multiply by objective weights to return objective thresholds in the
    # unweighted space
    objective_thresholds = objective_thresholds * obj_weights_subset
    # Scatter thresholds back into a full-length vector; non-objective
    # outcomes get NaN.
    full_objective_thresholds = torch.full(
        (num_outcomes,),
        float("nan"),
        dtype=objective_weights.dtype,
        device=objective_weights.device,
    )
    full_objective_thresholds[subset_idcs] = objective_thresholds.clone()
    return full_objective_thresholds
def __init__(
    self,
    surrogate: Surrogate,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,
    botorch_acqf_class: Optional[Type[AcquisitionFunction]] = None,
    options: Optional[Dict[str, Any]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> None:
    """Build the underlying BoTorch acquisition function.

    Resolves the acquisition class (explicit argument or class default),
    separates pending from observed training points, optionally subsets
    the surrogate model to the relevant outcomes, and instantiates the
    acquisition function with subclass-provided dependencies.

    Args:
        surrogate: Fitted ``Surrogate`` to optimize over.
        bounds: (lower, upper) bounds per column of X.
        objective_weights: Weights on the modeled outcomes.
        botorch_acqf_class: Acquisition class to use; falls back to
            ``self.default_botorch_acqf_class``.
        options: Optional settings; ``Keys.SUBSET_MODEL`` toggles model
            subsetting and all entries are forwarded to the acqf.
        pending_observations: Pending points, one tensor per outcome.
        outcome_constraints: (A, b) with A f(x) <= b.
        linear_constraints: (A, b) with A x <= b.
        fixed_features: {feature_index: value} fixed during generation.
        target_fidelities: {feature_index: value} fidelity targets.

    Raises:
        ValueError: If no acquisition class is given and the subclass
            declares no default.
    """
    # Resolve the acquisition class, failing fast when neither an explicit
    # class nor a subclass default is available.
    if not botorch_acqf_class and not self.default_botorch_acqf_class:
        raise ValueError(
            f"Acquisition class {self.__class__} does not specify a default "
            "BoTorch `AcquisitionFunction`, so `botorch_acqf_class` "
            "argument must be specified."
        )
    self._botorch_acqf_class = not_none(
        botorch_acqf_class or self.default_botorch_acqf_class
    )
    self.surrogate = surrogate
    self.options = options or {}
    training_data = self._extract_training_data(surrogate=surrogate)
    # Build one X per outcome. A single TrainingData means one shared X
    # (assumes 1-D objective_weights, which should be safe); a dict means
    # per-outcome training data.
    if isinstance(training_data, TrainingData):
        xs = [training_data.X for _ in range(objective_weights.shape[0])]
    else:
        xs = [per_outcome.X for per_outcome in training_data.values()]
    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    # Subset model only to the outcomes we need for the optimization.
    if self.options.get(Keys.SUBSET_MODEL, True):
        model, objective_weights, outcome_constraints, _ = subset_model(
            self.surrogate.model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
        )
    else:
        model = self.surrogate.model
    objective = self._get_botorch_objective(
        model=model,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        X_observed=X_observed,
    )
    # Subclass hook: extra kwargs needed by the concrete acqf class.
    dependencies = self.compute_model_dependencies(
        surrogate=surrogate,
        bounds=bounds,
        objective_weights=objective_weights,
        pending_observations=pending_observations,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
        target_fidelities=target_fidelities,
        options=self.options,
    )
    # Dependencies may override the baseline; only remove the key when an
    # actual override value is present.
    baseline_override = dependencies.get(Keys.X_BASELINE)
    if baseline_override is None:
        X_baseline = X_observed
    else:
        X_baseline = baseline_override
        del dependencies[Keys.X_BASELINE]
    self.acqf = self._botorch_acqf_class(  # pyre-ignore[28]: Some kwargs are
        # not expected in base `AcquisitionFunction` but are expected in
        # its subclasses.
        model=model,
        objective=objective,
        X_pending=X_pending,
        X_baseline=X_baseline,
        **self.options,
        **dependencies,
    )
def gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: Tensor,  # objective_directions
    outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    objective_thresholds: Optional[Tensor] = None,
    linear_constraints: Optional[Tuple[Tensor, Tensor]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[Tensor]] = None,
    model_gen_options: Optional[TConfig] = None,
    rounding_func: Optional[Callable[[Tensor], Tensor]] = None,
    target_fidelities: Optional[Dict[int, float]] = None,
) -> Tuple[Tensor, Tensor, TGenMetadata, Optional[List[TCandidateMetadata]]]:
    """Generate ``n`` candidates for a multi-objective optimization problem.

    Either optimizes a single multi-objective acquisition function, or —
    when random/Chebyshev scalarization is requested — builds one
    scalarized acquisition function per candidate and optimizes the list
    sequentially. Missing objective thresholds are inferred from the
    in-sample Pareto frontier.

    Args:
        n: Number of candidates to generate.
        bounds: A list of (lower, upper) tuples for each column of X.
        objective_weights: Weights (directions) on the modeled outcomes.
        outcome_constraints: A tuple (A, b) such that A f(x) <= b.
        objective_thresholds: Optional per-outcome thresholds; must match
            `objective_weights` in length when provided.
        linear_constraints: A tuple (A, b) such that A x <= b.
        fixed_features: A map {feature_index: value} of fixed features.
        pending_observations: Pending points, one tensor per outcome.
        model_gen_options: Config with optional
            "acquisition_function_kwargs" and "optimizer_kwargs" dicts.
        rounding_func: Optional rounding function applied to candidates.
        target_fidelities: Unsupported here; must be falsy.

    Returns:
        A 4-tuple of candidates, uniform weights of 1, gen metadata
        (expected acquisition value plus full thresholds/weights), and
        None candidate metadata.

    Raises:
        NotImplementedError: If `target_fidelities` is given.
        AxError: If thresholds and weights have mismatched lengths.
    """
    options = model_gen_options or {}
    acf_options = options.get("acquisition_function_kwargs", {})
    optimizer_options = options.get("optimizer_kwargs", {})
    if target_fidelities:
        raise NotImplementedError(
            "target_fidelities not implemented for base BotorchModel"
        )
    if (
        objective_thresholds is not None
        and objective_weights.shape[0] != objective_thresholds.shape[0]
    ):
        # BUGFIX: error message previously read "most both contain".
        raise AxError(
            "Objective weights and thresholds must both contain an element for"
            " each modeled metric."
        )
    X_pending, X_observed = _get_X_pending_and_observed(
        Xs=self.Xs,
        pending_observations=pending_observations,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    model = not_none(self.model)
    # Keep full (un-subset) copies for threshold inference and metadata.
    full_objective_thresholds = objective_thresholds
    full_objective_weights = objective_weights
    full_outcome_constraints = outcome_constraints
    # subset model only to the outcomes we need for the optimization
    if options.get(Keys.SUBSET_MODEL, True):
        subset_model_results = subset_model(
            model=model,
            objective_weights=objective_weights,
            outcome_constraints=outcome_constraints,
            objective_thresholds=objective_thresholds,
        )
        model = subset_model_results.model
        objective_weights = subset_model_results.objective_weights
        outcome_constraints = subset_model_results.outcome_constraints
        objective_thresholds = subset_model_results.objective_thresholds
        idcs = subset_model_results.indices
    else:
        idcs = None
    if objective_thresholds is None:
        # Infer thresholds from the in-sample Pareto frontier, in the full
        # outcome indexing.
        full_objective_thresholds = infer_objective_thresholds(
            model=model,
            X_observed=not_none(X_observed),
            objective_weights=full_objective_weights,
            outcome_constraints=full_outcome_constraints,
            subset_idcs=idcs,
        )
        # subset the objective thresholds
        objective_thresholds = (
            full_objective_thresholds
            if idcs is None
            else full_objective_thresholds[idcs].clone()
        )
    bounds_ = torch.tensor(bounds, dtype=self.dtype, device=self.device)
    bounds_ = bounds_.transpose(0, 1)
    botorch_rounding_func = get_rounding_func(rounding_func)
    if acf_options.get("random_scalarization", False) or acf_options.get(
        "chebyshev_scalarization", False
    ):
        # If using a list of acquisition functions, the algorithm to generate
        # that list is configured by acquisition_function_kwargs.
        objective_weights_list = [
            randomize_objective_weights(objective_weights, **acf_options)
            for _ in range(n)
        ]
        acquisition_function_list = [
            self.acqf_constructor(  # pyre-ignore: [28]
                model=model,
                objective_weights=objective_weights,
                outcome_constraints=outcome_constraints,
                X_observed=X_observed,
                X_pending=X_pending,
                **acf_options,
            )
            for objective_weights in objective_weights_list
        ]
        acquisition_function_list = [
            checked_cast(AcquisitionFunction, acq_function)
            for acq_function in acquisition_function_list
        ]
        # Multiple acquisition functions require a sequential optimizer
        # always use scipy_optimizer_list.
        # TODO(jej): Allow any optimizer.
        candidates, expected_acquisition_value = scipy_optimizer_list(
            acq_function_list=acquisition_function_list,
            bounds=bounds_,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
    else:
        acquisition_function = self.acqf_constructor(  # pyre-ignore: [28]
            model=model,
            objective_weights=objective_weights,
            objective_thresholds=objective_thresholds,
            outcome_constraints=outcome_constraints,
            X_observed=X_observed,
            X_pending=X_pending,
            **acf_options,
        )
        acquisition_function = checked_cast(
            AcquisitionFunction, acquisition_function
        )
        # pyre-ignore: [28]
        candidates, expected_acquisition_value = self.acqf_optimizer(
            acq_function=checked_cast(AcquisitionFunction, acquisition_function),
            bounds=bounds_,
            n=n,
            inequality_constraints=_to_inequality_constraints(
                linear_constraints=linear_constraints
            ),
            fixed_features=fixed_features,
            rounding_func=botorch_rounding_func,
            **optimizer_options,
        )
    gen_metadata = {
        "expected_acquisition_value": expected_acquisition_value.tolist(),
        "objective_thresholds": not_none(full_objective_thresholds).cpu(),
        "objective_weights": full_objective_weights.cpu(),
    }
    return (
        candidates.detach().cpu(),
        torch.ones(n, dtype=self.dtype),
        gen_metadata,
        None,
    )