def _model_best_point(
    self,
    bounds: List[Tuple[float, float]],
    objective_weights: np.ndarray,
    outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    fixed_features: Optional[Dict[int, float]],
    model_gen_options: Optional[TConfig],
    target_fidelities: Optional[Dict[int, float]],
) -> Optional[np.ndarray]:  # pragma: no cover
    """Identify the best in-sample point via the underlying torch model.

    Transforms the array-valued optimization inputs to tensors, delegates to
    ``self.model.best_point``, and converts the resulting tensor back to a
    numpy array.

    Returns:
        The best point as a numpy array, or ``None`` if the model reports no
        best point or does not implement ``best_point``.

    Raises:
        ValueError: If no model has been fit yet.
    """
    if not self.model:  # pragma: no cover
        # BUGFIX: previously reported action="_model_gen" (copy-paste error),
        # which misattributed the failure to the wrong method.
        raise ValueError(FIT_MODEL_ERROR.format(action="_model_best_point"))
    obj_w, oc_c, l_c, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        pending_observations=None,
        final_transform=self._array_to_tensor,
    )
    try:
        # pyre-fixme[16]: `Optional` has no attribute `best_point`.
        X = self.model.best_point(
            bounds=bounds,
            objective_weights=obj_w,
            outcome_constraints=oc_c,
            linear_constraints=l_c,
            fixed_features=fixed_features,
            model_gen_options=model_gen_options,
            target_fidelities=target_fidelities,
        )
        return None if X is None else X.detach().cpu().clone().numpy()
    except NotImplementedError:
        # Not every torch model can identify a best in-sample point.
        return None
def infer_objective_thresholds(
    self,
    search_space: Optional[SearchSpace] = None,
    optimization_config: Optional[OptimizationConfig] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> List[ObjectiveThreshold]:
    """Infer absolute objective thresholds for a multi-objective problem.

    Only applicable to Multi-Objective optimization. Uses the
    model-estimated Pareto frontier over the in-sample points and a
    scaled-nadir-point heuristic: the nadir point is scaled back based on
    the range of each objective across the current in-sample Pareto
    frontier. The returned thresholds are absolute (not relativized).
    """
    assert (
        self.is_moo_problem
    ), "Objective thresholds are only supported for multi-objective optimization."
    search_space = (search_space or self._model_space).clone()
    transformed_gen_args = self._get_transformed_gen_args(
        search_space=search_space,
        optimization_config=optimization_config,
        fixed_features=fixed_features,
    )
    # Array-level transformed arguments (ArrayModelbridge).
    array_gen_args = self._get_transformed_model_gen_args(
        search_space=transformed_gen_args.search_space,
        fixed_features=transformed_gen_args.fixed_features,
        pending_observations={},
        optimization_config=transformed_gen_args.optimization_config,
    )
    # Tensor-level transformed arguments (TorchModelbridge).
    obj_weights_t, out_cons_t, lin_cons_t, _, _ = validate_and_apply_final_transform(
        objective_weights=array_gen_args.objective_weights,
        outcome_constraints=array_gen_args.outcome_constraints,
        pending_observations=None,
        linear_constraints=array_gen_args.linear_constraints,
        final_transform=self._array_to_tensor,
    )
    # Delegate threshold inference to the underlying BoTorch model.
    moo_model = checked_cast(MultiObjectiveBotorchModel, self.model)
    bounds = array_gen_args.search_space_digest.bounds
    thresholds_arr = infer_objective_thresholds(
        model=not_none(moo_model.model),
        objective_weights=obj_weights_t,
        bounds=bounds,
        outcome_constraints=out_cons_t,
        linear_constraints=lin_cons_t,
        fixed_features=array_gen_args.fixed_features,
        Xs=moo_model.Xs,
    )
    return self._untransform_objective_thresholds(
        objective_thresholds=thresholds_arr,
        objective_weights=obj_weights_t,
        bounds=bounds,
        fixed_features=array_gen_args.fixed_features,
    )
def _model_gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: np.ndarray,
    outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    fixed_features: Optional[Dict[int, float]],
    pending_observations: Optional[List[np.ndarray]],
    model_gen_options: Optional[TConfig],
    rounding_func: Callable[[np.ndarray], np.ndarray],
    target_fidelities: Optional[Dict[int, float]],
    objective_thresholds: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray, TGenMetadata, List[TCandidateMetadata]]:
    """Generate ``n`` candidate points from the underlying torch model.

    Converts the array-valued arguments to tensors, delegates to
    ``self.model.gen``, and converts the generated points and weights back
    to numpy arrays.

    Raises:
        ValueError: If no model has been fit yet.
    """
    if not self.model:  # pragma: no cover
        raise ValueError(FIT_MODEL_ERROR.format(action="_model_gen"))
    (
        obj_weights_t,
        out_cons_t,
        lin_cons_t,
        pending_t,
        obj_thresholds_t,
    ) = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        pending_observations=pending_observations,
        objective_thresholds=objective_thresholds,
        final_transform=self._array_to_tensor,
    )
    # Rounding must operate on tensors once inside the torch model.
    rounding_func_t = self._array_callable_to_tensor_callable(rounding_func)
    # Options supplied at gen time override the modelbridge defaults.
    gen_options = dict(self._default_model_gen_options)
    gen_options.update(model_gen_options or {})
    # pyre-fixme[16]: `Optional` has no attribute `gen`.
    X, w, gen_metadata, candidate_metadata = self.model.gen(
        n=n,
        bounds=bounds,
        objective_weights=obj_weights_t,
        outcome_constraints=out_cons_t,
        objective_thresholds=obj_thresholds_t,
        linear_constraints=lin_cons_t,
        fixed_features=fixed_features,
        pending_observations=pending_t,
        model_gen_options=gen_options,
        rounding_func=rounding_func_t,
        target_fidelities=target_fidelities,
    )
    return (
        X.detach().cpu().clone().numpy(),
        w.detach().cpu().clone().numpy(),
        gen_metadata,
        candidate_metadata,
    )
def _model_evaluate_acquisition_function(
    self,
    X: np.ndarray,
    search_space_digest: SearchSpaceDigest,
    objective_weights: np.ndarray,
    objective_thresholds: Optional[np.ndarray] = None,
    outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
    linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    pending_observations: Optional[List[np.ndarray]] = None,
    acq_options: Optional[Dict[str, Any]] = None,
) -> np.ndarray:
    """Evaluate the model's acquisition function at the points ``X``.

    Converts the array-valued arguments to tensors, delegates to
    ``self.model.evaluate_acquisition_function``, and returns the
    acquisition values as a numpy array.

    Raises:
        ValueError: If no model has been fit yet.
    """
    if not self.model:  # pragma: no cover
        raise ValueError(
            FIT_MODEL_ERROR.format(action="_model_evaluate_acquisition_function")
        )
    (
        obj_weights_t,
        out_cons_t,
        lin_cons_t,
        pending_t,
        obj_thresholds_t,
    ) = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        pending_observations=pending_observations,
        objective_thresholds=objective_thresholds,
        final_transform=self._array_to_tensor,
    )
    acqf_values = not_none(self.model).evaluate_acquisition_function(
        X=self._array_to_tensor(X),
        search_space_digest=search_space_digest,
        objective_weights=obj_weights_t,
        objective_thresholds=obj_thresholds_t,
        outcome_constraints=out_cons_t,
        linear_constraints=lin_cons_t,
        fixed_features=fixed_features,
        pending_observations=pending_t,
        acq_options=acq_options,
    )
    return acqf_values.detach().cpu().clone().numpy()
def _pareto_frontier(
    self,
    objective_thresholds: Optional[TRefPoint] = None,
    observation_features: Optional[List[ObservationFeatures]] = None,
    observation_data: Optional[List[ObservationData]] = None,
    optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[ObservationData]:
    """Compute the model-estimated Pareto frontier as observation data.

    Transforms the supplied features/data and optimization config, runs the
    frontier evaluator on tensors, then untransforms the frontier back
    through every applied transform (in reverse order).

    Raises:
        ValueError: If neither an existing MultiObjectiveOptimizationConfig
            nor an ``optimization_config`` argument is available.
    """
    # TODO(jej): This method should be refactored to move tensor
    # conversions into a separate utility, and eventually should be
    # moved into base.py.
    # The reason this method is currently implemented in array.py is to
    # allow the broadest possible set of models to call frontier and
    # hypervolume evaluation functions given the current API.
    if observation_features:
        X = self.transform_observation_features(observation_features)
    else:
        X = None
    if X is not None:
        X = self._array_to_tensor(X)
    Y = None
    Yvar = None
    if observation_data:
        Y, Yvar = self.transform_observation_data(observation_data)
    if Y is not None and Yvar is not None:
        Y = self._array_to_tensor(Y)
        Yvar = self._array_to_tensor(Yvar)
    # Resolve the multi-objective optimization config.
    mooc = optimization_config or checked_cast_optional(
        MultiObjectiveOptimizationConfig, self._optimization_config
    )
    if not mooc:
        raise ValueError(
            "experiment must have an existing optimization_config "
            "of type MultiObjectiveOptimizationConfig "
            "or `optimization_config` must be passed as an argument."
        )
    if not isinstance(mooc, MultiObjectiveOptimizationConfig):
        mooc = not_none(MultiObjectiveOptimizationConfig.from_opt_conf(mooc))
    if objective_thresholds:
        mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)
    optimization_config = mooc
    # Transform the optimization config alongside the data.
    optimization_config = self.transform_optimization_config(
        optimization_config=optimization_config,
        fixed_features=ObservationFeatures(parameters={}),
    )
    # Extract weights, constraints, and objective thresholds as arrays.
    objective_weights = extract_objective_weights(
        objective=optimization_config.objective, outcomes=self.outcomes
    )
    outcome_constraints = extract_outcome_constraints(
        outcome_constraints=optimization_config.outcome_constraints,
        outcomes=self.outcomes,
    )
    objective_thresholds_arr = extract_objective_thresholds(
        objective_thresholds=optimization_config.objective_thresholds,
        outcomes=self.outcomes,
    )
    # Convert arrays to tensors for the frontier evaluator.
    obj_w, oc_c, _, _ = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=None,
        pending_observations=None,
        final_transform=self._array_to_tensor,
    )
    obj_t = self._array_to_tensor(objective_thresholds_arr)
    frontier_evaluator = self._get_frontier_evaluator()
    # pyre-ignore[28]: Unexpected keyword `model` to anonymous call
    f, cov = frontier_evaluator(
        model=self.model,
        X=X,
        Y=Y,
        Yvar=Yvar,
        objective_thresholds=obj_t,
        objective_weights=obj_w,
        outcome_constraints=oc_c,
    )
    f = f.detach().cpu().clone().numpy()
    cov = cov.detach().cpu().clone().numpy()
    frontier_observation_data = array_to_observation_data(
        f=f, cov=cov, outcomes=not_none(self.outcomes)
    )
    # Untransform the frontier through all applied transforms, in reverse.
    for t in reversed(self.transforms.values()):  # noqa T484
        frontier_observation_data = t.untransform_observation_data(
            frontier_observation_data, []
        )
    return frontier_observation_data
def infer_objective_thresholds(
    self,
    search_space: Optional[SearchSpace] = None,
    optimization_config: Optional[OptimizationConfig] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> List[ObjectiveThreshold]:
    """Infer objective thresholds.

    Uses the model-estimated Pareto frontier over the in-sample points to
    infer absolute (not relativized) objective thresholds, via a heuristic
    that sets each threshold to a scaled nadir point, where the nadir point
    is scaled back based on the range of each objective across the current
    in-sample Pareto frontier.

    Raises:
        AxError: If the underlying model lacks ``model``/``Xs`` attributes.
    """
    if search_space is None:
        search_space = self._model_space
    search_space = search_space.clone()
    transformed_gen_args = self._get_transformed_gen_args(
        search_space=search_space,
        optimization_config=optimization_config,
        fixed_features=fixed_features,
    )
    # Transformed args at the array (ArrayModelbridge) level.
    array_gen_args = self._get_transformed_model_gen_args(
        search_space=transformed_gen_args.search_space,
        fixed_features=transformed_gen_args.fixed_features,
        pending_observations={},
        optimization_config=transformed_gen_args.optimization_config,
    )
    # Transformed args at the tensor (TorchModelbridge) level.
    obj_weights_t, out_cons_t, lin_cons_t, _, _ = validate_and_apply_final_transform(
        objective_weights=array_gen_args.objective_weights,
        outcome_constraints=array_gen_args.outcome_constraints,
        pending_observations=None,
        linear_constraints=array_gen_args.linear_constraints,
        final_transform=self._array_to_tensor,
    )
    # Threshold inference needs direct access to the torch-level model.
    model = not_none(self.model)
    try:
        torch_model = model.model  # pyre-ignore [16]
        Xs = model.Xs  # pyre-ignore [16]
    except AttributeError:
        raise AxError(
            "infer_objective_thresholds requires a TorchModel with model "
            "and Xs attributes."
        )
    bounds = array_gen_args.search_space_digest.bounds
    thresholds_arr = infer_objective_thresholds(
        model=torch_model,
        objective_weights=obj_weights_t,
        bounds=bounds,
        outcome_constraints=out_cons_t,
        linear_constraints=lin_cons_t,
        fixed_features=array_gen_args.fixed_features,
        Xs=Xs,
    )
    return self.untransform_objective_thresholds(
        objective_thresholds=thresholds_arr,
        objective_weights=obj_weights_t,
        bounds=bounds,
        fixed_features=array_gen_args.fixed_features,
    )
def _model_gen(
    self,
    n: int,
    bounds: List[Tuple[float, float]],
    objective_weights: np.ndarray,
    outcome_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    linear_constraints: Optional[Tuple[np.ndarray, np.ndarray]],
    fixed_features: Optional[Dict[int, float]],
    pending_observations: Optional[List[np.ndarray]],
    model_gen_options: Optional[TConfig],
    rounding_func: Callable[[np.ndarray], np.ndarray],
    target_fidelities: Optional[Dict[int, float]],
    objective_thresholds: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray, TGenMetadata, List[TCandidateMetadata]]:
    """Generate ``n`` candidate points from the underlying torch model.

    For MOO-capable models, passes objective thresholds through to ``gen``
    and untransforms the (user-specified or inferred) thresholds returned in
    ``gen_metadata``.

    Raises:
        ValueError: If no model has been fit yet.
    """
    if not self.model:  # pragma: no cover
        raise ValueError(FIT_MODEL_ERROR.format(action="_model_gen"))
    (
        obj_weights_t,
        out_cons_t,
        lin_cons_t,
        pending_t,
        obj_thresholds_t,
    ) = validate_and_apply_final_transform(
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
        linear_constraints=linear_constraints,
        pending_observations=pending_observations,
        objective_thresholds=objective_thresholds,
        final_transform=self._array_to_tensor,
    )
    # Rounding must operate on tensors once inside the torch model.
    rounding_func_t = self._array_callable_to_tensor_callable(rounding_func)
    # Options supplied at gen time override the modelbridge defaults.
    gen_options = {
        **self._default_model_gen_options,
        **(model_gen_options or {}),
    }
    # TODO(ehotaj): For some reason, we're getting models which do not support MOO
    # even when optimization_config has multiple objectives, so we can't use
    # self.is_moo_problem here.
    is_moo_problem = self.is_moo_problem and isinstance(
        self.model, (BoTorchModel, MultiObjectiveBotorchModel)
    )
    # Only MOO-capable models accept `objective_thresholds` in gen().
    extra_kwargs = (
        {"objective_thresholds": obj_thresholds_t} if is_moo_problem else {}
    )
    X, w, gen_metadata, candidate_metadata = not_none(self.model).gen(
        n=n,
        bounds=bounds,
        objective_weights=obj_weights_t,
        outcome_constraints=out_cons_t,
        linear_constraints=lin_cons_t,
        fixed_features=fixed_features,
        pending_observations=pending_t,
        model_gen_options=gen_options,
        rounding_func=rounding_func_t,
        target_fidelities=target_fidelities,
        **extra_kwargs,
    )
    if is_moo_problem:
        # If objective_thresholds are supplied by the user, then the transformed
        # user-specified objective thresholds are in gen_metadata. Otherwise,
        # inferred objective thresholds are in gen_metadata. Either way, replace
        # them with their untransformed counterparts.
        gen_metadata["objective_thresholds"] = self._untransform_objective_thresholds(
            objective_thresholds=gen_metadata["objective_thresholds"],
            objective_weights=obj_weights_t,
            bounds=bounds,
            fixed_features=fixed_features,
        )
    return (
        X.detach().cpu().clone().numpy(),
        w.detach().cpu().clone().numpy(),
        gen_metadata,
        candidate_metadata,
    )