class MESMO_Optimizer2(AcquisitionFunctionMaximizer):
    """Implements Scipy optimizer for MESMO. Only on continuous dims

    Combines plain Monte-Carlo sampling with multi-start L-BFGS-B
    refinement in the unit hypercube, then returns all evaluated points
    sorted by acquisition value.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """

    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 num_mc: int = 1000,
                 num_opt: int = 1000,
                 rand_prob: float = 0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        # num_mc: number of pure Monte-Carlo samples evaluated per call
        self.num_mc = num_mc
        # num_opt: number of L-BFGS-B restarts (one local run per seed point)
        self.num_opt = num_opt
        self.minimizer = scipy.optimize.minimize

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """

        def inverse_acquisition(x):
            # shape of x = (d,)
            # Negate because scipy minimizes while we want to maximize acq.
            return -self.acquisition_function(x, convert=False)[0]  # shape=(1,)

        d = len(self.config_space.get_hyperparameters())
        bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now
        bounds = [bound] * d
        acq_configs = []

        # MC: score num_mc uniform random points in one batched acq call.
        x_tries = self.rng.uniform(bound[0], bound[1], size=(self.num_mc, d))
        acq_tries = self.acquisition_function(x_tries, convert=False)
        for i in range(x_tries.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=x_tries[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_tries[i], config))

        # L-BFGS-B: one independent local optimization per random seed point.
        # Failed runs (result.success is False) are skipped entirely.
        x_seed = self.rng.uniform(low=bound[0],
                                  high=bound[1],
                                  size=(self.num_opt, d))
        for i in range(x_seed.shape[0]):
            x0 = x_seed[i].reshape(1, -1)
            result = self.minimizer(inverse_acquisition,
                                    x0=x0,
                                    method='L-BFGS-B',
                                    bounds=bounds)
            if not result.success:
                continue
            # convert array to Configuration
            config = Configuration(self.config_space, vector=result.x)
            config.origin = 'Scipy'
            acq_val = self.acquisition_function(result.x, convert=False)  # [0]
            acq_configs.append((acq_val, config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        # Not used: this optimizer overrides maximize() directly.
        raise NotImplementedError()
class USeMO_Optimizer(AcquisitionFunctionMaximizer):
    """Implements USeMO optimizer

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """

    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # useless in USeMO
            **kwargs) -> Iterable[Configuration]:
        """Rank the candidate points pre-computed by the acquisition function.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """
        # USeMO stores its candidates and their uncertainty scores on the
        # acquisition function itself; this optimizer only sorts them.
        uncertainty_vals = np.asarray(self.acquisition_function.uncertainties)
        candidate_arrays = np.asarray(self.acquisition_function.candidates)
        assert len(uncertainty_vals.shape) == 1 \
            and len(candidate_arrays.shape) == 2 \
            and uncertainty_vals.shape[0] == candidate_arrays.shape[0]

        acq_configs = [
            (score, Configuration(self.config_space, vector=vector))
            for score, vector in zip(uncertainty_vals, candidate_arrays)
        ]

        # Shuffle before sorting so equal scores are tie-broken at random.
        self.rng.shuffle(acq_configs)
        acq_configs.sort(reverse=True, key=lambda pair: pair[0])

        ranked_configs = [pair[1] for pair in acq_configs]

        challengers = ChallengerList(ranked_configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
class ScipyGlobalOptimizer(AcquisitionFunctionMaximizer):
    """
    Wraps scipy global optimizer (differential evolution). Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rand_prob : float
        Probability of choosing a random configuration in the ChallengerList.

    rng : np.random.RandomState or int, optional
    """

    def __init__(
            self,
            acquisition_function: AbstractAcquisitionFunction,
            config_space: ConfigurationSpace,
            rand_prob: float = 0.0,
            rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(self.config_space)
        # Only continuous (Integer/Float) hyperparameters are supported.
        assert all(types == 0)
        self.bounds = bounds

    def maximize(self, runhistory: HistoryContainer, initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Run differential evolution and return the best point found.

        Returns
        -------
        ChallengerList with at most one configuration (empty if the
        optimizer result could not be converted).
        """

        def negative_acquisition(x):
            # shape of x = (d,)
            # Negate because scipy minimizes while we want to maximize acq.
            return -self.acquisition_function(x, convert=False)[0]  # shape=(1,)

        acq_configs = []
        result = scipy.optimize.differential_evolution(
            func=negative_acquisition, bounds=self.bounds)
        if not result.success:
            self.logger.debug(
                'Scipy differential evolution optimizer failed. Info:\n%s' %
                (result, ))
        try:
            config = Configuration(self.config_space, vector=result.x)
            acq = self.acquisition_function(result.x, convert=False)
            acq_configs.append((acq, config))
        except Exception as e:
            # Fix: was a bare `except Exception: pass` that silently hid why
            # the optimizer result could not be turned into a Configuration.
            self.logger.debug(
                'Failed to convert optimizer result to Configuration: %s' %
                (e, ))
        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy differential evolution optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
class StagedBatchScipyOptimizer(AcquisitionFunctionMaximizer):
    """ todo constraints
    Use batch scipy.optimize with start points chosen by specific method.
    Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    num_random : Number of random chosen points

    num_restarts : The number of starting points for multistart acquisition
        function optimization

    raw_samples : The number of samples for initialization

    batch_limit : Number of points in a batch optimized jointly by scipy minimizer

    scipy_maxiter : Maximum number of scipy minimizer iterations to perform

    rand_prob : Probability of choosing random config

    rng : np.random.RandomState or int, optional
    """

    def __init__(
            self,
            acquisition_function: AbstractAcquisitionFunction,
            config_space: ConfigurationSpace,
            num_random: int = 1000,
            num_restarts: int = 20,
            raw_samples: int = 1024,
            batch_limit: int = 5,
            scipy_maxiter: int = 200,
            rand_prob: float = 0.0,
            rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.num_random = num_random
        self.num_restarts = num_restarts
        self.raw_samples = raw_samples
        self.batch_limit = batch_limit
        self.scipy_max_iter = scipy_maxiter
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)
        self.minimizer = scipy.optimize.minimize
        self.method = "L-BFGS-B"
        self.dim = len(self.config_space.get_hyperparameters())
        self.bound = (0.0, 1.0)  # todo only on continuous dims (int, float) now

    def gen_initial_points(self, num_restarts, raw_samples):
        """Pick the `num_restarts` best of `raw_samples` uniform random points.

        Returns an array of shape (num_restarts, dim).
        """
        # todo other strategy
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(raw_samples, self.dim))
        acq_random = self.acquisition_function(random_points,
                                               convert=False).reshape(-1)
        idx = np.argsort(acq_random)[::-1][:num_restarts]
        return random_points[idx]

    def gen_batch_scipy_points(self, initial_points: np.ndarray):
        """Jointly refine a batch of start points with one scipy minimization.

        The batch is flattened into a single vector so one L-BFGS-B run
        optimizes the sum of acquisition values over all points at once.
        Returns the refined points with the same shape as `initial_points`.
        """

        def f(X_flattened):
            X = X_flattened.reshape(shapeX)
            # Sum over the batch: each point's gradient is independent, so
            # minimizing the (negated) sum optimizes all points jointly.
            joint_acq = -self.acquisition_function(X,
                                                   convert=False).sum().item()
            return joint_acq

        shapeX = initial_points.shape
        x0 = initial_points.reshape(-1)
        bounds = [self.bound] * x0.shape[0]

        result = self.minimizer(
            f,
            x0=x0,
            method=self.method,
            bounds=bounds,
            options=dict(maxiter=self.scipy_max_iter),
        )

        # Return result.x even if the run "failed": a common failure mode is
        # 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT', where the partially
        # optimized points are still useful.
        return result.x.reshape(shapeX)

    def maximize(
            self,
            runhistory: HistoryContainer,
            num_points: int,  # todo useless
            **kwargs) -> List[Tuple[float, Configuration]]:
        """Combine random sampling with staged batch scipy refinement.

        Returns a ChallengerList of all evaluated configurations sorted by
        acquisition value (random tie-break).
        """
        acq_configs = []

        # Stage 1: plain random points, scored in one batched acq call.
        random_points = self.rng.uniform(self.bound[0],
                                         self.bound[1],
                                         size=(self.num_random, self.dim))
        acq_random = self.acquisition_function(random_points, convert=False)
        for i in range(random_points.shape[0]):
            # convert array to Configuration
            config = Configuration(self.config_space, vector=random_points[i])
            config.origin = 'Random Search'
            acq_configs.append((acq_random[i], config))

        # Stage 2: refine the best initial points in batches of batch_limit.
        initial_points = self.gen_initial_points(
            num_restarts=self.num_restarts, raw_samples=self.raw_samples)

        for start_idx in range(0, self.num_restarts, self.batch_limit):
            end_idx = min(start_idx + self.batch_limit, self.num_restarts)
            # optimize using random restart optimization
            scipy_points = self.gen_batch_scipy_points(
                initial_points[start_idx:end_idx])
            if scipy_points is None:
                continue
            acq_scipy = self.acquisition_function(scipy_points, convert=False)
            for i in range(scipy_points.shape[0]):
                # convert array to Configuration
                config = Configuration(self.config_space,
                                       vector=scipy_points[i])
                config.origin = 'Batch Scipy'
                acq_configs.append((acq_scipy[i], config))

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
class RandomScipyOptimizer(AcquisitionFunctionMaximizer):
    """
    Use scipy.optimize with start points chosen by random search.
    Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional
    """

    def __init__(
            self,
            acquisition_function: AbstractAcquisitionFunction,
            config_space: ConfigurationSpace,
            rand_prob: float = 0.0,
            rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)

        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        # Stage 1: interleaved local+random search produces candidate starts.
        self.random_search = InterleavedLocalAndRandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        # Stage 2: local scipy refinement of the best candidate starts.
        self.scipy_optimizer = ScipyOptimizer(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 num_trials=10,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Run random search, then refine the top `num_trials` starts with scipy.

        All configurations from both stages are pooled and returned sorted by
        acquisition value (random tie-break) as a ChallengerList.
        """
        acq_configs = []

        initial_configs = self.random_search.maximize(runhistory, num_points,
                                                      **kwargs).challengers
        initial_acqs = self.acquisition_function(initial_configs)
        acq_configs.extend(zip(initial_acqs, initial_configs))

        success_count = 0
        # initial_configs is already sorted best-first, so the first
        # num_trials entries are the most promising scipy start points.
        for config in initial_configs[:num_trials]:
            scipy_configs = self.scipy_optimizer.maximize(
                runhistory, initial_config=config).challengers
            if not scipy_configs:  # empty
                continue
            scipy_acqs = self.acquisition_function(scipy_configs)
            acq_configs.extend(zip(scipy_acqs, scipy_configs))
            success_count += 1
        if success_count == 0:
            self.logger.warning(
                'None of Scipy optimizations are successful in RandomScipyOptimizer.'
            )

        # shuffle for random tie-break
        self.rng.shuffle(acq_configs)

        # sort according to acq value
        acq_configs.sort(reverse=True, key=lambda x: x[0])

        configs = [_[1] for _ in acq_configs]

        challengers = ChallengerList(configs, self.config_space,
                                     self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        # Not used: this optimizer overrides maximize() directly.
        raise NotImplementedError()
class InterleavedLocalAndRandomSearch(AcquisitionFunctionMaximizer):
    """Implements openbox's default acquisition function optimization.

    This acq_maximizer performs local search from the previous best points
    according to the acquisition function, uses the acquisition function to
    sort randomly sampled configurations and interleaves unsorted, randomly
    sampled configurations in between.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    max_steps: int
        [LocalSearch] Maximum number of steps that the local search will perform

    n_steps_plateau_walk: int
        [LocalSearch] number of steps during a plateau walk before local search terminates

    n_sls_iterations: int
        [Local Search] number of local search iterations
    """

    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 max_steps: Optional[int] = None,
                 n_steps_plateau_walk: int = 10,
                 n_sls_iterations: int = 10,
                 rand_prob=0.25):
        super().__init__(acquisition_function, config_space, rng)
        self.random_search = RandomSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng)
        self.local_search = LocalSearch(
            acquisition_function=acquisition_function,
            config_space=config_space,
            rng=rng,
            max_steps=max_steps,
            n_steps_plateau_walk=n_steps_plateau_walk)
        self.n_sls_iterations = n_sls_iterations
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        # =======================================================================
        # self.local_search = DiffOpt(
        #     acquisition_function=acquisition_function,
        #     config_space=config_space,
        #     rng=rng
        # )
        # =======================================================================

    def maximize(self,
                 runhistory: HistoryContainer,
                 num_points: int,
                 random_configuration_chooser=None,
                 **kwargs) -> Iterable[Configuration]:
        """Maximize acquisition function using ``_maximize``.

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        random_configuration_chooser: ~openbox.acq_maximizer.random_configuration_chooser.RandomConfigurationChooser
            part of the returned ChallengerList such
            that we can interleave random configurations
            by a scheme defined by the random_configuration_chooser;
            random_configuration_chooser.next_smbo_iteration()
            is called at the end of this function
        **kwargs
            passed to acquisition function

        Returns
        -------
        Iterable[Configuration]
            to be concrete: ~openbox.ei_optimization.ChallengerList
        """
        next_configs_by_local_search = self.local_search._maximize(
            runhistory, self.n_sls_iterations, **kwargs)

        # Get configurations sorted by EI
        next_configs_by_random_search_sorted = self.random_search._maximize(
            runhistory,
            num_points - len(next_configs_by_local_search),
            _sorted=True,
        )

        # Having the configurations from random search, sorted by their
        # acquisition function value is important for the first few iterations
        # of openbox. As long as the random forest predicts constant value, we
        # want to use only random configurations. Having them at the beginning
        # of the list ensures this (even after adding the configurations by
        # local search, and then sorting them)
        next_configs_by_acq_value = (next_configs_by_random_search_sorted +
                                     next_configs_by_local_search)
        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        self.logger.debug(
            "First 10 acq func (origin) values of selected configurations: %s",
            str([[_[0], _[1].origin] for _ in next_configs_by_acq_value[:10]]))
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        # Not used: this optimizer overrides maximize() directly.
        raise NotImplementedError()
class CMAESOptimizer(AcquisitionFunctionMaximizer):
    # Acquisition optimizer backed by the `cma` package (CMA-ES).
    # Only continuous dims are supported (see the `types == 0` assertion).

    def __init__(
            self,
            acquisition_function: AbstractAcquisitionFunction,
            config_space: ConfigurationSpace,
            rng: Union[bool, np.random.RandomState] = None,
            rand_prob=0.25,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        # Not used: this optimizer overrides maximize() directly.
        raise NotImplementedError()

    def maximize(self, runhistory: HistoryContainer, num_points: int,
                 **kwargs) -> Iterable[Tuple[float, Configuration]]:
        """Run CMA-ES in the unit hypercube until num_points acq evaluations.

        Returns a ChallengerList of all evaluated configurations sorted by
        acquisition value (descending).
        """
        try:
            from cma import CMAEvolutionStrategy
        except ImportError:
            raise ImportError("Package cma is not installed!")

        types, bounds = get_types(self.config_space)
        assert all(types == 0)

        # Check Constant Hyperparameter: constants have NaN as upper bound
        # and are excluded from the CMA-ES search dimensions.
        const_idx = list()
        for i, bound in enumerate(bounds):
            if np.isnan(bound[1]):
                const_idx.append(i)

        hp_num = len(bounds) - len(const_idx)
        # Start at the origin with sigma 0.99, constrained to [0, 1].
        es = CMAEvolutionStrategy(hp_num * [0],
                                  0.99,
                                  inopts={'bounds': [0, 1]})

        eval_num = 0
        next_configs_by_acq_value = list()
        while eval_num < num_points:
            X = es.ask(number=es.popsize)
            _X = X.copy()
            for i in range(len(_X)):
                # Re-insert constant dimensions (value 0) so the vectors match
                # the full configuration-space dimensionality.
                for index in const_idx:
                    _X[i] = np.insert(_X[i], index, 0)
            _X = np.asarray(_X)
            values = self.acquisition_function._compute(_X)
            values = np.reshape(values, (-1, ))
            # NOTE(review): CMA-ES minimizes the fitness it is told, yet the
            # raw acquisition values (which this class otherwise treats as
            # to-be-maximized, cf. the reverse sort below) are passed in.
            # Verify whether `-values` was intended here.
            es.tell(X, values)
            next_configs_by_acq_value.extend([(values[i], _X[i])
                                              for i in range(es.popsize)])
            eval_num += es.popsize

        next_configs_by_acq_value.sort(reverse=True, key=lambda x: x[0])
        next_configs_by_acq_value = [_[1] for _ in next_configs_by_acq_value]
        next_configs_by_acq_value = [
            Configuration(self.config_space, vector=array)
            for array in next_configs_by_acq_value
        ]

        challengers = ChallengerList(next_configs_by_acq_value,
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers
class batchMCOptimizer(AcquisitionFunctionMaximizer):
    """Batch Monte-Carlo acquisition optimizer using Sobol sampling.

    Optionally restricts sampling to a trust region around an incumbent when
    a ``turbo_state`` is supplied via kwargs.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rng : np.random.RandomState or int, optional

    batch_size : int, optional
        Points sampled per acq-evaluation batch; defaults to a size scaled
        with the number of continuous dims, clamped to [2000, 5000].

    rand_prob : Probability of choosing random config
    """

    def __init__(self,
                 acquisition_function: AbstractAcquisitionFunction,
                 config_space: ConfigurationSpace,
                 rng: Union[bool, np.random.RandomState] = None,
                 batch_size=None,
                 rand_prob=0.0):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        if batch_size is None:
            types, bounds = get_types(self.config_space)
            dim = np.sum(types == 0)
            self.batch_size = min(5000, max(2000, 200 * dim))
        else:
            self.batch_size = batch_size

    def maximize(self,
                 runhistory: Union[HistoryContainer,
                                   MultiStartHistoryContainer],
                 num_points: int,
                 _sorted: bool = True,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Randomly sampled configurations

        Parameters
        ----------
        runhistory: ~openbox.utils.history_container.HistoryContainer
            runhistory object
        num_points: int
            number of points to be sampled
        _sorted: bool
            whether random configurations are sorted according to acquisition function
        **kwargs
            turbo_state: TurboState
                provide turbo state to use trust region

        Returns
        -------
        iterable
            An iterable consisting of
            tuple(acquisition_value, :class:`openbox.config_space.Configuration`).
        """
        from openbox.utils.samplers import SobolSampler

        cur_idx = 0
        config_acq = list()
        weight_seed = self.rng.randint(
            0, int(1e8))  # The same weight seed each iteration

        while cur_idx < num_points:
            batch_size = min(self.batch_size, num_points - cur_idx)
            turbo_state = kwargs.get('turbo_state', None)
            if turbo_state is None:
                lower_bounds = None
                upper_bounds = None
            else:
                assert isinstance(runhistory, MultiStartHistoryContainer)
                if runhistory.num_objs > 1:
                    # TODO implement adaptive strategy to choose trust region center for MO
                    raise NotImplementedError()
                else:
                    # Fix: pick the trust-region center with the seeded
                    # self.rng (was the module-level `random.choice`, which
                    # ignored the RandomState and broke reproducibility).
                    incumbents = runhistory.get_incumbents()
                    x_center = incumbents[self.rng.randint(
                        len(incumbents))][0].get_array()
                    lower_bounds = x_center - turbo_state.length / 2.0
                    upper_bounds = x_center + turbo_state.length / 2.0
            sobol_sampler = SobolSampler(self.config_space,
                                         batch_size,
                                         lower_bounds,
                                         upper_bounds,
                                         random_state=self.rng.randint(
                                             0, int(1e8)))
            _configs = sobol_sampler.generate(return_config=True)
            _acq_values = self.acquisition_function(_configs, seed=weight_seed)
            config_acq.extend([(_configs[idx], _acq_values[idx])
                               for idx in range(len(_configs))])

            # Fix: advance by the clamped batch actually generated, not by
            # self.batch_size, so the loop accounting matches the samples.
            cur_idx += batch_size

        config_acq.sort(reverse=True, key=lambda x: x[1])

        challengers = ChallengerList([_[0] for _ in config_acq],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()
class ScipyOptimizer(AcquisitionFunctionMaximizer):
    """
    Wraps scipy optimizer (L-BFGS-B). Only on continuous dims.

    Parameters
    ----------
    acquisition_function : ~openbox.acquisition_function.acquisition.AbstractAcquisitionFunction

    config_space : ~openbox.config_space.ConfigurationSpace

    rand_prob : float
        Probability of choosing a random configuration in the ChallengerList.

    rng : np.random.RandomState or int, optional
    """

    def __init__(
            self,
            acquisition_function: AbstractAcquisitionFunction,
            config_space: ConfigurationSpace,
            rand_prob: float = 0.0,
            rng: Union[bool, np.random.RandomState] = None,
    ):
        super().__init__(acquisition_function, config_space, rng)
        self.random_chooser = ChooserProb(prob=rand_prob, rng=rng)

        types, bounds = get_types(
            self.config_space)  # todo: support constant hp in scipy optimizer
        assert all(
            types == 0
        ), 'Scipy optimizer (L-BFGS-B) only supports Integer and Float parameters.'
        self.bounds = bounds

        options = dict(disp=False, maxiter=1000)
        self.scipy_config = dict(tol=None, method='L-BFGS-B', options=options)

    def maximize(self, runhistory: HistoryContainer, initial_config=None,
                 **kwargs) -> List[Tuple[float, Configuration]]:
        """Run one L-BFGS-B optimization from `initial_config` (or a random one).

        Returns
        -------
        ChallengerList with at most one configuration (empty if the
        optimizer result could not be converted).
        """

        def negative_acquisition(x):
            # shape of x = (d,)
            x = np.clip(x, 0.0, 1.0)  # fix numerical problem in L-BFGS-B
            # Negate because scipy minimizes while we want to maximize acq.
            return -self.acquisition_function(x, convert=False)[0]  # shape=(1,)

        if initial_config is None:
            initial_config = self.config_space.sample_configuration()
        init_point = initial_config.get_array()

        acq_configs = []
        result = scipy.optimize.minimize(fun=negative_acquisition,
                                         x0=init_point,
                                         bounds=self.bounds,
                                         **self.scipy_config)
        if not result.success:
            self.logger.debug('Scipy optimizer failed. Info:\n%s' %
                              (result, ))
        try:
            x = np.clip(result.x, 0.0,
                        1.0)  # fix numerical problem in L-BFGS-B
            config = Configuration(self.config_space, vector=x)
            acq = self.acquisition_function(x, convert=False)
            acq_configs.append((acq, config))
        except Exception as e:
            # Fix: was a bare `except Exception: pass` that silently hid why
            # the optimizer result could not be turned into a Configuration.
            self.logger.debug(
                'Failed to convert optimizer result to Configuration: %s' %
                (e, ))
        if not acq_configs:  # empty
            self.logger.warning(
                'Scipy optimizer failed. Return empty config list. Info:\n%s'
                % (result, ))

        challengers = ChallengerList([config for _, config in acq_configs],
                                     self.config_space, self.random_chooser)
        self.random_chooser.next_smbo_iteration()
        return challengers

    def _maximize(self, runhistory: HistoryContainer, num_points: int,
                  **kwargs) -> Iterable[Tuple[float, Configuration]]:
        raise NotImplementedError()