def _optimize(self, acquisition: Acquisition, context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
    """
    See AcquisitionOptimizerBase._optimizer for parameter descriptions.

    Optimize an acquisition function using a genetic algorithm (GA).
    """
    # initialize population of strings
    random_design = RandomDesign(self.space)
    population = random_design.get_samples(self.population_size)
    # calculate fitness for the current population
    fitness_pop = acquisition.evaluate(population)
    standardized_fitness_pop = fitness_pop / sum(fitness_pop)
    # initialize best location and score so far
    X_max = population[np.argmax(fitness_pop)].reshape(-1, 1)
    acq_max = np.max(fitness_pop).reshape(-1, 1)
    iteration_bests = []
    _log.info("Starting local optimization of acquisition function {}".format(type(acquisition)))
    for step in range(self.num_evolutions):
        _log.info("Performing evolution step {}".format(step))
        # evolve the population
        population = self._evolve(population, standardized_fitness_pop)
        # recalculate fitness
        fitness_pop = acquisition.evaluate(population)
        standardized_fitness_pop = fitness_pop / sum(fitness_pop)
        # update best location and score (if a better solution was found)
        acq_pop_max = np.max(fitness_pop)
        iteration_bests.append(acq_pop_max)
        _log.info("Best acquisition score in the new population: {}".format(acq_pop_max))
        if acq_pop_max > acq_max[0][0]:
            acq_max[0][0] = acq_pop_max
            X_max[0] = population[np.argmax(fitness_pop)]
    # keep running dynamically (stop when no improvement over the most recent 10 iterations)
    stop = False
    i = self.num_evolutions
    while not stop:
        _log.info("Performing evolution step {}".format(i))
        # evolve the population
        population = self._evolve(population, standardized_fitness_pop)
        # recalculate fitness
        fitness_pop = acquisition.evaluate(population)
        standardized_fitness_pop = fitness_pop / sum(fitness_pop)
        # update best location and score (if a better solution was found)
        acq_pop_max = np.max(fitness_pop)
        iteration_bests.append(acq_pop_max)
        _log.info("Best acquisition score in the new population: {}".format(acq_pop_max))
        if acq_pop_max > acq_max[0][0]:
            acq_max[0][0] = acq_pop_max
            X_max[0] = population[np.argmax(fitness_pop)]
        # stop if the best score has not improved over the most recent 10 evolutions
        if len(iteration_bests) > 10 and acq_max[0][0] == max(iteration_bests[:-10]):
            stop = True
        # also stop if we have run for 100 evolutions in total
        if i == 100:
            stop = True
        i += 1
    # return the best solution from the whole optimization
    return X_max, acq_max
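# Note: self._evolve (not shown here) consumes the normalised fitness vector computed
# above. As a hedged illustration only -- assuming it uses fitness-proportional
# (roulette-wheel) selection, and omitting the crossover and mutation steps a full GA
# would also apply -- a minimal selection helper could look like this. The function
# name is a placeholder, not part of this module.

import numpy as np


def roulette_wheel_selection(population: np.ndarray, standardized_fitness: np.ndarray,
                             num_parents: int, rng: np.random.Generator) -> np.ndarray:
    """Illustrative sketch: draw parents with probability proportional to fitness."""
    # standardized_fitness is assumed to be non-negative and to sum to 1,
    # exactly like standardized_fitness_pop in the optimizer above.
    parent_idx = rng.choice(len(population), size=num_parents, replace=True,
                            p=standardized_fitness.ravel())
    return population[parent_idx]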
def _optimize(self, acquisition: Acquisition, context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
    """
    Implementation of abstract method.
    Taking into account gradients if acquisition supports them.

    See AcquisitionOptimizerBase._optimizer for parameter descriptions.
    See class docstring for implementation details.
    """
    x_min, fx_min = None, None
    for i in range(self.batch_size):
        # reset the acquisition before selecting the next point of the batch
        acquisition.reset(acquisition.model)
        x_min_i, fx_min_i = super()._optimize(acquisition, context_manager)
        if x_min is None:
            x_min, fx_min = x_min_i, fx_min_i
        else:
            # append the new point along axis 1 and accumulate the acquisition values
            x_min, fx_min = np.concatenate((x_min, x_min_i), axis=1), fx_min + fx_min_i
    return x_min, fx_min
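# A hedged note on the return shapes (an assumption about the base class, not something
# stated here): if super()._optimize returns a single point of shape (1, d) and a score
# of shape (1, 1), then after batch_size iterations x_min has shape (1, batch_size * d),
# with the batch points laid out side by side along axis 1, and fx_min is the sum of the
# individual acquisition values. For example, with d = 2 and batch_size = 3:
#
#     import numpy as np
#
#     points = [np.zeros((1, 2)) for _ in range(3)]
#     batch = np.concatenate(points, axis=1)
#     assert batch.shape == (1, 6)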
def _optimize(self, acquisition: Acquisition, context_manager: ContextManager) -> Tuple[np.ndarray, np.ndarray]:
    """
    Implementation of abstract method.
    Taking into account gradients if acquisition supports them.

    See AcquisitionOptimizerBase._optimizer for parameter descriptions.
    See class docstring for implementation details.
    """
    # Take negative of acquisition function because they are to be maximised and the optimizers minimise
    f = lambda x: -acquisition.evaluate(x)

    # Context validation
    if len(context_manager.contextfree_space.parameters) == 0:
        _log.warning("All parameters are fixed through context")
        x = np.array(context_manager.context_values)[None, :]
        return x, f(x)

    if acquisition.has_gradients:
        def f_df(x):
            f_value, df_value = acquisition.evaluate_with_gradients(x)
            return -f_value, -df_value
    else:
        f_df = None

    optimizer = self._get_optimizer(context_manager)
    anchor_points_generator = ObjectiveAnchorPointsGenerator(self.space, acquisition, self.num_anchor_points)

    # Select the anchor points (with context)
    anchor_points = anchor_points_generator.get(num_anchor=1, context_manager=context_manager)

    _log.info("Starting gradient-based optimization of acquisition function {}".format(type(acquisition)))
    optimized_points = []
    for a in anchor_points:
        optimized_point = apply_optimizer(optimizer, a, space=self.space, f=f, df=None, f_df=f_df,
                                          context_manager=context_manager)
        optimized_points.append(optimized_point)

    x_min, fx_min = min(optimized_points, key=lambda t: t[1])
    return x_min, -fx_min
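# The sign-flipping pattern above (negating both the value and the gradient so that a
# minimiser can maximise the acquisition) can be exercised in isolation. A minimal,
# self-contained sketch using scipy's L-BFGS-B; the toy quadratic "acquisition" and the
# helper names below are placeholders, not part of this module:

import numpy as np
from scipy.optimize import minimize


def toy_acquisition_with_gradients(x: np.ndarray):
    # Concave toy "acquisition" with its maximum at x = 2, plus an analytic gradient.
    value = float(-np.sum((x - 2.0) ** 2))
    gradient = -2.0 * (x - 2.0)
    return value, gradient


def maximize_with_minimizer(x0: np.ndarray):
    # Flip the sign of both value and gradient so the minimiser climbs the acquisition.
    def neg_f_df(x):
        value, gradient = toy_acquisition_with_gradients(x)
        return -value, -gradient

    result = minimize(neg_f_df, x0, jac=True, method="L-BFGS-B")
    # Undo the sign flip on the returned objective value.
    return result.x, -result.fun


# Example: maximize_with_minimizer(np.array([0.0])) converges to x ~= [2.0] with value ~= 0.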