def _next(self):

    # all points visited so far
    _X, _F, _evaluated_by_algorithm = self.evaluator.history.get("X", "F", "algorithm")

    # collect attributes from each algorithm and decide whether it has to be replaced or not
    pop, F, n_evals = [], [], []
    for k, algorithm in enumerate(self.algorithms):

        # the current population of this algorithm
        _pop = algorithm.pop

        # whether the algorithm has terminated
        has_finished = algorithm.termination.has_terminated(algorithm)

        # whether the area was already explored by another algorithm
        closest_dist_to_others = vectorized_cdist(_pop.get("X"), _X[_evaluated_by_algorithm != algorithm],
                                                  func_dist=norm_euclidean_distance(self.problem))
        too_close_to_others = (closest_dist_to_others.min(axis=1) < 1e-3).all()

        # whether the algorithm is the current best - if yes, it will not be replaced
        current_best = self.evaluator.opt.get("F") == _pop.get("F").min()

        # the algorithm is not really useful anymore - restart it from a new point
        if not current_best and (has_finished or too_close_to_others):

            # find a suitable x0 which is far from others or has good expected values
            self.sampling.criterion = lambda X: vectorized_cdist(X, _X).min()
            X = self.sampling.do(self.problem, self.n_initial_samples).get("X")

            # distance in x-space to other existing points
            x_dist = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1)

            # predicted objective values and their uncertainty at the candidate points
            f_pred, f_uncert = predict_by_nearest_neighbors(_X, _F, X, 5, self.problem)

            # pick a random candidate from the best trade-off front of
            # (large distance, good prediction, low uncertainty)
            fronts = NonDominatedSorting().do(np.column_stack([-x_dist, f_pred, f_uncert]))
            I = np.random.choice(fronts[0])

            # alternative: choose the candidate with the largest distance to current solutions
            # I = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1).argmax()

            x0 = X[[I]]

            # replace the current algorithm with a fresh local search from x0
            algorithm = get_algorithm("nelder-mead",
                                      problem=self.problem,
                                      x0=x0,
                                      termination=NelderAndMeadTermination(x_tol=1e-3, f_tol=1e-3),
                                      evaluator=self.evaluator,
                                      )
            algorithm.initialize()
            self.algorithms[k] = algorithm

        pop.append(algorithm.pop)
        F.append(algorithm.pop.get("F"))
        n_evals.append(self.evaluator.algorithms[algorithm])

    # get the values of all algorithms as arrays
    F, n_evals = np.array(F), np.array(n_evals)

    # normalized reward of each algorithm - lower objective values mean a higher reward
    rewards = 1 - normalize(F.min(axis=1))[:, 0]
    n_evals_total = self.evaluator.n_eval - self.evaluator.algorithms[self]

    # calculate the upper confidence bound and advance the most promising algorithm
    ucb = rewards + 0.95 * np.sqrt(np.log(n_evals_total) / n_evals)
    I = ucb.argmax()
    self.algorithms[I].next()

    # create the population object merging all algorithms
    self.pop = Population.create(*pop)

    # update the current optimum
    self.opt = self.evaluator.opt
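# A minimal, self-contained sketch of the UCB1 rule applied above: each local
# search is treated as a bandit arm, its normalized best objective value is the
# reward, and an exploration bonus favors arms with few evaluations so far.
# The helper below is illustrative only and not part of this class.
import numpy as np

def ucb1_pick(rewards, n_evals, n_evals_total, c=0.95):
    # exploitation term plus exploration bonus (UCB1)
    ucb = rewards + c * np.sqrt(np.log(n_evals_total) / n_evals)
    return ucb.argmax()

# e.g., the under-explored second arm wins despite its lower reward:
# ucb1_pick(np.array([0.9, 0.7]), np.array([100, 5]), 105)  -> 1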
def _step(self, optimizer, X, scalings):
    # evaluate the potential energy and its gradient with respect to the scalings
    obj, grad = value_and_grad(calc_potential_energy)(scalings, X)

    # let the optimizer perform one update step using the gradient
    scalings = optimizer.next(scalings, np.array(grad))

    # rescale the scalings to the unit interval
    scalings = normalize(scalings, xl=0, xu=scalings.max())

    return scalings, obj
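# A minimal sketch of the value-and-gradient pattern used in _step above,
# assuming autograd is available; calc_energy, the toy data, and the plain
# gradient-descent update are hypothetical stand-ins for calc_potential_energy
# and the optimizer object.
import autograd.numpy as anp
from autograd import value_and_grad

def calc_energy(scalings, X):
    # toy potential: squared norm of the row-wise scaled points
    return anp.sum((scalings[:, None] * X) ** 2)

scalings, X = anp.ones(3), anp.ones((3, 2))
obj, grad = value_and_grad(calc_energy)(scalings, X)  # energy and d(energy)/d(scalings)
scalings = scalings - 0.01 * grad                     # one gradient-descent step
scalings = scalings / scalings.max()                  # rescale to [0, 1], as in _step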