import numpy as np

# NOTE: the import paths below are assumptions based on pymoo's utility
# modules and may differ between pymoo versions.
from pymoo.model.population import Population
from pymoo.util.misc import norm_eucl_dist, norm_euclidean_distance, vectorized_cdist


def _adapt(self):
    pop = self.pop

    X, F, best = pop.get("X", "F", "best")
    best = Population.create(*best)
    w, c1, c2 = self.w, self.c1, self.c2

    # get the average distance from one to another for normalization
    D = norm_eucl_dist(self.problem, X, X)
    mD = D.sum(axis=1) / (len(pop) - 1)
    _min, _max = mD.min(), mD.max()

    # get the average distance to the global best
    g_D = norm_euclidean_distance(self.problem)(best.get("X"), X).mean()

    # evolutionary factor: where the best particle's mean distance lies
    # relative to the distances of all other particles
    f = (g_D - _min) / (_max - _min + 1e-32)

    # membership of each of the four evolutionary states
    S = np.array([S1_exploration(f),
                  S2_exploitation(f),
                  S3_convergence(f),
                  S4_jumping_out(f)])
    strategy = S.argmax() + 1

    delta = 0.05 + (np.random.random() * 0.05)

    # exploration
    if strategy == 1:
        c1 += delta
        c2 -= delta
    # exploitation
    elif strategy == 2:
        c1 += 0.5 * delta
        c2 -= 0.5 * delta
    # convergence
    elif strategy == 3:
        c1 += 0.5 * delta
        c2 += 0.5 * delta
    # jumping out
    elif strategy == 4:
        c1 -= delta
        c2 += delta

    # clip the acceleration coefficients to [1.5, 2.5]
    c1 = max(1.5, min(2.5, c1))
    c2 = max(1.5, min(2.5, c2))

    # rescale so that c1 + c2 does not exceed 4.0; the sum must be cached
    # before modifying c1, otherwise the rescaling of c2 uses the wrong total
    if c1 + c2 > 4.0:
        s = c1 + c2
        c1 = 4.0 * (c1 / s)
        c2 = 4.0 * (c2 / s)

    # adapt the inertia weight as a sigmoid of the evolutionary factor f
    w = 1 / (1 + 1.5 * np.exp(-2.6 * f))

    self.f = f
    self.strategy = strategy
    self.c1 = c1
    self.c2 = c2
    self.w = w
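# Standalone sanity check (not part of the algorithm) of the inertia-weight
# mapping used above: w = 1 / (1 + 1.5 * exp(-2.6 * f)) maps the evolutionary
# factor f in [0, 1] to roughly [0.4, 0.9], the customary PSO inertia range.
def _demo_inertia_mapping():
    for f in (0.0, 0.5, 1.0):
        w = 1 / (1 + 1.5 * np.exp(-2.6 * f))
        print(f"f={f:.1f} -> w={w:.3f}")  # approx. 0.400, 0.710, 0.900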
def predict_by_nearest_neighbors(X, F, X_pred, n_nearest, problem):
    # normalized distances from each prediction point to all evaluated points
    D = vectorized_cdist(X_pred, X, func_dist=norm_euclidean_distance(problem))

    # indices of the n_nearest evaluated points for each prediction point
    nearest = np.argsort(D, axis=1)[:, :n_nearest]

    # row indices paired with the nearest-neighbor columns
    I = np.arange(len(D))[:, None].repeat(n_nearest, axis=1)
    dist_to_nearest = D[I, nearest]

    # inverse-distance weights so that closer neighbors contribute more
    # (weighting by the raw distance would let the farthest neighbor dominate)
    w = 1 / (dist_to_nearest + 1e-16)
    w = w / w.sum(axis=1)[:, None]
    F_pred = (F[:, 0][nearest] * w).sum(axis=1)

    # use the mean distance to the neighbors as an uncertainty measure
    F_uncert = dist_to_nearest.mean(axis=1)

    return F_pred, F_uncert
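# Illustrative-only, self-contained sketch of the same inverse-distance k-NN
# prediction using plain numpy and an unnormalized Euclidean distance instead
# of pymoo's problem-normalized one. All names here are hypothetical.
def _knn_predict_sketch(X, y, X_pred, k=3):
    # pairwise Euclidean distances, shape (len(X_pred), len(X))
    D = np.sqrt(((X_pred[:, None, :] - X[None, :, :]) ** 2).sum(axis=2))
    nearest = np.argsort(D, axis=1)[:, :k]
    d = np.take_along_axis(D, nearest, axis=1)
    w = 1 / (d + 1e-16)
    w /= w.sum(axis=1, keepdims=True)
    return (y[nearest] * w).sum(axis=1), d.mean(axis=1)

# usage: predict a toy objective at 5 random points from 50 evaluated ones
# X = np.random.random((50, 2)); y = (X ** 2).sum(axis=1)
# y_hat, uncert = _knn_predict_sketch(X, y, np.random.random((5, 2)))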
def _next(self):
    # all points visited so far
    _X, _F, _evaluated_by_algorithm = self.evaluator.history.get("X", "F", "algorithm")

    # collect attributes from each algorithm and determine whether it has to be replaced or not
    pop, F, n_evals = [], [], []
    for k, algorithm in enumerate(self.algorithms):

        # collect some data from the current algorithm
        _pop = algorithm.pop

        # whether the algorithm has terminated
        has_finished = algorithm.termination.has_terminated(algorithm)

        # whether the area was already explored before
        closest_dist_to_others = vectorized_cdist(_pop.get("X"),
                                                  _X[_evaluated_by_algorithm != algorithm],
                                                  func_dist=norm_euclidean_distance(self.problem))
        too_close_to_others = (closest_dist_to_others.min(axis=1) < 1e-3).all()

        # whether the algorithm holds the current best - if yes, it will not be replaced
        current_best = self.evaluator.opt.get("F").min() == _pop.get("F").min()

        # the algorithm is not really useful anymore
        if not current_best and (has_finished or too_close_to_others):

            # find a suitable x0 which is far from others or has good expectations
            self.sampling.criterion = lambda X: vectorized_cdist(X, _X).min()
            X = self.sampling.do(self.problem, self.n_initial_samples).get("X")

            # distance in x-space to other existing points
            x_dist = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1)
            f_pred, f_uncert = predict_by_nearest_neighbors(_X, _F, X, 5, self.problem)

            # trade off distance to known points, predicted value, and uncertainty
            fronts = NonDominatedSorting().do(np.column_stack([-x_dist, f_pred, f_uncert]))
            I = np.random.choice(fronts[0])

            # I = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1).argmax()

            # choose the selected point as the restart location
            x0 = X[[I]]

            # replace the current algorithm with a fresh nelder-mead run from x0
            algorithm = get_algorithm("nelder-mead",
                                      problem=self.problem,
                                      x0=x0,
                                      termination=NelderAndMeadTermination(x_tol=1e-3, f_tol=1e-3),
                                      evaluator=self.evaluator)
            algorithm.initialize()
            self.algorithms[k] = algorithm

        pop.append(algorithm.pop)
        F.append(algorithm.pop.get("F"))
        n_evals.append(self.evaluator.algorithms[algorithm])

    # get the values of all algorithms as arrays
    F, n_evals = np.array(F), np.array(n_evals)
    rewards = 1 - normalize(F.min(axis=1))[:, 0]
    n_evals_total = self.evaluator.n_eval - self.evaluator.algorithms[self]

    # calculate the upper confidence bound and advance the most promising algorithm
    ucb = rewards + 0.95 * np.sqrt(np.log(n_evals_total) / n_evals)
    I = ucb.argmax()
    self.algorithms[I].next()

    # create the population object containing all algorithms
    self.pop = Population.create(*pop)

    # update the current optimum
    self.opt = self.evaluator.opt
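# Illustrative-only sketch of the UCB rule used above to pick which parallel
# run to advance: each algorithm is an "arm", its reward is the normalized
# quality of its best solution, and the exploration bonus grows for arms
# that have received few evaluations. The numbers below are made up.
def _demo_ucb_selection():
    rewards = np.array([0.8, 0.5, 0.6])  # 1 - normalized best F per run
    n_evals = np.array([120, 40, 15])    # evaluations spent on each run
    n_total = n_evals.sum()

    ucb = rewards + 0.95 * np.sqrt(np.log(n_total) / n_evals)
    print(ucb.argmax())                  # -> 2: the least-evaluated run wins
                                         #    despite a mediocre reward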