import math
import sys

import numpy as np


def bgoa():
    # Binary Grasshopper Optimisation Algorithm. `helper` (initialisation and
    # fitness evaluation) and `meta` (hyper-parameters: ns agents, dim bits,
    # L iterations, cmax/cmin) are assumed to be provided by the surrounding
    # module, as are social() and sigmoid().
    agents = helper.init()
    fitness = helper.fitness(agents)
    inds = fitness.argsort()[::-1]  # sort so the fittest agent comes first
    agents = agents[inds]
    target = agents[0]
    maxscore = helper.fitness(np.asarray([target]))
    dist = np.zeros((meta.ns, meta.ns))

    for l in range(meta.L):
        print("Iteration:", l + 1)
        # Decreasing comfort-zone coefficient.
        c = meta.cmax - l * (meta.cmax - meta.cmin) / meta.L

        # Pairwise distances between agents, rescaled into [1, 4] so the
        # social force stays in its attraction/repulsion range.
        dmin = sys.float_info.max
        dmax = 0
        for i in range(meta.ns):
            for j in range(meta.ns):
                if j == i:
                    continue
                dist[i][j] = dist[j][i] = np.linalg.norm(agents[i] - agents[j])
                dmax = max(dist[i][j], dmax)
                dmin = min(dist[i][j], dmin)
        dist = np.interp(dist, (dmin, dmax), (1, 4))

        # Accumulate the social interaction per agent and dimension, then map
        # it to a bit-flip probability with the sigmoid transfer function.
        dT = np.zeros((meta.ns, meta.dim))
        for i in range(meta.ns):
            upd = np.zeros(meta.dim)
            for d in range(meta.dim):
                for j in range(meta.ns):
                    if i == j:
                        continue
                    upd[d] += (social(math.fabs(agents[j][d] - agents[i][d]))
                               * (agents[j][d] - agents[i][d]) / dist[i][j])
            upd = upd * (c * c * 0.5)
            dT[i] = np.array([sigmoid(x) for x in upd])

        # Binary position update: set each bit according to its probability.
        for i in range(meta.ns):
            for j in range(meta.dim):
                r = np.random.rand()
                agents[i][j] = 1 if r < dT[i][j] else 0

        # Track the best agent found so far.
        fitness = helper.fitness(agents)
        inds = fitness.argsort()[::-1]
        score = helper.fitness(np.asarray([agents[inds[0]]]))
        if score > maxscore:
            target = agents[inds[0]]
            maxscore = score
        print(maxscore)

    return target
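# bgoa() relies on social() and sigmoid() helpers that are not shown above.
# A minimal sketch, assuming the standard grasshopper social force
# s(r) = f*exp(-r/l) - exp(-r) with the usual constants f = 0.5, l = 1.5, and
# a plain sigmoid transfer function; the project's own definitions may differ.

def social(r, f=0.5, l=1.5):
    # Attraction/repulsion exerted between two grasshoppers at distance r.
    return f * math.exp(-r / l) - math.exp(-r)


def sigmoid(x):
    # Maps the continuous step size to a bit-flip probability in (0, 1).
    return 1.0 / (1.0 + math.exp(-x))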
def randomized_improvement(coords, method, stopping_iteration=1000):
    # Stochastic hill climbing: start from the identity tour and keep any
    # candidate produced by `method` that improves the fitness.
    nb_coords = len(coords)
    best_solution = list(range(nb_coords))
    best_fitness = fitness(coords, best_solution)
    steps = []
    for _ in range(stopping_iteration):
        candidate_solution = method(best_solution)
        candidate_fitness = fitness(coords, candidate_solution)
        if candidate_fitness < best_fitness:
            best_fitness, best_solution = candidate_fitness, candidate_solution
        steps.append(best_solution)
    return best_fitness, best_solution, steps
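# randomized_improvement(), simulated_annealing() and genetic() below all call
# a fitness() helper that is defined elsewhere. A minimal sketch, assuming
# coords is a sequence of 2-D points and the fitness is the total Euclidean
# length of the closed tour:

def fitness(coords, solution):
    # Sum of edge lengths along the tour, including the edge back to the start.
    total = 0.0
    for a, b in zip(solution, solution[1:] + solution[:1]):
        total += np.linalg.norm(np.asarray(coords[a]) - np.asarray(coords[b]))
    return total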
def simulated_annealing(coords, stopping_iteration=1000, alpha=0.9985):
    nb_coords = len(coords)
    T = nb_coords  # initial temperature scales with the problem size
    current_solution = list(range(nb_coords))
    current_fitness = fitness(coords, current_solution)
    best_solution = current_solution
    best_fitness = current_fitness
    iteration = 1
    steps = []
    while iteration < stopping_iteration:
        # Choose operation
        # if np.random.randint(2) == 0:
        #     candidate_solution = transport(current_solution)
        # else:
        #     candidate_solution = reverse(current_solution)
        candidate_solution = list(current_solution)
        l = np.random.randint(2, T + 1)  # lower temperature -> smaller changes
        i = np.random.randint(0, nb_coords)
        candidate_solution[i:(i + l)] = reversed(candidate_solution[i:(i + l)])

        # Accept with probability 1 if the candidate is better than the current
        # solution, and with probability exp(-∆E / T) if it is worse.
        candidate_fitness = fitness(coords, candidate_solution)
        if candidate_fitness < current_fitness:
            current_fitness, current_solution = candidate_fitness, candidate_solution
            if candidate_fitness < best_fitness:
                best_fitness, best_solution = candidate_fitness, candidate_solution
        else:
            probability = np.exp(-(candidate_fitness - current_fitness) / T)
            if np.random.random() < probability:
                current_fitness, current_solution = candidate_fitness, candidate_solution

        # Cool down and record the best tour seen so far.
        T *= alpha
        iteration += 1
        steps.append(best_solution)
    return best_fitness, best_solution, steps
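# The commented-out branch above refers to two neighbourhood operators,
# transport() and reverse(), that are not shown in this section. Plausible
# sketches are given below; both also work as the `method` argument of
# randomized_improvement(), e.g. randomized_improvement(coords, reverse).

def reverse(solution):
    # 2-opt style move: reverse a randomly chosen segment of the tour.
    candidate = list(solution)
    i, j = sorted(np.random.randint(0, len(candidate), size=2))
    candidate[i:j + 1] = reversed(candidate[i:j + 1])
    return candidate


def transport(solution):
    # Cut out a random segment and splice it back in at a random position.
    candidate = list(solution)
    i, j = sorted(np.random.randint(0, len(candidate), size=2))
    segment = candidate[i:j + 1]
    del candidate[i:j + 1]
    k = np.random.randint(0, len(candidate) + 1)
    return candidate[:k] + segment + candidate[k:]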
def population_fitness(self, population):
    # Score every individual with 1 / tour length, turning the minimisation
    # problem into a maximisation one, and return the scores ranked from
    # fittest to least fit (keyed by the individual's index in the population).
    population_fitness = {}
    for i, individual in enumerate(population):
        population_fitness[i] = 1 / fitness(self.coords, individual)
    return {
        k: v
        for k, v in sorted(
            population_fitness.items(), key=lambda item: item[1], reverse=True
        )
    }
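# One way a selection step could consume the ranked mapping returned by
# population_fitness(): an illustrative sketch using elitism plus
# fitness-proportionate (roulette-wheel) selection. The names and the exact
# scheme are assumptions, not necessarily what the Genetic class uses.

def select_parents(population, ranked_fitness, elite_size):
    ranked_indices = list(ranked_fitness.keys())
    # Elites are carried over untouched.
    selected = [population[i] for i in ranked_indices[:elite_size]]
    # Remaining slots are drawn with probability proportional to 1 / tour length.
    total = sum(ranked_fitness.values())
    probabilities = [v / total for v in ranked_fitness.values()]
    picks = np.random.choice(ranked_indices, size=len(population) - elite_size,
                             p=probabilities)
    selected.extend(population[i] for i in picks)
    return selected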
def genetic(coords, generations=500, population_size=100, elite_size=10, mutation_rate=0.01):
    # Evolve the population for a fixed number of generations, keeping a record
    # of the best tour after each generation.
    genetic = Genetic(coords, population_size=population_size,
                      elitist_factor=elite_size, mutation_rate=mutation_rate)
    population = genetic.initial_population()
    steps = []
    for _ in range(generations):
        population = genetic.next_generation(population)
        best_solution = genetic.best_solution(population)
        steps.append(best_solution)
    best_fitness = fitness(coords, best_solution)
    return best_fitness, best_solution, steps
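# Illustrative usage on random city coordinates (the Genetic class itself is
# defined elsewhere in the project):

if __name__ == "__main__":
    coords = np.random.rand(25, 2) * 100  # 25 random cities in a 100 x 100 square
    best_fitness, best_solution, steps = genetic(coords, generations=200)
    print("best fitness:", best_fitness)
    print("best tour:", best_solution)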