def mutate(self, solution: Solution) -> Tuple[Solution, LocalMove]:
    """Randomly resample entries of the attribute vector of a solution copy.

    The solution is copied, then each of the ``self.size`` positions of the
    vector stored under ``self.attribute`` is, with probability
    ``self.probability_flip``, replaced by a value drawn uniformly from
    ``self.range_arrities[k]``.

    :param solution: solution to mutate (left untouched; a copy is edited).
    :return: the mutated copy and a move object allowing backtracking.
    """
    mutant = solution.copy()
    values = getattr(mutant, self.attribute)
    for index in range(self.size):
        # One Bernoulli draw per position; on success, resample that entry.
        if random.random() <= self.probability_flip:
            values[index] = random.choice(self.range_arrities[index])
    setattr(mutant, self.attribute, values)
    return mutant, LocalMoveDefault(solution, mutant)
def update(self, nv: Solution, fitness: float, improved_global: bool, improved_local: bool):
    """Record the outcome of one local-search iteration.

    Increments the iteration counter, and resets or increments the
    global/local stagnation counters depending on whether the iteration
    improved on the best-ever or the current solution.  On a global
    improvement, a snapshot of the new best solution and its fitness is kept.

    :param nv: the solution produced at this iteration.
    :param fitness: its (aggregated) fitness value.
    :param improved_global: True if ``nv`` beats the best solution so far.
    :param improved_local: True if ``nv`` beats the current solution.
    """
    self.nb_iteration += 1
    if not improved_global:
        self.nb_iteration_no_global_improve += 1
    else:
        # New best-ever solution: snapshot it and reset global stagnation.
        self.nb_iteration_no_global_improve = 0
        self.best_fitness = fitness
        self.solution_best = nv.copy()
    self.nb_iteration_no_local_improve = (
        0 if improved_local else self.nb_iteration_no_local_improve + 1
    )
def solve(
    self,
    initial_variable: Solution,
    nb_iteration_max: int,
    pickle_result=False,
    pickle_name="tsp",
) -> ResultLS:
    """Run a simulated-annealing loop starting from ``initial_variable``.

    Each iteration mutates the current solution, accepts it if it improves
    the current objective (per ``self.mode_optim``) or, otherwise, with the
    Metropolis probability exp(+/-(delta)/T) given by the temperature
    handler.  Solutions are logged in a ``ResultStorage`` (all of them when
    ``self.store_solution`` is set, only global improvements otherwise).

    :param initial_variable: starting solution (copied, not modified).
    :param nb_iteration_max: number of annealing iterations to run.
    :param pickle_result: if True, periodically dump the best solution.
    :param pickle_name: basename of the pickle file (``<name>.pk``).
    :return: the finalized result storage.
    """
    objective = self.aggreg_from_dict_values(
        self.evaluator.evaluate(initial_variable))
    cur_variable = initial_variable.copy()
    cur_best_variable = initial_variable.copy()
    cur_objective = objective
    cur_best_objective = objective
    if self.store_solution:
        # Keep a (bounded) history of visited solutions.
        store = ResultStorage(
            list_solution_fits=[(initial_variable, objective)],
            best_solution=initial_variable.copy(),
            limit_store=True,
            nb_best_store=1000,
        )
    else:
        # Only track the single best solution.
        store = ResultStorage(
            list_solution_fits=[(initial_variable, objective)],
            best_solution=initial_variable.copy(),
            limit_store=True,
            nb_best_store=1,
        )
    self.restart_handler.best_fitness = objective
    iteration = 0
    while iteration < nb_iteration_max:
        local_improvement = False
        global_improvement = False
        local_move_accepted = False
        # Generate a neighbour; depending on the mutation mode the mutator
        # may already return the objective, avoiding a re-evaluation.
        if self.mode_mutation == ModeMutation.MUTATE:
            nv, move = self.mutator.mutate(cur_variable)
            objective = self.aggreg_from_dict_values(
                self.evaluator.evaluate(nv))
        elif self.mode_mutation == ModeMutation.MUTATE_AND_EVALUATE:
            nv, move, objective = self.mutator.mutate_and_compute_obj(
                cur_variable)
            objective = self.aggreg_from_dict_values(objective)
        # Acceptance test: always accept strict improvements, otherwise fall
        # back to the Metropolis criterion at the current temperature.
        if self.mode_optim == ModeOptim.MINIMIZATION and objective < cur_objective:
            accept = True
            local_improvement = True
            global_improvement = objective < cur_best_objective
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective > cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective > cur_best_objective
        else:
            r = random.random()
            # fac orients the delta so that worse moves give p < 1 in both
            # optimization directions.
            fac = 1 if self.mode_optim == ModeOptim.MAXIMIZATION else -1
            p = math.exp(fac * (objective - cur_objective)
                         / self.temperature_handler.temperature)
            accept = p > r
            local_move_accepted = accept
        if accept:
            cur_objective = objective
            cur_variable = nv
            # print("iter ", iteration)
            # print("acceptance ", objective)
        else:
            # Rejected: undo the move to restore the previous solution.
            cur_variable = move.backtrack_local_move(nv)
            # print(move)
            # print("cur_variable", cur_variable)
        if self.store_solution:
            store.add_solution(nv.copy(), objective)
        if global_improvement:
            print("iter ", iteration)
            # print(cur_variable)
            print("new obj ", objective, " better than ", cur_best_objective)
            cur_best_objective = objective
            cur_best_variable = cur_variable.copy()
            if not self.store_solution:
                store.add_solution(cur_variable.copy(), objective)
        self.temperature_handler.next_temperature()  # Update the temperature
        self.restart_handler.update(
            nv, objective, global_improvement,
            local_improvement)  # Update info in restart handler
        cur_variable, cur_objective = self.restart_handler.restart(
            cur_variable, cur_objective)  # possibly restart somewhere
        iteration += 1
        # Periodic checkpoint of the best solution found so far.
        if pickle_result and iteration % 20000 == 0:
            pickle.dump(cur_best_variable, open(pickle_name + ".pk", "wb"))
    store.finalize()
    return store
def solve(
    self,
    initial_variable: Solution,
    nb_iteration_max: int,
    update_iteration_pareto=1000,
    pickle_result=False,
    pickle_name="tsp",
) -> ResultLS:
    """Run a multi-objective local search, maintaining a Pareto front.

    Same hill-climbing skeleton as the single-objective solver, but every
    accepted (improving or equal) solution is added to a ``ParetoFront``,
    which is periodically finalized (pruned) every
    ``update_iteration_pareto`` iterations.  Note: non-improving moves are
    always rejected here — there is no Metropolis acceptance.

    :param initial_variable: starting solution (copied, not modified).
    :param nb_iteration_max: number of iterations to run.
    :param update_iteration_pareto: period of the pareto-front finalization.
    :param pickle_result: currently unused (checkpointing is commented out).
    :param pickle_name: currently unused (checkpointing is commented out).
    :return: the finalized pareto front.
    """
    objective = self.aggreg_from_dict_values(
        self.evaluator.evaluate(initial_variable))
    pareto_front = ParetoFront(
        list_solution_fits=[(initial_variable, objective)],
        best_solution=initial_variable.copy(),
        limit_store=True,
        nb_best_store=1000,
    )
    cur_variable = initial_variable.copy()
    cur_best_variable = initial_variable.copy()
    cur_objective = objective
    cur_best_objective = objective
    self.restart_handler.best_fitness = objective
    iteration = 0
    while iteration < nb_iteration_max:
        accept = False
        local_improvement = False
        global_improvement = False
        # Periodically prune/refresh the pareto front.
        if iteration % update_iteration_pareto == 0:
            pareto_front.finalize()
        # Generate a neighbour and compute its (multi-objective) fitness.
        # NOTE(review): the MUTATE branch aggregates via aggreg_from_solution
        # while MUTATE_AND_EVALUATE uses aggreg_from_dict_values — presumably
        # equivalent entry points; verify against the aggregator definitions.
        if self.mode_mutation == ModeMutation.MUTATE:
            nv, move = self.mutator.mutate(cur_variable)
            objective = self.aggreg_from_solution(nv)
        elif self.mode_mutation == ModeMutation.MUTATE_AND_EVALUATE:
            nv, move, objective = self.mutator.mutate_and_compute_obj(
                cur_variable)
            objective = self.aggreg_from_dict_values(objective)
        # Accept strictly better or equal solutions; equal ones are kept so
        # the pareto front can accumulate non-dominated alternatives.
        if self.mode_optim == ModeOptim.MINIMIZATION and objective < cur_objective:
            accept = True
            local_improvement = True
            global_improvement = objective < cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MINIMIZATION
              and objective == cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective == cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective > cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective > cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective == cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective == cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        if accept:
            print("Accept : ", objective)
            cur_objective = objective
            cur_variable = nv
        else:
            # Rejected: undo the move to restore the previous solution.
            cur_variable = move.backtrack_local_move(nv)
        if global_improvement:
            print("iter ", iteration)
            print("new obj ", objective, " better than ", cur_best_objective)
            cur_best_objective = objective
            cur_best_variable = cur_variable.copy()
        # Update the temperature
        self.restart_handler.update(nv, objective, global_improvement,
                                    local_improvement)
        print("Len pareto : ", pareto_front.len_pareto_front())
        # Update info in restart handler
        cur_variable, cur_objective = self.restart_handler.restart(
            cur_variable, cur_objective)  # possibly restart somewhere
        iteration += 1
        # if pickle_result and iteration % 20000 == 0:
        #     pickle.dump(cur_best_variable, open(pickle_name + ".pk", "wb"))
    pareto_front.finalize()
    return pareto_front
def __init__(self, nb_iteration_no_improvement, cur_solution: Solution, cur_objective):
    """Restart handler parameterized by a stagnation threshold.

    :param nb_iteration_no_improvement: number of non-improving iterations
        after which a restart is triggered.
    :param cur_solution: initial solution, snapshotted as the current best.
    :param cur_objective: fitness of ``cur_solution``.
    """
    RestartHandler.__init__(self)
    # Seed the handler's best-known state from the provided starting point.
    self.solution_best = cur_solution.copy()
    self.best_fitness = cur_objective
    self.nb_iteration_no_improvement = nb_iteration_no_improvement