def __init__(
    self,
    problem: Problem,
    solution: Solution,
    attribute: str = None,
    proportion: float = 0.3,
):
    """Set up a partial-shuffle mutation over a permutation attribute.

    Args:
        problem: problem instance, used to retrieve the encoding register.
        solution: a solution of ``problem``; its attribute register tells
            which attributes exist and their types.
        attribute: name of the permutation attribute to mutate. When None,
            the first attribute typed ``TypeAttribute.PERMUTATION`` found
            in the register is used.
        proportion: fraction of the permutation positions moved per
            mutation call (truncated to an integer count).
    """
    self.problem = problem
    self.register: EncodingRegister = solution.get_attribute_register(problem)
    self.attribute = attribute
    if self.attribute is None:
        # Auto-detect: pick the first PERMUTATION-typed attribute
        # (same fallback as the sibling mutation constructors).
        attributes = [
            k
            for k in self.register.dict_attribute_to_type
            for t in self.register.dict_attribute_to_type[k]["type"]
            if t == TypeAttribute.PERMUTATION
        ]
        if len(attributes) > 0:
            self.attribute = attributes[0]
    self.range_shuffle = self.register.dict_attribute_to_type[
        self.attribute]["range"]
    # Number of positions moved per mutation, given the requested proportion.
    self.n_to_move = int(proportion * len(self.range_shuffle))
    self.range_int = list(range(self.n_to_move))
    self.range_int_total = list(range(len(self.range_shuffle)))
def mutate(self, solution: Solution) -> Tuple[Solution, LocalMove]:
    """Randomly resample entries of the integer-vector attribute.

    Each of the ``self.size`` positions is, with probability
    ``self.probability_flip``, replaced by a value drawn uniformly from
    ``self.range_arrities`` at that position. Returns the mutated copy
    together with a default local move for backtracking.
    """
    mutated = solution.copy()
    values = getattr(mutated, self.attribute)
    for index in range(self.size):
        if random.random() > self.probability_flip:
            continue
        # Resample this position from its admissible value set.
        values[index] = random.choice(self.range_arrities[index])
    setattr(mutated, self.attribute, values)
    return mutated, LocalMoveDefault(solution, mutated)
def mutate(self, solution: Solution) -> Tuple[Solution, LocalMove]:
    """Produce a full reshuffle of the permutation attribute.

    The internal index list ``self.range_int`` is shuffled in place and
    used to reorder the current permutation. The returned move records
    both orderings so the mutation can be backtracked.
    """
    old_permutation = list(getattr(solution, self.attribute))
    random.shuffle(self.range_int)
    # Reorder the old values according to the freshly shuffled indices.
    shuffled = list(map(old_permutation.__getitem__, self.range_int))
    mutated = solution.lazy_copy()
    setattr(mutated, self.attribute, shuffled)
    return mutated, ShuffleMove(self.attribute,
                                new_permutation=shuffled,
                                prev_permutation=old_permutation)
def update(self, nv: Solution, fitness: float, improved_global: bool,
           improved_local: bool):
    """Record the outcome of one local-search iteration.

    Increments the iteration counter, stores the new global best
    solution/fitness when ``improved_global`` is set, and maintains the
    two no-improvement stall counters used for restart decisions.
    """
    self.nb_iteration += 1
    if improved_global:
        # New global best: remember it and reset the global stall counter.
        self.best_fitness = fitness
        self.solution_best = nv.copy()
        self.nb_iteration_no_global_improve = 0
    else:
        self.nb_iteration_no_global_improve += 1
    self.nb_iteration_no_local_improve = (
        0 if improved_local else self.nb_iteration_no_local_improve + 1
    )
def mutate(self, solution: Solution) -> Tuple[Solution, LocalMove]:
    """Partially shuffle the permutation attribute.

    ``self.n_to_move`` randomly chosen positions have their values
    permuted among themselves; every other position is left untouched.

    Fix: the new permutation is built in an independent copy instead of
    being written into the attribute object of the input ``solution``,
    so mutating no longer alters the input solution as a side effect
    (matching the non-mutating behavior of the full-shuffle ``mutate``).
    """
    previous = deepcopy(getattr(solution, self.attribute))
    # Choose which positions will be moved...
    random.shuffle(self.range_int_total)
    int_to_move = self.range_int_total[:self.n_to_move]
    # ...and a random rearrangement of those positions' values.
    random.shuffle(self.range_int)
    # deepcopy keeps the attribute's concrete container type intact.
    new = deepcopy(previous)
    for k in range(self.n_to_move):
        new[int_to_move[k]] = previous[int_to_move[self.range_int[k]]]
    sol = solution.lazy_copy()
    setattr(sol, self.attribute, new)
    return sol, ShuffleMove(self.attribute, new, previous)
def __init__(self, problem: Problem, solution: Solution, attribute: str = None):
    """Shuffle-mutation setup: resolve which permutation attribute to act on.

    When ``attribute`` is None, the first attribute typed
    ``TypeAttribute.PERMUTATION`` in the solution's register is used.
    """
    self.problem = problem
    self.register: EncodingRegister = solution.get_attribute_register(problem)
    self.attribute = attribute
    if self.attribute is None:
        # Fall back to the first PERMUTATION-typed attribute in the register.
        permutation_attributes = [
            name
            for name, description in self.register.dict_attribute_to_type.items()
            for type_ in description["type"]
            if type_ == TypeAttribute.PERMUTATION
        ]
        if permutation_attributes:
            self.attribute = permutation_attributes[0]
    self.range_shuffle = self.register.dict_attribute_to_type[
        self.attribute]["range"]
    self.range_int = list(range(len(self.range_shuffle)))
def __init__(
    self,
    problem: Problem,
    solution: Solution,
    attribute: str = None,
    nb_swap: int = 1,
):
    """Swap-mutation setup.

    Resolves which permutation attribute to act on (auto-detecting the
    first PERMUTATION-typed one when ``attribute`` is None) and caches
    the permutation length and the number of swaps per mutation.
    """
    self.problem = problem
    self.register: EncodingRegister = solution.get_attribute_register(problem)
    self.nb_swap = nb_swap
    self.attribute = attribute
    if self.attribute is None:
        candidates = [
            name
            for name, description in self.register.dict_attribute_to_type.items()
            for type_ in description["type"]
            if type_ == TypeAttribute.PERMUTATION
        ]
        if candidates:
            self.attribute = candidates[0]
    self.length = len(self.register.dict_attribute_to_type[self.attribute]["range"])
def solve(
    self,
    initial_variable: Solution,
    nb_iteration_max: int,
    pickle_result=False,
    pickle_name="tsp",
) -> ResultLS:
    """Run a simulated-annealing local search from ``initial_variable``.

    Args:
        initial_variable: starting solution; evaluated once, then mutated
            for ``nb_iteration_max`` iterations.
        nb_iteration_max: total number of mutation iterations.
        pickle_result: when True, dump the current best solution to
            ``pickle_name + ".pk"`` every 20000 iterations.
        pickle_name: base filename for the periodic pickle dump.

    Returns:
        The ``ResultStorage`` accumulated during the search: all visited
        solutions when ``self.store_solution`` is set, otherwise only the
        successive global bests.
    """
    objective = self.aggreg_from_dict_values(
        self.evaluator.evaluate(initial_variable))
    cur_variable = initial_variable.copy()
    cur_best_variable = initial_variable.copy()
    cur_objective = objective
    cur_best_objective = objective
    if self.store_solution:
        # Keep a (bounded) trace of visited solutions.
        store = ResultStorage(
            list_solution_fits=[(initial_variable, objective)],
            best_solution=initial_variable.copy(),
            limit_store=True,
            nb_best_store=1000,
        )
    else:
        # Only retain the single best solution.
        store = ResultStorage(
            list_solution_fits=[(initial_variable, objective)],
            best_solution=initial_variable.copy(),
            limit_store=True,
            nb_best_store=1,
        )
    self.restart_handler.best_fitness = objective
    iteration = 0
    while iteration < nb_iteration_max:
        local_improvement = False
        global_improvement = False
        local_move_accepted = False
        # Propose a neighbor; some mutators evaluate as part of mutation.
        if self.mode_mutation == ModeMutation.MUTATE:
            nv, move = self.mutator.mutate(cur_variable)
            objective = self.aggreg_from_dict_values(
                self.evaluator.evaluate(nv))
        elif self.mode_mutation == ModeMutation.MUTATE_AND_EVALUATE:
            nv, move, objective = self.mutator.mutate_and_compute_obj(
                cur_variable)
            objective = self.aggreg_from_dict_values(objective)
        if self.mode_optim == ModeOptim.MINIMIZATION and objective < cur_objective:
            accept = True
            local_improvement = True
            global_improvement = objective < cur_best_objective
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective > cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective > cur_best_objective
        else:
            # Metropolis criterion: accept a worsening move with
            # probability exp(-|delta| / T); `fac` orients the delta so
            # the exponent is negative for a worsening move in both
            # optimization senses.
            r = random.random()
            fac = 1 if self.mode_optim == ModeOptim.MAXIMIZATION else -1
            p = math.exp(fac * (objective - cur_objective)
                         / self.temperature_handler.temperature)
            accept = p > r
            local_move_accepted = accept
        if accept:
            cur_objective = objective
            cur_variable = nv
        else:
            # Rejected: undo the move on the mutated object.
            cur_variable = move.backtrack_local_move(nv)
        if self.store_solution:
            store.add_solution(nv.copy(), objective)
        if global_improvement:
            print("iter ", iteration)
            print("new obj ", objective, " better than ", cur_best_objective)
            cur_best_objective = objective
            cur_best_variable = cur_variable.copy()
            if not self.store_solution:
                store.add_solution(cur_variable.copy(), objective)
        self.temperature_handler.next_temperature()  # advance the cooling schedule
        # Feed the restart handler with this iteration's outcome.
        self.restart_handler.update(nv, objective, global_improvement,
                                    local_improvement)
        # Possibly restart from another solution.
        cur_variable, cur_objective = self.restart_handler.restart(
            cur_variable, cur_objective)
        iteration += 1
        if pickle_result and iteration % 20000 == 0:
            # NOTE(review): file handle is not closed explicitly here.
            pickle.dump(cur_best_variable, open(pickle_name + ".pk", "wb"))
    store.finalize()
    return store
def solve(
    self,
    initial_variable: Solution,
    nb_iteration_max: int,
    update_iteration_pareto=1000,
    pickle_result=False,
    pickle_name="tsp",
) -> ResultLS:
    """Multi-objective local search maintaining a Pareto front.

    Accepts strictly improving moves and equal-fitness moves (to keep
    exploring plateaus and collect non-dominated points); rejected moves
    are backtracked.

    Args:
        initial_variable: starting solution.
        nb_iteration_max: total number of mutation iterations.
        update_iteration_pareto: every that many iterations the front is
            re-finalized (dominated points pruned).
        pickle_result: currently unused (periodic dump is disabled).
        pickle_name: currently unused.

    Returns:
        The ``ParetoFront`` built during the search, finalized.
    """
    objective = self.aggreg_from_dict_values(
        self.evaluator.evaluate(initial_variable))
    pareto_front = ParetoFront(
        list_solution_fits=[(initial_variable, objective)],
        best_solution=initial_variable.copy(),
        limit_store=True,
        nb_best_store=1000,
    )
    cur_variable = initial_variable.copy()
    cur_best_variable = initial_variable.copy()
    cur_objective = objective
    cur_best_objective = objective
    self.restart_handler.best_fitness = objective
    iteration = 0
    while iteration < nb_iteration_max:
        accept = False
        local_improvement = False
        global_improvement = False
        if iteration % update_iteration_pareto == 0:
            # Periodically prune dominated points from the front.
            pareto_front.finalize()
        if self.mode_mutation == ModeMutation.MUTATE:
            # NOTE(review): uses aggreg_from_solution here while the
            # single-objective solver uses
            # aggreg_from_dict_values(evaluator.evaluate(nv)) — confirm
            # both aggregation paths are equivalent for this evaluator.
            nv, move = self.mutator.mutate(cur_variable)
            objective = self.aggreg_from_solution(nv)
        elif self.mode_mutation == ModeMutation.MUTATE_AND_EVALUATE:
            nv, move, objective = self.mutator.mutate_and_compute_obj(
                cur_variable)
            objective = self.aggreg_from_dict_values(objective)
        if self.mode_optim == ModeOptim.MINIMIZATION and objective < cur_objective:
            accept = True
            local_improvement = True
            global_improvement = objective < cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MINIMIZATION
              and objective == cur_objective):
            accept = True
            local_improvement = True
            # NOTE(review): a tie with the current best is flagged as a
            # "global improvement" (triggers logging and best-copy below)
            # — confirm this is intended for plateau exploration.
            global_improvement = objective == cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective > cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective > cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        elif (self.mode_optim == ModeOptim.MAXIMIZATION
              and objective == cur_objective):
            accept = True
            local_improvement = True
            global_improvement = objective == cur_best_objective
            pareto_front.add_solution(nv.copy(), objective)
        if accept:
            print("Accept : ", objective)
            cur_objective = objective
            cur_variable = nv
        else:
            # Rejected: undo the move on the mutated object.
            cur_variable = move.backtrack_local_move(nv)
        if global_improvement:
            print("iter ", iteration)
            print("new obj ", objective, " better than ", cur_best_objective)
            cur_best_objective = objective
            cur_best_variable = cur_variable.copy()
        # Feed the restart handler with this iteration's outcome.
        self.restart_handler.update(nv, objective, global_improvement,
                                    local_improvement)
        print("Len pareto : ", pareto_front.len_pareto_front())
        # Possibly restart from another solution.
        cur_variable, cur_objective = self.restart_handler.restart(
            cur_variable, cur_objective)
        iteration += 1
    pareto_front.finalize()
    return pareto_front
def __init__(self, nb_iteration_no_improvement, cur_solution: Solution,
             cur_objective):
    """Restart handler triggering after a fixed number of non-improving
    iterations.

    Args:
        nb_iteration_no_improvement: stall threshold before a restart.
        cur_solution: initial incumbent; a copy is stored as the current
            best so later in-place mutations cannot corrupt it.
        cur_objective: fitness of ``cur_solution``.
    """
    RestartHandler.__init__(self)
    self.nb_iteration_no_improvement = nb_iteration_no_improvement
    self.solution_best = cur_solution.copy()
    self.best_fitness = cur_objective