def solve(self, distance_matrix: np.ndarray, max_time: float = 10.0, start_cycle=None) -> Tuple[List[int], int]:
    # Start from a locally optimal solution produced by the candidate steepest search
    problem_solver = CandidateSteepSearch()
    best_cycle: List[int] = problem_solver.solve(distance_matrix)
    best_cost: int = utils.calculate_path_length(distance_matrix, best_cycle)
    local_search_invocation_count = 1

    time_start: float = time.time()
    duration = 0.0
    while duration < max_time:
        # Perturb the best cycle and re-optimise it with local search
        temp_cycle = self.__perturb(distance_matrix, best_cycle)
        temp_cycle = problem_solver.solve(distance_matrix, temp_cycle)
        local_search_invocation_count += 1

        # Accept the perturbed cycle only if it improves on the best one found so far
        cost = utils.calculate_path_length(distance_matrix, temp_cycle)
        if cost < best_cost:
            best_cost = cost
            best_cycle = temp_cycle

        duration = time.time() - time_start

    return best_cycle, local_search_invocation_count
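# The body of self.__perturb is not shown in this file. Below is a minimal sketch of one
# plausible perturbation, assuming a double-bridge move (a common choice for iterated
# local search) on a cycle of at least four nodes stored as a plain list of node indices;
# the actual __perturb may work differently.
def perturb_sketch(distance_matrix: np.ndarray, cycle: List[int]) -> List[int]:
    # distance_matrix is accepted only to mirror the original signature; a pure
    # double-bridge move does not need it
    n = len(cycle)
    # Pick three distinct cut points that split the cycle into four segments
    a, b, c = sorted(np.random.choice(np.arange(1, n), size=3, replace=False))
    # Reconnect the segments in a new order (classic double bridge)
    return cycle[:a] + cycle[b:c] + cycle[a:b] + cycle[c:]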
def solve(self, distance_matrix: np.ndarray, max_time: float = 10.0, start_cycle=None) -> Tuple[List[int], int]:
    problem_solver = CandidateSteepSearch()
    random_problem_solver = RandomSearch()

    # Generate a random population of distinct solutions
    population: List[List[int]] = []
    while len(population) < HybridEvolutionarySolver.POPULATION_SIZE:
        solution = random_problem_solver.solve(distance_matrix)
        if solution not in population:
            population.append(solution)
    population_costs = [utils.calculate_path_length(distance_matrix, solution) for solution in population]

    local_search_invocation_count = 0
    time_start: float = time.time()
    # Check the elapsed time in the loop condition itself so that iterations which
    # reject the child (via continue) still respect the time budget
    while time.time() - time_start < max_time:
        # Pick two distinct random parents
        parent_1 = population[np.random.randint(HybridEvolutionarySolver.POPULATION_SIZE)]
        parent_2 = parent_1
        while parent_2 == parent_1:
            parent_2 = population[np.random.randint(HybridEvolutionarySolver.POPULATION_SIZE)]

        # Recombine
        child = self.__recombine(parent_1, parent_2)

        # Improve the child with local search
        child = problem_solver.solve(distance_matrix, child)
        local_search_invocation_count += 1

        # Ignore the child if it is already in the population
        if child in population:
            continue

        # Search for the worst solution that is more expensive than the child
        child_cost = utils.calculate_path_length(distance_matrix, child)
        worst_cost = child_cost
        worst_index = -1
        for i, cost in enumerate(population_costs):
            if cost > worst_cost:
                worst_index = i
                worst_cost = cost

        # If no worse solution was found, ignore the child
        if worst_index == -1:
            continue

        # Replace the worst solution with the child
        population[worst_index] = child
        population_costs[worst_index] = child_cost

    best_index = np.argmin(population_costs)
    return population[best_index], local_search_invocation_count
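# The body of self.__recombine is not shown in this file. Below is a minimal sketch of
# one plausible recombination operator, assuming both parents visit the same set of
# nodes: positions where the parents agree are copied to the child, and the remaining
# nodes are filled in the order they appear in parent_2. The actual __recombine used by
# HybridEvolutionarySolver may use a different scheme (e.g. preserving common edges).
def recombine_sketch(parent_1: List[int], parent_2: List[int]) -> List[int]:
    # Keep nodes on which both parents agree, leave the other positions empty
    child = [node if node == other else None
             for node, other in zip(parent_1, parent_2)]
    # Fill the empty positions with the missing nodes, in parent_2 order
    used = {node for node in child if node is not None}
    missing = iter(node for node in parent_2 if node not in used)
    return [node if node is not None else next(missing) for node in child]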
def solve(self, distance_matrix: np.ndarray, start_cycle=None) -> List[int]:
    # With no time budget, return a single random cycle
    if self.gen_time == 0.0:
        return RandomSearch.__generate_random_cycle(distance_matrix)

    best_result_cycle = []
    best_result_cycle_length = np.iinfo(distance_matrix.dtype).max

    # Keep generating random cycles until the time budget runs out,
    # remembering the shortest one seen
    time_start = time.time()
    while time.time() - time_start < self.gen_time:
        result_cycle = RandomSearch.__generate_random_cycle(distance_matrix)
        result_cycle_length = utils.calculate_path_length(distance_matrix, result_cycle)
        if result_cycle_length < best_result_cycle_length:
            best_result_cycle_length = result_cycle_length
            best_result_cycle = result_cycle

    return best_result_cycle
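# RandomSearch.__generate_random_cycle is defined elsewhere in the class. Below is a
# minimal sketch under the assumption that it returns a uniformly random permutation of
# all node indices; the real method may, for example, sample only a subset of the nodes.
def generate_random_cycle_sketch(distance_matrix: np.ndarray) -> List[int]:
    # One random ordering of every node index of the distance matrix
    return np.random.permutation(distance_matrix.shape[0]).tolist()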
def __process_results(problem: StandardProblem, result_title: str, paths: list, times: list,
                      search_invocations: list = None):
    distance_matrix: np.ndarray = utils.create_distance_matrix(problem)

    # Calculate min, max and average cycle lengths
    cycle_lengths = [utils.calculate_path_length(distance_matrix, path) for path in paths]
    minimum_length, shortest_cycle_index = min((val, idx) for (idx, val) in enumerate(cycle_lengths))
    maximum_length = max(cycle_lengths)
    average_length = round(sum(cycle_lengths) / len(cycle_lengths))

    maximum_time = max(times)
    minimum_time = min(times)
    average_time = round(sum(times) / len(times), 3)

    # Draw best cycle
    shortest_path = [index + 1 for index in paths[shortest_cycle_index]]
    result_title = f"{result_title}_{shortest_path[0]}"
    utils.draw_graph(problem, shortest_path, result_title, minimum_length)

    print(result_title)
    print(f"Path : {shortest_path}")
    print(f"Cycle length (min) : {minimum_length}")
    print(f"Cycle length (max) : {maximum_length}")
    print(f"Cycle length (avg) : {average_length}")
    print(f"Time (min) : {round(minimum_time * 1000.0)}ms")
    print(f"Time (max) : {round(maximum_time * 1000.0)}ms")
    print(f"Time (avg) : {round(average_time * 1000.0)}ms")

    if search_invocations:
        maximum_invocations = max(search_invocations)
        minimum_invocations = min(search_invocations)
        average_invocations = round(sum(search_invocations) / len(search_invocations), 3)
        print(f"Search invocations (min) : {minimum_invocations}")
        print(f"Search invocations (max) : {maximum_invocations}")
        print(f"Search invocations (avg) : {average_invocations}")
    print()

    return average_time
def _solve_single(distance_matrix: np.ndarray, problem_solver: SearchProblemSolver):
    cycle = problem_solver.solve(distance_matrix)
    cost = utils.calculate_path_length(distance_matrix, cycle)
    return cycle, cost
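# _solve_single takes only plain, picklable-looking arguments, which suggests it is meant
# to run inside worker processes. Below is a minimal usage sketch, assuming a
# multiprocessing.Pool, a picklable solver object, and a hypothetical number of
# independent runs; the surrounding experiment code may dispatch it differently.
from multiprocessing import Pool
import os

def run_parallel_sketch(distance_matrix: np.ndarray, problem_solver: SearchProblemSolver,
                        runs: int = 8):
    with Pool(processes=os.cpu_count()) as pool:
        results = pool.starmap(_solve_single,
                               [(distance_matrix, problem_solver) for _ in range(runs)])
    # Each result is a (cycle, cost) pair; keep the cheapest cycle found
    return min(results, key=lambda result: result[1])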
def global_convexity_tests(problem: StandardProblem, number_of_solutions: int = 1000,
                           similarity_function=node_similarity, title: str = ""):
    distance_matrix = create_distance_matrix(problem)
    problem_solver = GreedyLocalSearch(use_node_swap=True)

    # Generate local optima in parallel, collecting results in batches of cpu_count tasks
    pool = Pool(processes=os.cpu_count())
    pool_results = []
    solutions = []
    for _ in range(number_of_solutions):
        if len(pool_results) == os.cpu_count():
            for pool_res in pool_results:
                solution = pool_res.get()
                solutions.append(np.array(solution))
            pool_results.clear()
        res = pool.apply_async(problem_solver.solve, (distance_matrix,))
        pool_results.append(res)

    # Collect the remaining results
    for pool_res in pool_results:
        solution = pool_res.get()
        solutions.append(np.array(solution))
    pool_results.clear()
    pool.close()

    solution_cost = np.array([
        calculate_path_length(distance_matrix, list(cycle)) for cycle in solutions
    ])
    best_cost_index = np.argmin(solution_cost)
    best_solution = solutions[best_cost_index]

    # Similarity of every solution to the best one found
    similarity = np.array([similarity_function(cycle, best_solution) for cycle in solutions])

    # For solution i: the mean similarity-to-best over all other solutions,
    # and its own similarity to the best solution
    average_other_similarity = np.zeros(shape=similarity.shape)
    best_solution_similarity = np.zeros(shape=similarity.shape)
    for i in np.arange(start=0, stop=similarity.shape[0]):
        mask = np.ones(similarity.shape, bool)
        mask[i] = False
        average_other_similarity[i] = np.average(similarity[mask])
        best_solution_similarity[i] = similarity_function(solutions[i], best_solution)

    correlation = np.corrcoef(solution_cost, average_other_similarity)[0][1]
    print(f"Correlation parameter ({title}) : {correlation}")

    # Plot both similarity measures against solution cost
    indices = np.argsort(solution_cost)
    plt.scatter(solution_cost[indices], average_other_similarity[indices],
                label="Average other similarity")
    plt.scatter(solution_cost[indices], best_solution_similarity[indices],
                label="Best solution similarity")
    plt.legend()
    plt.title(title)
    plt.savefig(f"./graphs/{title}.pdf")
    plt.show()
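# node_similarity is imported from elsewhere. Below is a minimal sketch, assuming it
# returns the fraction of nodes shared by two cycles; the project may also use an
# edge-based variant that compares undirected edges instead of nodes.
def node_similarity_sketch(cycle_a: np.ndarray, cycle_b: np.ndarray) -> float:
    # Number of nodes that appear in both cycles, normalised by the cycle length
    common = np.intersect1d(cycle_a, cycle_b).size
    return common / cycle_a.size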