def main():
    regret, times = get_value()
    for instance_name in ['kroA100', 'kroB100']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        # start_node = random.randrange(instance.matrix.shape[0])
        list_of_solutions = list()
        best_greedy = None
        times = min(times, instance.matrix.shape[0])
        node_to_choose = list(range(instance.matrix.shape[0]))
        for i in range(times):
            first_start_node = random.choice(node_to_choose)
            second_start_node = np.argmax(instance.matrix[first_start_node])
            node_to_choose = list(
                set(node_to_choose) - set([first_start_node, second_start_node]))
            greedy = Greedy(instance=instance, regret=regret)
            greedy.solve(first_start_node, second_start_node)
            list_of_solutions.append(greedy.compute_total_cost())
            if best_greedy is None or list_of_solutions[-1] < best_greedy.compute_total_cost():
                best_greedy = greedy
        print(f'Instance: {instance_name}')
        print(f'Best First Path: {best_greedy.first_solution}')
        print(f'Best Second Path: {best_greedy.second_solution}')
        print(f'Best Cost: {best_greedy.compute_total_cost()}')
        print()
        print('Statistics:')
        print(f'Min Cost: {min(list_of_solutions)}')
        print(f'Max Cost: {max(list_of_solutions)}')
        print(f'Average Cost: {np.mean(list_of_solutions)}')
        print()
        plot_title = (f'{best_greedy.algorithm}, {best_greedy.instance.instance_name}, '
                      f'Regret: {best_greedy.regret}, Distance: {best_greedy.compute_total_cost()}')
        save_name = '{}-r_{}'.format(best_greedy.instance.instance_name, best_greedy.regret)
        plot_result(best_greedy, plot_title, save_name)
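# The Greedy class used above is not shown in this listing. As a minimal sketch
# of the regret criterion it is parameterised with (all names here are
# illustrative, assuming a NumPy distance matrix and a partial cycle stored as a
# list of node indices), a 2-regret insertion step could look like this:
import numpy as np

def regret_insertion_choice(matrix, cycle, unvisited):
    """Return the unvisited node with the largest 2-regret and the edge index
    after which it is cheapest to insert it (hypothetical helper)."""
    best_node, best_edge, best_regret = None, None, -np.inf
    for node in unvisited:
        # Insertion cost of `node` into every edge of the current cycle.
        costs = []
        for i in range(len(cycle)):
            a, b = cycle[i], cycle[(i + 1) % len(cycle)]
            costs.append((matrix[a, node] + matrix[node, b] - matrix[a, b], i))
        costs.sort()
        # 2-regret: how much is lost if this node's best slot is not taken now.
        regret = costs[1][0] - costs[0][0] if len(costs) > 1 else costs[0][0]
        if regret > best_regret:
            best_node, best_edge, best_regret = node, costs[0][1], regret
    return best_node, best_edge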
def main():
    times_number = get_value()
    times = defaultdict(list)
    scores = defaultdict(list)
    for instance_name in ['kroA200', 'kroB200']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        ####################################
        max_time = 600.0
        print(f'MSLS time: {max_time}')
        for number in range(times_number):
            print(f'Global iteration number: {number+1}')
            ##################################################################
            print('\t-> OwnMethod')
            own_method = OwnMethod(instance)
            own_method.neighborhood = 'edges'
            own_method.first_solution = None
            own_method.second_solution = None
            time = own_method.solve(max_time=max_time)
            times[f'OwnMethod, {instance_name}'].append(time)
            scores[f'OwnMethod, {instance_name}'].append(deepcopy(own_method))
            print(own_method.compute_total_cost())

    save_string = '\n'
    worst_time = float('-inf')
    for key in scores.keys():
        best = min(scores[key], key=lambda el: el.compute_total_cost())
        costs = list(map(lambda el: el.compute_total_cost(), scores[key]))
        save_string += f'Version: {key}\nMean: {np.mean(costs)}\nMin: {min(costs)}\nMax: {max(costs)}\n\n|TIMES|\n'
        save_string += f'\nMean: {np.mean(times[key])}\nMin: {min(times[key])}\nMax: {max(times[key])}\n\n==========\n\n'
        if np.mean(times[key]) > worst_time:
            worst_time = np.mean(times[key])
        plot_best(best, key)
    print(save_string)
    save_string_fn(save_string, 'OwnMethod_results', None)
url(r'^instance_history/' + '(?P<instance_id>%s)/' % uuid_match + 'status_history$',
    InstanceStatusHistoryDetail.as_view(), name='instance-history'),
url(identity_specific + r'/instance/' + '(?P<instance_id>%s)/tag$' % uuid_match,
    InstanceTagList.as_view(), name='instance-tag-list'),
url(identity_specific + r'/instance/' + '(?P<instance_id>%s)/tag/(?P<tag_slug>.*)$' % uuid_match,
    InstanceTagDetail.as_view(), name='instance-tag-detail'),
url(identity_specific + r'/instance/' + '(?P<instance_id>%s)/action$' % uuid_match,
    InstanceAction.as_view(), name='instance-action'),
url(identity_specific + r'/instance/(?P<instance_id>%s)$' % uuid_match,
    Instance.as_view(), name='instance-detail'),
url(identity_specific + r'/instance$', InstanceList.as_view(), name='instance-list'),
url(r'^instance_action/$', InstanceActionList.as_view(), name='instance-action-list'),
url(r'^instance_action/(?P<action_id>%s)$' % (id_match,),
    InstanceActionDetail.as_view(), name='instance-action-detail'),
url(identity_specific + r'/size$', SizeList.as_view(), name='size-list'),
url(identity_specific + r'/size/(?P<size_id>%s)$' % (id_match,),
    Size.as_view(), name='size-detail'),
from api.instance import Instance
from strategies.destroy_repair.destroy_repair import DestroyRepairLocalSearch
from strategies.iterated_local_search.iterated_local_search import IteratedLocalSearch
from strategies.local_search.local_search import LocalSearch
from strategies.ls_cache.local_search_with_cache import LocalSearchWitchCache
from strategies.multiple_local_search.multiple_local_search import MultipleStartLocalSearch
from utils.utils import draw_solution
import pandas as pd
import numpy as np
import seaborn as sns  # required for sns.set() below; missing in the original snippet

sns.set()

df = pd.DataFrame(columns=['version', 'instance', 'cost', 'time'])
for instance_name in ['kroA200', 'kroB200']:
    for p in [40, 85]:
        instance = Instance(name=instance_name)
        solve_strategy: DestroyRepairLocalSearch = DestroyRepairLocalSearch(
            instance=instance, perturbation=p)
        solve_strategy.run(run_times=1)
        # raise KeyError
        costs = list(map(lambda x: x[1], solve_strategy.solutions))
        times = list(map(lambda x: x[2], solve_strategy.solutions))
        print(instance_name, min(costs), np.mean(times))
        for s, cost, time in solve_strategy.solutions:
            df = df.append(
                pd.DataFrame([[p, instance_name, cost, time]],
                             columns=['version', 'instance', 'cost', 'time']))
# draw_solution(
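# Note: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, so the
# loop above fails on pandas >= 2.0. A drop-in alternative is to collect plain
# dicts and build the frame once (same columns as above; dummy row for illustration):
import pandas as pd

rows = []
# inside the loop, instead of `df = df.append(...)`:
#     rows.append({'version': p, 'instance': instance_name, 'cost': cost, 'time': time})
rows.append({'version': 40, 'instance': 'kroA200', 'cost': 0.0, 'time': 0.0})
df = pd.DataFrame(rows, columns=['version', 'instance', 'cost', 'time'])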
def main():
    times_number = get_value()
    times = defaultdict(list)
    scores = defaultdict(list)
    for instance_name in ['kroA200', 'kroB200']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        ####################################
        max_time = 733.0
        print(f'MSLS time: {max_time}')
        unavailable_points = list()
        for number in range(times_number):
            solutions = []
            for i in range(20):
                first_start_node = random.choice(
                    list(set(list(instance.point_dict.keys())) - set(unavailable_points)))
                second_start_node = np.argmax(instance.matrix[first_start_node])
                unavailable_points.append(first_start_node)
                unavailable_points.append(second_start_node)
                # Get random solution
                random_cycle = Random(instance, seed=None)
                random_cycle.solve(first_start_node, second_start_node)
                population = Replacement(random_cycle.first_solution[:],
                                         random_cycle.second_solution[:],
                                         random_cycle.compute_total_cost())
                solutions.append(population)
            print(f'Global iteration number: {number+1}')

            ##################################################################
            # SteadyState + LS
            print('\t-> SteadyState + LS')
            steady_state = SteadyState(instance, deepcopy(solutions))
            steady_state.neighborhood = 'edges'
            steady_state.first_solution = None
            steady_state.second_solution = None
            time = steady_state.solve(n_candidats=8, max_time=max_time, ls=True)
            times[f'SteadyState + LS, {instance_name}'].append(time)
            scores[f'SteadyState + LS, {instance_name}'].append(deepcopy(steady_state))

            ##################################################################
            # SteadyState - LS
            print('\t-> SteadyState - LS')
            steady_state = SteadyState(instance, deepcopy(solutions))
            steady_state.neighborhood = 'edges'
            steady_state.first_solution = None
            steady_state.second_solution = None
            time = steady_state.solve(n_candidats=8, max_time=max_time, ls=False)
            times[f'SteadyState - LS, {instance_name}'].append(time)
            scores[f'SteadyState - LS, {instance_name}'].append(deepcopy(steady_state))

    save_string = '\n'
    worst_time = float('-inf')
    for key in scores.keys():
        best = min(scores[key], key=lambda el: el.compute_total_cost())
        costs = list(map(lambda el: el.compute_total_cost(), scores[key]))
        save_string += f'Version: {key}\nMean: {np.mean(costs)}\nMin: {min(costs)}\nMax: {max(costs)}\n\n|TIMES|\n'
        save_string += f'\nMean: {np.mean(times[key])}\nMin: {min(times[key])}\nMax: {max(times[key])}\n\n==========\n\n'
        if np.mean(times[key]) > worst_time:
            worst_time = np.mean(times[key])
        plot_best(best, key)
    print(save_string)
    save_string_fn(save_string, 'SteadyState_results', None)
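# The SteadyState implementation itself is not part of this listing. A hedged
# sketch of one steady-state step (all callables below are placeholders, not the
# real API): draw two parents, recombine, optionally run local search on the
# child, and replace the worst population member only if the child improves on it.
import random

def steady_state_step(population, recombine, evaluate, local_search=None):
    """One illustrative steady-state iteration; lower `evaluate` is better."""
    parent_a, parent_b = random.sample(population, 2)
    child = recombine(parent_a, parent_b)
    if local_search is not None:  # corresponds to the "+ LS" variant above
        child = local_search(child)
    worst = max(population, key=evaluate)
    # Keep the population size constant and avoid inserting duplicates.
    if evaluate(child) < evaluate(worst) and child not in population:
        population[population.index(worst)] = child
    return population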
    r"^instance_history/" "(?P<instance_id>[a-zA-Z0-9-]+)$",
    InstanceHistoryDetail.as_view(),
    name="instance-history",
),
url(
    r"^instance_history/" "(?P<instance_id>[a-zA-Z0-9-]+)/" "status_history$",
    InstanceStatusHistoryDetail.as_view(),
    name="instance-history",
),
url(
    identity_specific + r"/instance/" + "(?P<instance_id>[a-zA-Z0-9-]+)/action$",
    InstanceAction.as_view(),
    name="instance-action",
),
url(
    identity_specific + r"/instance/(?P<instance_id>[a-zA-Z0-9-]+)$",
    Instance.as_view(),
    name="instance-detail"
),
url(identity_specific + r"/instance$", InstanceList.as_view(), name="instance-list"),
url(identity_specific + r"/size$", SizeList.as_view(), name="size-list"),
url(identity_specific + r"/size/(?P<size_id>\d+)$", Size.as_view(), name="size-detail"),
url(identity_specific + r"/volume$", VolumeList.as_view(), name="volume-list"),
url(identity_specific + r"/volume/(?P<volume_id>[a-zA-Z0-9-]+)$", Volume.as_view(), name="volume-detail"),
url(
    identity_specific + r"/boot_volume(?P<volume_id>[a-zA-Z0-9-]+)?$",
    BootVolume.as_view(),
    name="boot-volume"
),
url(identity_specific + r"/volume_snapshot$", VolumeSnapshot.as_view(), name="volume-snapshot"),
url(
    identity_specific + r"/volume_snapshot/(?P<snapshot_id>[a-zA-Z0-9-]+)$",
    VolumeSnapshotDetail.as_view(),
    name="volume-snapshot-detail",
),
def main():
    times_number = get_value()
    times = defaultdict(list)
    scores = defaultdict(list)
    for instance_name in ['kroA100', 'kroB100']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        for number in range(times_number):
            first_start_node = random.choice(list(instance.point_dict.keys()))
            second_start_node = np.argmax(instance.matrix[first_start_node])

            ## Get greedy solution
            # first_start_node = 92 if instance_name == 'kroA100' else 72  # Best starting vertices for the Greedy instances (previous assignment)
            greedy_cycle = Greedy(instance, regret=0)
            greedy_cycle.solve(first_start_node, second_start_node)
            print(f'Iteration number: {number+1}')

            # Get random solution
            random_cycle = Random(instance, seed=None)
            random_cycle.solve(first_start_node, second_start_node)
            # plot_random(random_cycle)

            # Local Search
            local_search = LocalSearch(instance)

            # steepest, nodes, greedy_cycle
            local_search.first_solution = greedy_cycle.first_solution
            local_search.second_solution = greedy_cycle.second_solution
            time = local_search.solve(method='steepest')
            times[f'steepest, nodes, greedy_cycle, {instance_name}'].append(time)
            scores[f'steepest, nodes, greedy_cycle, {instance_name}'].append(deepcopy(local_search))

            # greedy, nodes, greedy_cycle
            local_search.first_solution = greedy_cycle.first_solution
            local_search.second_solution = greedy_cycle.second_solution
            time = local_search.solve(method='greedy')
            times[f'greedy, nodes, greedy_cycle, {instance_name}'].append(time)
            scores[f'greedy, nodes, greedy_cycle, {instance_name}'].append(deepcopy(local_search))

            # steepest, nodes, random
            local_search.first_solution = random_cycle.first_solution
            local_search.second_solution = random_cycle.second_solution
            time = local_search.solve(method='steepest')
            times[f'steepest, nodes, random, {instance_name}'].append(time)
            scores[f'steepest, nodes, random, {instance_name}'].append(deepcopy(local_search))

            # greedy, nodes, random
            local_search.first_solution = random_cycle.first_solution
            local_search.second_solution = random_cycle.second_solution
            time = local_search.solve(method='greedy')
            times[f'greedy, nodes, random, {instance_name}'].append(time)
            scores[f'greedy, nodes, random, {instance_name}'].append(deepcopy(local_search))

            local_search.neighborhood = 'edges'

            # steepest, edges, greedy_cycle
            local_search.first_solution = greedy_cycle.first_solution
            local_search.second_solution = greedy_cycle.second_solution
            time = local_search.solve(method='steepest')
            times[f'steepest, edges, greedy_cycle, {instance_name}'].append(time)
            scores[f'steepest, edges, greedy_cycle, {instance_name}'].append(deepcopy(local_search))

            # greedy, edges, greedy_cycle
            local_search.first_solution = greedy_cycle.first_solution
            local_search.second_solution = greedy_cycle.second_solution
            time = local_search.solve(method='greedy')
            times[f'greedy, edges, greedy_cycle, {instance_name}'].append(time)
            scores[f'greedy, edges, greedy_cycle, {instance_name}'].append(deepcopy(local_search))

            # steepest, edges, random
            local_search.first_solution = random_cycle.first_solution
            local_search.second_solution = random_cycle.second_solution
            time = local_search.solve(method='steepest')
            times[f'steepest, edges, random, {instance_name}'].append(time)
            scores[f'steepest, edges, random, {instance_name}'].append(deepcopy(local_search))

            # greedy, edges, random
            local_search.first_solution = random_cycle.first_solution
            local_search.second_solution = random_cycle.second_solution
            time = local_search.solve(method='greedy')
            times[f'greedy, edges, random, {instance_name}'].append(time)
            scores[f'greedy, edges, random, {instance_name}'].append(deepcopy(local_search))

    save_string = '\n'
    worst_time = float('-inf')
    for key in scores.keys():
        best = min(scores[key], key=lambda el: el.compute_total_cost())
        costs = list(map(lambda el: el.compute_total_cost(), scores[key]))
        save_string += f'Version: {key}\nMean: {np.mean(costs)}\nMin: {min(costs)}\nMax: {max(costs)}\n\n|TIMES|\n'
        save_string += f'\nMean: {np.mean(times[key])}\nMin: {min(times[key])}\nMax: {max(times[key])}\n\n==========\n\n'
        if np.mean(times[key]) > worst_time:
            worst_time = np.mean(times[key])
        plot_best(best, key)
    print(save_string)
    save_string_fn(save_string, 'computation_results', best.algorithm)

    for instance_name in ['kroA100', 'kroB100']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        random_local_search = RandomLocalSearch(instance)
        # Note: reuses greedy_cycle from the last iteration of the experiment loop above.
        random_local_search.first_solution = greedy_cycle.first_solution
        random_local_search.second_solution = greedy_cycle.second_solution
        random_local_search.solve(worst_time)
        plot_best(random_local_search, instance_name)
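# For reference, the 'edges' neighbourhood evaluated above corresponds to 2-opt
# style moves. A minimal sketch of the cost delta of exchanging two edges inside
# one cycle (hypothetical helper, assuming a NumPy distance matrix; the real
# LocalSearch class may compute this differently):
def edge_exchange_delta(matrix, cycle, i, j):
    """Delta of replacing edges (cycle[i], cycle[i+1]) and (cycle[j], cycle[j+1])
    with (cycle[i], cycle[j]) and (cycle[i+1], cycle[j+1]), i.e. reversing the
    segment between them."""
    a, b = cycle[i], cycle[(i + 1) % len(cycle)]
    c, d = cycle[j], cycle[(j + 1) % len(cycle)]
    return (matrix[a, c] + matrix[b, d]) - (matrix[a, b] + matrix[c, d])
# A steepest pass evaluates this delta for every i < j and applies the most
# negative one by reversing cycle[i + 1:j + 1]; a greedy pass applies the first
# improving (negative-delta) move it encounters.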
def main():
    times_number = get_value()
    times = defaultdict(list)
    scores = defaultdict(list)
    for instance_name in ['kroA200', 'kroB200']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        ####################################
        for number in range(10):
            print(f'-> Local iteration number: {number+1}')
            get_ls(times, scores, instance)
        #####################################
        max_time = np.mean(times[f'MSLS, {instance_name}'])
        print(f'MSLS time: {max_time}')
        unavailable_points = list()
        for number in range(times_number):
            first_start_node = random.choice(
                list(set(list(instance.point_dict.keys())) - set(unavailable_points)))
            second_start_node = np.argmax(instance.matrix[first_start_node])
            unavailable_points.append(first_start_node)
            unavailable_points.append(second_start_node)

            # Get random solution
            random_cycle = Random(instance, seed=None)
            random_cycle.solve(first_start_node, second_start_node)
            print(f'Global iteration number: {number+1}')

            ##################################################################
            # ILS1
            print('\t-> ILS1')
            local_search = LocalSearchIterated(instance)
            local_search.neighborhood = 'edges'
            # for num in range(3, 25):
            local_search.first_solution = random_cycle.first_solution[:]
            local_search.second_solution = random_cycle.second_solution[:]
            time = local_search.solve_ils1(n_candidats=8, num_moves=20, max_time=max_time)
            times[f'ILS1, {instance_name}'].append(time)
            scores[f'ILS1, {instance_name}'].append(deepcopy(local_search))

            ##################################################################
            # ILS2
            print('\t-> ILS2 + LS')
            local_search = LocalSearchIterated(instance)
            local_search.neighborhood = 'edges'
            # for num in range(3, 25):
            local_search.first_solution = random_cycle.first_solution[:]
            local_search.second_solution = random_cycle.second_solution[:]
            time = local_search.solve_ils2(n_candidats=8, num_nodes=75, max_time=max_time, ils2a=True)
            times[f'ILS2 + LS, {instance_name}'].append(time)
            scores[f'ILS2 + LS, {instance_name}'].append(deepcopy(local_search))

            ##################################################################
            # ILS2
            print('\t-> ILS2 - LS')
            local_search = LocalSearchIterated(instance)
            local_search.neighborhood = 'edges'
            # for num in range(3, 25):
            local_search.first_solution = random_cycle.first_solution[:]
            local_search.second_solution = random_cycle.second_solution[:]
            time = local_search.solve_ils2(n_candidats=8, num_nodes=75, max_time=max_time, ils2a=False)
            times[f'ILS2 - LS, {instance_name}'].append(time)
            scores[f'ILS2 - LS, {instance_name}'].append(deepcopy(local_search))

    save_string = '\n'
    worst_time = float('-inf')
    for key in scores.keys():
        best = min(scores[key], key=lambda el: el.compute_total_cost())
        costs = list(map(lambda el: el.compute_total_cost(), scores[key]))
        save_string += f'Version: {key}\nMean: {np.mean(costs)}\nMin: {min(costs)}\nMax: {max(costs)}\n\n|TIMES|\n'
        save_string += f'\nMean: {np.mean(times[key])}\nMin: {min(times[key])}\nMax: {max(times[key])}\n\n==========\n\n'
        if np.mean(times[key]) > worst_time:
            worst_time = np.mean(times[key])
        plot_best(best, key)
    print(save_string)
    save_string_fn(save_string, 'ILSX_resultsX', None)
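# The ILS1 perturbation above is visible only as `num_moves=20`. A hedged sketch
# of what such a small perturbation can look like (all names illustrative; the
# real LocalSearchIterated.solve_ils1 may use a different move set):
import random

def perturb(first_cycle, second_cycle, num_moves=20):
    """Swap `num_moves` randomly chosen nodes between the two cycles."""
    first, second = first_cycle[:], second_cycle[:]
    for _ in range(num_moves):
        i = random.randrange(len(first))
        j = random.randrange(len(second))
        first[i], second[j] = second[j], first[i]
    return first, second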
def main():
    times_number = get_value()
    times = defaultdict(list)
    scores = defaultdict(list)
    for instance_name in ['kroA200', 'kroB200']:
        instance = Instance(name=instance_name)
        instance.compute_matrix()
        for number in range(times_number):
            first_start_node = random.choice(list(instance.point_dict.keys()))
            second_start_node = np.argmax(instance.matrix[first_start_node])
            print(f'Iteration number: {number+1}')

            # Get random solution
            random_cycle = Random(instance, seed=None)
            random_cycle.solve(first_start_node, second_start_node)
            # plot_random(random_cycle)

            ##################################################################
            # Greedy cycle
            greedy_cycle = Greedy(instance, regret=0)
            time = greedy_cycle.solve(first_start_node, second_start_node)
            times[f'greedy, regret 0, {instance_name}'].append(time)
            scores[f'greedy, regret 0, {instance_name}'].append(deepcopy(greedy_cycle))

            ##################################################################
            # Local Search
            local_search = LocalSearch(instance)
            local_search.neighborhood = 'edges'
            # steepest, edges, random
            local_search.first_solution = random_cycle.first_solution[:]
            local_search.second_solution = random_cycle.second_solution[:]
            time = local_search.solve(method='steepest')
            times[f'steepest, edges, random, {instance_name}'].append(time)
            scores[f'steepest, edges, random, {instance_name}'].append(deepcopy(local_search))

            ##################################################################
            # LocalSearchWithList
            local_search_with_list = LocalSearchWithList(instance)
            local_search_with_list.neighborhood = 'edges'
            # steepest with LM, edges, random
            local_search_with_list.first_solution = random_cycle.first_solution[:]
            local_search_with_list.second_solution = random_cycle.second_solution[:]
            time = local_search_with_list.solve()
            times[f'LM, edges, random, {instance_name}'].append(time)
            scores[f'LM, edges, random, {instance_name}'].append(deepcopy(local_search_with_list))

            ##################################################################
            # LocalSearchCandidateMoves
            ls_candidates_moves = LocalSearchCandidateMoves(instance)
            ls_candidates_moves.neighborhood = 'edges'
            # candidate moves, random
            ls_candidates_moves.first_solution = random_cycle.first_solution[:]
            ls_candidates_moves.second_solution = random_cycle.second_solution[:]
            time = ls_candidates_moves.solve(n_candidats=8)
            times[f'CM, {instance_name}'].append(time)
            scores[f'CM, {instance_name}'].append(deepcopy(ls_candidates_moves))

    save_string = '\n'
    worst_time = float('-inf')
    for key in scores.keys():
        best = min(scores[key], key=lambda el: el.compute_total_cost())
        costs = list(map(lambda el: el.compute_total_cost(), scores[key]))
        save_string += f'Version: {key}\nMean: {np.mean(costs)}\nMin: {min(costs)}\nMax: {max(costs)}\n\n|TIMES|\n'
        save_string += f'\nMean: {np.mean(times[key])}\nMin: {min(times[key])}\nMax: {max(times[key])}\n\n==========\n\n'
        if np.mean(times[key]) > worst_time:
            worst_time = np.mean(times[key])
        plot_best(best, key)
    print(save_string)
    save_string_fn(save_string, 'local_search_and_candidates_moves_results', None)
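# The `n_candidats=8` argument above hints at candidate moves restricted to each
# node's nearest neighbours. A minimal sketch of building such candidate lists
# (assuming a symmetric distance matrix with a zero diagonal; illustrative only,
# not the actual LocalSearchCandidateMoves internals):
import numpy as np

def nearest_candidates(matrix, n_candidats=8):
    """Row i holds the indices of the `n_candidats` nearest neighbours of node i."""
    order = np.argsort(matrix, axis=1)     # row-wise sort by distance
    return order[:, 1:n_candidats + 1]     # skip column 0, i.e. the node itself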
url(r'^instance_history/'
    '(?P<instance_id>[a-zA-Z0-9-]+)/'
    'status_history$',
    InstanceStatusHistoryDetail.as_view(),
    name='instance-history'),
url(identity_specific + r'/instance/' + '(?P<instance_id>[a-zA-Z0-9-]+)/tag$',
    InstanceTagList.as_view(),
    name='instance-tag-list'),
url(identity_specific + r'/instance/' + '(?P<instance_id>[a-zA-Z0-9-]+)/tag/(?P<tag_slug>.*)$',
    InstanceTagDetail.as_view(),
    name='instance-tag-detail'),
url(identity_specific + r'/instance/' + '(?P<instance_id>[a-zA-Z0-9-]+)/action$',
    InstanceAction.as_view(),
    name='instance-action'),
url(identity_specific + r'/instance/(?P<instance_id>[a-zA-Z0-9-]+)$',
    Instance.as_view(),
    name='instance-detail'),
url(identity_specific + r'/instance$', InstanceList.as_view(), name='instance-list'),
url(identity_specific + r'/size$', SizeList.as_view(), name='size-list'),
url(identity_specific + r'/size/(?P<size_id>\d+)$', Size.as_view(), name='size-detail'),
url(identity_specific + r'/volume$', VolumeList.as_view(), name='volume-list'),
url(identity_specific + r'/volume/(?P<volume_id>[a-zA-Z0-9-]+)$', Volume.as_view(), name='volume-detail'),
url(identity_specific + r'/boot_volume(?P<volume_id>[a-zA-Z0-9-]+)?$', BootVolume.as_view(), name='boot-volume'),