def server_sizing(repeats: int = 20):
    """
    Evaluates the social welfare of the non-elastic optimal and greedy algorithms as the mean server
    computation and bandwidth capacities are scaled up (storage held constant)

    :param repeats: Number of model runs per server capacity setting
    """
    model_dist = AlibabaModelDist(20, 4)
    pretty_printer, server_scales = PrettyPrinter(), {}
    # Three capacity settings: storage fixed at 400, computation/bandwidth means increased together
    for mean_storage, mean_computation, mean_bandwidth in ((400, 50, 120), (400, 60, 150), (400, 70, 160)):
        # Override the model's server distribution with a single custom distribution for this setting
        model_dist.model['server distributions'] = [{
            "name": "custom", "probability": 1,
            "storage mean": mean_storage, "storage std": 30,
            "computation mean": mean_computation, "computation std": 8,
            "bandwidth mean": mean_bandwidth, "bandwidth std": 15
        }]

        model_results = []
        for _ in range(repeats):
            tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist,
                                                                                             pretty_printer)
            # Non-elastic optimal solution (capped at 60 seconds)
            non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers, time_limit=60)
            algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store()
            reset_model(non_elastic_tasks, servers)

            # All permutations of the greedy policies
            greedy_permutations(tasks, servers, algorithm_results)
            model_results.append(algorithm_results)
        server_scales[f'{mean_storage}, {mean_computation}, {mean_bandwidth}'] = model_results

    # Save the results to the file
    with open('server_scaling_3.json', 'w') as file:
        json.dump(server_scales, file)
def server_resource_ratio(model_dist: ModelDist, repeats: int = 25, run_elastic: bool = True,
                          run_non_elastic: bool = True, non_elastic_time_limit: Optional[int] = None,
                          ratios: Iterable[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    """
    Evaluates the difference in social welfare when the ratio of computational to bandwidth capacity is changed
        between different algorithms: greedy, elastic optimal, non-elastic optimal and server relaxed optimal

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param run_elastic: If to run the optimal elastic solver
    :param run_non_elastic: If to run the optimal non-elastic solver
    :param non_elastic_time_limit: The non-elastic optimal time limit
    :param ratios: List of ratios to test (fraction of total capacity given to computation)
    """
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('resource_ratio', model_dist)
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers, non_elastic_tasks, ratio_results = generate_evaluation_model(model_dist, pretty_printer)
        # Total computation + bandwidth budget per server, split differently for each ratio below
        server_total_resources = {server: server.computation_capacity + server.bandwidth_capacity
                                  for server in servers}
        for ratio in ratios:
            algorithm_results = {}
            # Update server capacities: ratio of the total goes to computation, the rest to bandwidth
            for server in servers:
                server.update_capacities(int(server_total_resources[server] * ratio),
                                         int(server_total_resources[server] * (1 - ratio)))

            if run_elastic:
                # Finds the elastic optimal solution
                elastic_optimal_results = elastic_optimal(tasks, servers, time_limit=None)
                algorithm_results[elastic_optimal_results.algorithm] = elastic_optimal_results.store(ratio=ratio)
                pretty_printer.pprint(algorithm_results[elastic_optimal_results.algorithm])
                reset_model(tasks, servers)

            if run_non_elastic:
                # Find the non-elastic optimal solution
                non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers,
                                                          time_limit=non_elastic_time_limit)
                algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store(ratio=ratio)
                non_elastic_results.pretty_print()
                reset_model(non_elastic_tasks, servers)

            # Loop over all of the greedy policies permutations
            greedy_permutations(tasks, servers, algorithm_results)
            ratio_results[f'ratio {ratio}'] = algorithm_results
        model_results.append(ratio_results)

        # Save the results to the file after every repeat so partial runs are preserved
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def foreknowledge_evaluation(model_dist: AlibabaModelDist, repeats: int = 50, run_elastic: bool = False):
    """
    Evaluates the social welfare achieved with foreknowledge of task resource usage compared to the
        requested resource usage, using the elastic/non-elastic optimal solvers and greedy permutations

    :param model_dist: The Alibaba model distribution used to generate servers and task pairs
    :param repeats: Number of model runs
    :param run_elastic: If to run the elastic optimal solvers as well (off by default)
    """
    filename = results_filename('foreknowledge', model_dist)
    model_results = []
    for _ in range(repeats):
        servers = [model_dist.generate_server(server_id) for server_id in range(model_dist.num_servers)]
        # Paired task lists: actual (foreknowledge) usage vs user-requested usage for the same tasks
        foreknowledge_tasks, requested_tasks = model_dist.generate_foreknowledge_requested_tasks(
            servers, model_dist.num_tasks)
        non_elastic_foreknowledge_tasks = generate_non_elastic_tasks(foreknowledge_tasks)
        non_elastic_requested_tasks = generate_non_elastic_tasks(requested_tasks)

        # Record the generated model so results can be traced back to their inputs
        algorithm_results = {
            'model': {'foreknowledge tasks': [foreknowledge_task.save() for foreknowledge_task in foreknowledge_tasks],
                      'requested tasks': [requested_task.save() for requested_task in requested_tasks],
                      'servers': [server.save() for server in servers]}}

        if run_elastic:
            results = elastic_optimal(foreknowledge_tasks, servers, time_limit=None)
            algorithm_results['foreknowledge elastic optimal'] = results.store()
            reset_model(foreknowledge_tasks, servers)

            results = elastic_optimal(requested_tasks, servers, time_limit=None)
            algorithm_results['requested elastic optimal'] = results.store()
            reset_model(requested_tasks, servers)

        # Non-elastic optimal runs unconditionally for both task variants
        results = non_elastic_optimal(non_elastic_foreknowledge_tasks, servers, time_limit=None)
        algorithm_results['foreknowledge non-elastic optimal'] = results.store()
        reset_model(non_elastic_foreknowledge_tasks, servers)

        results = non_elastic_optimal(non_elastic_requested_tasks, servers, time_limit=None)
        algorithm_results['requested non-elastic optimal'] = results.store()
        reset_model(non_elastic_requested_tasks, servers)

        # Greedy permutations for both variants, with prefixed result names to keep them apart
        greedy_permutations(foreknowledge_tasks, servers, algorithm_results, 'foreknowledge ')
        greedy_permutations(requested_tasks, servers, algorithm_results, 'requested ')

        model_results.append(algorithm_results)

        # Save the results to the file after every repeat so partial runs are preserved
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished')
def test_model_tasks(num_servers: int = 8):
    """
    Compares the greedy algorithm against the non-elastic optimal solver as the number of tasks grows,
    printing per-run allocation/social-welfare/resource-usage tables and a social welfare difference table

    :param num_servers: Number of servers in each generated model
    """
    greedy_results, non_elastic_results = [], []
    for num_tasks in range(24, 60, 4):
        dist = SyntheticModelDist(num_tasks, num_servers)
        tasks, servers = dist.generate_oneshot()
        non_elastic_tasks = [NonElasticTask(task, SumSpeedPowResourcePriority()) for task in tasks]

        greedy_results.append([num_tasks,
                               greedy_algorithm(tasks, servers, UtilityDeadlinePerResourcePriority(),
                                                SumResources(), SumPercentage())])
        reset_model(tasks, servers)
        non_elastic_results.append([num_tasks, non_elastic_optimal(non_elastic_tasks, servers, 3)])

    def print_results(results):
        """
        Print the results of an algorithm

        :param results: List of results
        """
        print('Num of Tasks | Percent Tasks | Social Welfare % | Storage usage | Comp usage | Bandwidth usage')
        for task_num, result in results:
            # Mean resource usage across all servers, rounded for display
            storage = round(np.mean(list(result.server_storage_used.values())), 3)
            computation = round(np.mean(list(result.server_computation_used.values())), 3)
            bandwidth = round(np.mean(list(result.server_bandwidth_used.values())), 3)
            # noinspection PyTypeChecker
            print(f' {task_num:11} | {result.percentage_tasks_allocated:^13} | '
                  f'{result.percentage_social_welfare:^22} | '
                  f'{storage:^13} | {computation:^10} | {bandwidth:10}')

    print('\n\n\tGreedy algorithm')
    print_results(greedy_results)
    print('\n\tNon-elastic optimal results')
    print_results(non_elastic_results)

    # Side-by-side social welfare comparison per task count
    print(f'\nNum of Tasks | Difference | Greedy SW | Non-elastic SW')
    for (num_tasks, greedy), (_, non_elastic) in zip(greedy_results, non_elastic_results):
        print(f' {num_tasks:11} | {non_elastic.social_welfare - greedy.social_welfare:10.3f} | '
              f'{greedy.social_welfare:9.3f} | {non_elastic.social_welfare:8.3f}')
def test_optimal_solution():
    """
    Runs the greedy, elastic optimal, server relaxed elastic optimal and non-elastic optimal algorithms
    on a single synthetic model, printing the social welfare achieved by each
    """
    model_dist = SyntheticModelDist(num_tasks=20, num_servers=4)
    tasks, servers = model_dist.generate_oneshot()
    non_elastic_tasks = generate_non_elastic_tasks(tasks)

    # (label, solver thunk, task list to reset afterwards) for each algorithm in turn
    runs = (
        ('\nGreedy', lambda: greedy_algorithm(tasks, servers, UtilityDeadlinePerResourcePriority(),
                                              SumResources(), SumPercentage()), tasks),
        ('Optimal', lambda: elastic_optimal(tasks, servers, 5), tasks),
        ('Server relaxed', lambda: server_relaxed_elastic_optimal(tasks, servers, 5), tasks),
        ('Non-elastic Optimal', lambda: non_elastic_optimal(non_elastic_tasks, servers, 5), non_elastic_tasks),
    )
    for label, solve, solved_tasks in runs:
        result = solve()
        print(f'{label} - {result.social_welfare}')
        # Clear the allocations so the next solver starts from a fresh model
        reset_model(solved_tasks, servers)
def greedy_evaluation(model_dist: ModelDist, repeats: int = 50, run_elastic_optimal: bool = True,
                      run_non_elastic_optimal: bool = True, run_server_relaxed_optimal: bool = True):
    """
    Evaluation of different greedy algorithms

    :param model_dist: The model distribution
    :param repeats: Number of model runs
    :param run_elastic_optimal: If to run the optimal elastic solver
    :param run_non_elastic_optimal: If to run the optimal non-elastic solver
    :param run_server_relaxed_optimal: If to run the relaxed elastic solver
    """
    print(f'Evaluates the greedy algorithms (plus elastic, non-elastic and server relaxed optimal solutions) '
          f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('greedy', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist,
                                                                                         pretty_printer)

        def _record(result):
            # Store an optimal solver's result under its algorithm name and display it
            algorithm_results[result.algorithm] = result.store()
            result.pretty_print()

        if run_elastic_optimal:
            # Find the elastic optimal solution
            _record(elastic_optimal(tasks, servers, time_limit=None))
            reset_model(tasks, servers)
        if run_server_relaxed_optimal:
            # Find the server relaxed elastic optimal solution
            _record(server_relaxed_elastic_optimal(tasks, servers, time_limit=None))
            reset_model(tasks, servers)
        if run_non_elastic_optimal:
            # Find the non-elastic optimal solution
            _record(non_elastic_optimal(non_elastic_tasks, servers, time_limit=None))
            reset_model(non_elastic_tasks, servers)

        # Loop over all of the greedy policies permutations
        greedy_permutations(tasks, servers, algorithm_results)
        model_results.append(algorithm_results)

        # Save the results to the file after every repeat so partial runs are preserved
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')