def non_uniform_server_heuristics(model_dist: ModelDist, repeats: int = 20, time_limit: int = 2,
                                  random_repeats: int = 10, price_change_mean: int = 4,
                                  price_change_std: int = 2, initial_price_mean: int = 25,
                                  initial_price_std: int = 4):
    """
    Evaluates DIA when the server heuristics are non-uniform, i.e. each server draws its own
    price change and initial price from gaussian distributions instead of all servers sharing
    the same values.

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the decentralised iterative auction
    :param random_repeats: The number of random repeats for each model generated
    :param price_change_mean: The mean price change value
    :param price_change_std: The standard deviation of the price change value
    :param initial_price_mean: The mean initial price value
    :param initial_price_std: The standard deviation of the initial price value
    """
    print(f'DIA non-uniform heuristic investigation with initial price mean: {initial_price_mean} and '
          f'std: {initial_price_std}, price change mean: {price_change_mean} and price change std: {price_change_std}, '
          f'using {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    printer, collected_results = PrettyPrinter(), []
    filename = results_filename('dia_non_uniform_heuristic', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, printer)

        # Baseline run: every server uses the mean heuristic values
        set_server_heuristics(servers, price_change=price_change_mean, initial_price=initial_price_mean)
        baseline_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
        algorithm_results['normal'] = baseline_result.store()
        baseline_result.pretty_print()
        reset_model(tasks, servers)

        # Randomised runs: each server independently samples its heuristic values (clamped to >= 1)
        for random_repeat in range(random_repeats):
            for server in servers:
                server.price_change = max(1, int(gauss(price_change_mean, price_change_std)))
                server.initial_price = max(1, int(gauss(initial_price_mean, initial_price_std)))

            random_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
            algorithm_results[f'repeat {random_repeat}'] = random_result.store()
            random_result.pretty_print()
            reset_model(tasks, servers)
        collected_results.append(algorithm_results)

        # Save the results to the file (incrementally, after each repeat)
        with open(filename, 'w') as file:
            json.dump(collected_results, file)
    print('Finished running')
def dia_social_welfare_test(model_dist: ModelDist, repeat: int, repeats: int = 20):
    """
    Evaluates the DIA results against the elastic optimal social welfare

    :param model_dist: The model distribution
    :param repeat: The repeat number of the testing (not used by the body; kept for interface compatibility)
    :param repeats: The number of repeats
    """
    data = []
    filename = results_filename('testing', model_dist)
    for _ in range(repeats):
        tasks, servers = model_dist.generate_oneshot()
        model_results = {}

        # Elastic optimal solution as the social welfare reference point
        optimal_result = elastic_optimal(tasks, servers, 30)
        model_results[optimal_result.algorithm] = optimal_result.store()
        reset_model(tasks, servers)

        # Run DIA several times on the same model to capture solution variance
        for pos in range(5):
            set_server_heuristics(servers, price_change=3, initial_price=25)
            dia_result = optimal_decentralised_iterative_auction(tasks, servers, 2)
            # Bug fix: store the serialisable result dict (.store()) rather than the result
            # object itself, which json.dump below cannot serialise (TypeError)
            model_results[f'DIA {pos}'] = dia_result.store()
            reset_model(tasks, servers)
        data.append(model_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(data, file)
def auction_evaluation(model_dist: ModelDist, repeats: int = 50, dia_time_limit: int = 3,
                       run_elastic: bool = True, run_non_elastic: bool = True):
    """
    Evaluation of different auction algorithms

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param dia_time_limit: Decentralised iterative auction time limit
    :param run_elastic: If to run the elastic vcg auction
    :param run_non_elastic: If to run the non-elastic vcg auction
    """
    print(f'Evaluates the auction algorithms (cva, dia, elastic vcg, non-elastic vcg) for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('auctions', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

        if run_elastic:
            # Elastic VCG auction
            vcg_result = elastic_vcg_auction(tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic:
            # Non-elastic VCG auction (the original comment said "Elastic" but this branch
            # runs the non-elastic variant over non_elastic_tasks)
            vcg_result = non_elastic_vcg_auction(non_elastic_tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Decentralised Iterative auction
        dia_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=dia_time_limit)
        algorithm_results[dia_result.algorithm] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        # Critical Value Auction: every combination of the module's task priority,
        # server selection and resource allocation policies
        for task_priority in task_priority_functions:
            for server_selection_policy in server_selection_functions:
                for resource_allocation_policy in resource_allocation_functions:
                    critical_value_result = critical_value_auction(tasks, servers, task_priority,
                                                                   server_selection_policy,
                                                                   resource_allocation_policy)
                    algorithm_results[critical_value_result.algorithm] = critical_value_result.store()
                    critical_value_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def server_resource_ratio(model_dist: ModelDist, repeats: int = 25, run_elastic: bool = True,
                          run_non_elastic: bool = True, non_elastic_time_limit: Optional[int] = None,
                          ratios: Iterable[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    """
    Evaluates the difference in social welfare when the ratio of computational to bandwidth capacity is changed between
        different algorithms: greedy, elastic optimal, non-elastic optimal and server relaxed optimal

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param run_elastic: If to run the optimal elastic solver
    :param run_non_elastic: If to run the optimal non-elastic solver
    :param non_elastic_time_limit: The non-elastic optimal time limit
    :param ratios: List of ratios to test (fraction of each server's total capacity given to computation;
        annotation corrected from Iterable[int] — the defaults are floats)
    """
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('resource_ratio', model_dist)
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers, non_elastic_tasks, ratio_results = generate_evaluation_model(model_dist, pretty_printer)
        # Remember each server's combined capacity so every ratio splits the same total
        server_total_resources = {server: server.computation_capacity + server.bandwidth_capacity
                                  for server in servers}
        for ratio in ratios:
            algorithm_results = {}
            # Update server capacities: computation gets `ratio` of the total, bandwidth the remainder
            for server in servers:
                server.update_capacities(int(server_total_resources[server] * ratio),
                                         int(server_total_resources[server] * (1 - ratio)))

            if run_elastic:
                # Finds the elastic optimal solution
                elastic_optimal_results = elastic_optimal(tasks, servers, time_limit=None)
                algorithm_results[elastic_optimal_results.algorithm] = elastic_optimal_results.store(ratio=ratio)
                pretty_printer.pprint(algorithm_results[elastic_optimal_results.algorithm])
                reset_model(tasks, servers)

            if run_non_elastic:
                # Find the non-elastic optimal solution
                non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers,
                                                          time_limit=non_elastic_time_limit)
                algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store(ratio=ratio)
                non_elastic_results.pretty_print()
                reset_model(non_elastic_tasks, servers)

            # Loop over all of the greedy policies permutations
            greedy_permutations(tasks, servers, algorithm_results)

            ratio_results[f'ratio {ratio}'] = algorithm_results
        model_results.append(ratio_results)

        # Save the results to the file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def dia_heuristic_grid_search(model_dist: ModelDist, repeats: int = 50, time_limit: int = 4,
                              initial_prices: Iterable[int] = (0, 4, 8, 12),
                              price_changes: Iterable[int] = (1, 2, 4, 6)):
    """
    Grid search over the DIA server heuristics: every (initial price, price change) pair from the
    cartesian product of the two argument iterables is solved on the same generated model.

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the DIA Auction
    :param initial_prices: The initial price for auctions
    :param price_changes: The price change of the servers
    """
    # NOTE(review): the two adjacent f-strings concatenate without a space before 'for' —
    # preserved as in the original
    print(f'DIA Heuristic grid search with initial prices: {initial_prices}, price changes: {price_changes}'
          f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    printer, grid_results = PrettyPrinter(), []
    filename = results_filename('dia_heuristic_grid_search', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, printer)

        for initial_price in initial_prices:
            for price_change in price_changes:
                # Apply this grid point's heuristics to every server, then solve
                set_server_heuristics(servers, price_change=price_change, initial_price=initial_price)

                results = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
                algorithm_results[f'IP: {initial_price}, PC: {price_change}'] = results.store(
                    **{'initial price': initial_price, 'price change': price_change})
                results.pretty_print()
                reset_model(tasks, servers)
        grid_results.append(algorithm_results)

        # Save the results to the file after each repeat
        with open(filename, 'w') as file:
            json.dump(grid_results, file)
    print('Finished running')
def dia_repeat(model_dist: ModelDist, repeats: int = 25, auction_repeats: int = 5, time_limit: int = 2,
               price_change: int = 3, initial_price: int = 25):
    """
    Repeats the decentralised iterative auction on the same model to check whether it converges to
    the same local / global maxima each time.

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param auction_repeats: The number of auction repeats
    :param time_limit: The auction time limit
    :param price_change: Price change
    :param initial_price: The initial price
    """
    print(f'Evaluation of DIA by repeating the auction')
    printer, overall_results = PrettyPrinter(), []
    filename = results_filename('repeat_dia', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, repeat_results = generate_evaluation_model(model_dist, printer)
        set_server_heuristics(servers, price_change=price_change, initial_price=initial_price)

        for auction_repeat in range(auction_repeats):
            # Fresh model state before every auction attempt
            reset_model(tasks, servers)

            auction_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=time_limit)
            auction_result.pretty_print()
            repeat_results[f'repeat {auction_repeat}'] = auction_result.store()
        overall_results.append(repeat_results)

        # Save all of the results to a file after each repeat
        with open(filename, 'w') as file:
            json.dump(overall_results, file)
    print('Finished running')
def foreknowledge_evaluation(model_dist: AlibabaModelDist, repeats: int = 50, run_elastic: bool = False):
    """
    Evaluates the optimal and greedy solvers on paired 'foreknowledge' and 'requested' task sets
    generated from the Alibaba model distribution, storing both sets of results side by side.

    :param model_dist: The Alibaba model distribution used to generate the foreknowledge/requested task pairs
    :param repeats: The number of model repeats
    :param run_elastic: If to also run the elastic optimal solver (off by default,
        presumably due to its solve time — TODO confirm)
    """
    filename = results_filename('foreknowledge', model_dist)
    model_results = []
    for _ in range(repeats):
        servers = [model_dist.generate_server(server_id) for server_id in range(model_dist.num_servers)]
        # Paired task lists generated from the same servers
        foreknowledge_tasks, requested_tasks = model_dist.generate_foreknowledge_requested_tasks(
            servers, model_dist.num_tasks)
        non_elastic_foreknowledge_tasks = generate_non_elastic_tasks(foreknowledge_tasks)
        non_elastic_requested_tasks = generate_non_elastic_tasks(requested_tasks)

        # Save the generated model alongside the algorithm results
        algorithm_results = {
            'model': {'foreknowledge tasks': [foreknowledge_task.save() for foreknowledge_task in foreknowledge_tasks],
                      'requested tasks': [requested_task.save() for requested_task in requested_tasks],
                      'servers': [server.save() for server in servers]}}

        if run_elastic:
            # Elastic optimal on both task variants
            results = elastic_optimal(foreknowledge_tasks, servers, time_limit=None)
            algorithm_results['foreknowledge elastic optimal'] = results.store()
            reset_model(foreknowledge_tasks, servers)

            results = elastic_optimal(requested_tasks, servers, time_limit=None)
            algorithm_results['requested elastic optimal'] = results.store()
            reset_model(requested_tasks, servers)

        # Non-elastic optimal on both task variants
        results = non_elastic_optimal(non_elastic_foreknowledge_tasks, servers, time_limit=None)
        algorithm_results['foreknowledge non-elastic optimal'] = results.store()
        reset_model(non_elastic_foreknowledge_tasks, servers)

        results = non_elastic_optimal(non_elastic_requested_tasks, servers, time_limit=None)
        algorithm_results['requested non-elastic optimal'] = results.store()
        reset_model(non_elastic_requested_tasks, servers)

        # Greedy policy permutations, prefixed so the two variants' keys don't collide
        greedy_permutations(foreknowledge_tasks, servers, algorithm_results, 'foreknowledge ')
        greedy_permutations(requested_tasks, servers, algorithm_results, 'requested ')

        model_results.append(algorithm_results)

        # Save the results to the file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished')
def lower_bound_testing(model_dist: ModelDist, repeats: int = 50):
    """
    Compares the lower bound of the greedy algorithm (the value priority) against the full set
    of greedy policy permutations.

    :param model_dist: Model distribution
    :param repeats: Repeat number
    """
    print(f'Evaluates the greedy algorithm for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    printer, collected_results = PrettyPrinter(), []
    filename = results_filename('lower_bound', model_dist)
    # Extend the standard priority functions with the value priority used as the lower bound
    lb_task_functions = task_priority_functions + [ValuePriority()]

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, printer)

        # Evaluate every (priority, selection, allocation) policy combination
        for task_priority in lb_task_functions:
            for server_selection in server_selection_functions:
                for resource_allocation in resource_allocation_functions:
                    greedy_result = greedy_algorithm(tasks, servers, task_priority, server_selection,
                                                     resource_allocation)
                    algorithm_results[greedy_result.algorithm] = greedy_result.store()
                    greedy_result.pretty_print()
                    reset_model(tasks, servers)

        collected_results.append(algorithm_results)

        # Save the results to the file after each repeat
        with open(filename, 'w') as file:
            json.dump(collected_results, file)
    print('Finished running')
def algorithm_sizes(model_dist: ModelDist, repeats: int = 30):
    """
    Runs a fixed greedy algorithm configuration over a range of model sizes.

    :param model_dist: The model distributions
    :param repeats: The number of repeats for each model size
    """
    print('Evaluates the greedy algorithm over a range of model sizes')
    printer, scale_results = PrettyPrinter(), {}
    filename = results_filename('model_sizes', model_dist)

    # (task count, server count) pairs, keeping a 5:1 ratio throughout
    for num_tasks, num_servers in ((10, 2), (15, 3), (20, 4), (30, 6), (40, 8), (80, 16), (160, 32)):
        print(f'Numbers of tasks: {num_tasks}, number of servers: {num_servers}')
        model_dist.num_tasks = num_tasks
        model_dist.num_servers = num_servers

        size_results = []
        for _ in range(repeats):
            tasks, servers = model_dist.generate_oneshot()
            results = greedy_algorithm(
                tasks, servers,
                task_priority=UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
                server_selection=ProductResources(),
                resource_allocation=SumPowPercentage())
            size_results.append(results.store())
        scale_results[f'{num_tasks} tasks, {num_servers} servers'] = size_results

        # Save the results to the file after each model size
        with open(filename, 'w') as file:
            json.dump(scale_results, file)
def full_task_mutation(model_dist: ModelDist, repeats: int = 25, time_limit: int = 2, price_change: int = 3,
                       initial_price: int = 25, model_mutations: int = 15, mutate_percent: float = 0.15):
    """
    Evaluates the effectiveness of a task mutations on if the mutated task is allocated and if so the difference in
        price between the mutated and normal task

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: The time limit for the decentralised iterative auction results
    :param price_change: The price change of the servers
    :param initial_price: The initial price of tasks for the servers
    :param model_mutations: The number of model mutations to attempt
    :param mutate_percent: The percentage of the model that it can be mutated by
    """
    print(f'Evaluates the possibility of tasks mutating resulting in a lower price')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('task_mutation', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers, price_change=price_change, initial_price=initial_price)
        # Save the generated model so results can be reproduced
        mutation_results = {
            'model': {'tasks': [task.save() for task in tasks], 'servers': [server.save() for server in servers]}}
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Save the task prices and allocation state before the model is reset, so the mutated
        # results can be compared against the unmutated baseline per task
        task_prices = {task: task.price for task in tasks}
        allocated_tasks = {task: task.running_server is not None for task in tasks}
        # Currently considers all tasks, not only allocated ones (original author's note kept)
        to_mutate_tasks = [task for task, allocated in allocated_tasks.items()]  # if allocated todo future testing
        reset_model(tasks, servers)

        # Loop each time mutating a task or server and find the auction results and compare to the unmutated result
        for model_mutation in range(min(model_mutations, len(to_mutate_tasks))):
            # Choice a random task and mutate it
            task: ElasticTask = to_mutate_tasks.pop(rnd.randint(0, len(to_mutate_tasks) - 1))
            mutant_task = task.mutate(mutate_percent)

            # Replace the task with the mutant task in the task list
            list_item_replacement(tasks, task, mutant_task)
            assert mutant_task in tasks
            assert task not in tasks

            # Find the result with the mutated task
            mutant_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
            # Store the mutated result together with the task's baseline price/allocation for comparison
            mutation_results[f'mutation {model_mutation}'] = mutant_result.store(**{
                'task price': task_prices[task],
                'task allocated': allocated_tasks[task],
                'mutant price': mutant_task.price,
                'mutant task allocated': mutant_task.running_server is not None,
                'mutant task name': task.name,
                'mutant task deadline': mutant_task.deadline,
                'mutant task value': mutant_task.value,
                'mutant task storage': mutant_task.required_storage,
                'mutant task computation': mutant_task.required_computation,
                'mutant task results data': mutant_task.required_results_data,
            })
            pretty_printer.pprint(mutation_results[f'mutation {model_mutation}'])

            # Replace the mutant task with the task in the task list (restore the original model)
            list_item_replacement(tasks, mutant_task, task)
            assert mutant_task not in tasks
            assert task in tasks

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def value_only_mutation(model_dist: ModelDist, repeats: int = 25, time_limit: int = 2, price_change: int = 3,
                        initial_price: int = 25, model_mutations: int = 15,
                        value_mutations: Iterable[int] = (1, 2, 3, 4)):
    """
    Evaluates the value only mutation of tasks

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: DIA time limit
    :param price_change: Server price change
    :param initial_price: Server initial price
    :param model_mutations: The number of model mutation attempts
    :param value_mutations: The value difference to do testing with
    """
    print(f'Evaluates the value mutation of tasks')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('value_mutation', model_dist)
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers, price_change=price_change, initial_price=initial_price)
        # Save the generated model so results can be reproduced
        mutation_results = {
            'model': {'tasks': [task.save() for task in tasks], 'servers': [server.save() for server in servers]}}
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Copy the task list so popping candidates doesn't modify the model's own list
        to_mutate_tasks = tasks[:]
        reset_model(tasks, servers)

        # Loop each time mutating a task or server and find the auction results and compare to the unmutated result
        for model_mutation in range(min(model_mutations, len(to_mutate_tasks))):
            # Choice a random task and mutate it
            task: ElasticTask = to_mutate_tasks.pop(rnd.randint(0, len(to_mutate_tasks) - 1))
            # Remember the original value so it can be restored after all value mutations
            task_value = task.value

            task_mutation_results = {}
            for value in value_mutations:
                # Lower the task's value by the mutation amount and re-solve
                task.value = task_value - value

                mutant_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
                task_mutation_results[f'value {value}'] = mutant_result.store(**{
                    'price': task.price,
                    'allocated': task.running_server is not None,
                    'value': task.value
                })
                pretty_printer.pprint(task_mutation_results[f'value {value}'])
                reset_model(tasks, servers)
            # Restore the task's original value
            task.value = task_value
            mutation_results[f'task {task.name}'] = task_mutation_results

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def mutation_grid_search(model_dist: ModelDist, percent: float = 0.10, time_limit: int = 3,
                         price_change: int = 3, initial_price: int = 30):
    """
    Attempts a grid search version of the mutation testing above where a single task is mutated in every possible way
        within a particular way to keep that the random testing is not missing anything

    :param model_dist: The model distribution to generate servers and tasks
    :param percent: The percentage by which mutations can occur within
    :param time_limit: The time limit for the optimal decentralised iterative auction
    :param price_change: The price change of the servers
    :param initial_price: The initial price for the servers
    """
    print(f'Completes a grid search of a task known to achieve better results')
    filename = results_filename('mutation_grid_search', model_dist)
    # Resource requirements may grow up to (1 + percent); the deadline may shrink down to (1 - percent)
    positive_percent, negative_percent = 1 + percent, 1 - percent

    # Generate the tasks and servers
    tasks, servers = model_dist.generate_oneshot()
    set_server_heuristics(servers, price_change=price_change, initial_price=initial_price)

    # The mutation results
    mutation_results = {
        'model': {'tasks': [task.save() for task in tasks], 'servers': [server.save() for server in servers]}}

    # Baseline solve without any mutation
    no_mutation_dia = optimal_decentralised_iterative_auction(tasks, servers, time_limit=time_limit)
    no_mutation_dia.pretty_print()
    # The task to mutate: the first allocated task found (tasks are randomly generated,
    # so this is effectively a random allocated task)
    task = next(task for task in tasks if task.running_server is not None)
    mutation_results['no mutation'] = no_mutation_dia.store(
        **{'allocated': task.running_server is not None, 'task price': task.price})

    # The original task not mutated that is randomly selected (given the tasks are already randomly generated)
    # Total grid points = product of each dimension's range length (used only for the time estimate below)
    permutations = ((int(task.required_storage * positive_percent) + 1) - task.required_storage) * \
                   ((int(task.required_computation * positive_percent) + 1) - task.required_computation) * \
                   ((int(task.required_results_data * positive_percent) + 1) - task.required_results_data) * \
                   ((task.deadline + 1) - int(task.deadline * negative_percent))
    print(f'Number of permutations: {permutations}, original solve time: {no_mutation_dia.solve_time}, '
          f'estimated time: {round(permutations * no_mutation_dia.solve_time / 60, 1)} minutes')
    reset_model(tasks, servers)
    mutation_pos = 0
    # Loop over all of the permutations that the task requirement resources have up to the mutate percentage
    for required_storage in range(task.required_storage, int(task.required_storage * positive_percent) + 1):
        for required_computation in range(task.required_computation,
                                          int(task.required_computation * positive_percent) + 1):
            for required_results_data in range(task.required_results_data,
                                               int(task.required_results_data * positive_percent) + 1):
                for deadline in range(int(task.deadline * negative_percent), task.deadline + 1):
                    # Create the new mutated task and create new tasks list with the mutant task replacing the task
                    mutant_task = ElasticTask(f'mutated {task.name}', required_storage=required_storage,
                                              required_computation=required_computation,
                                              required_results_data=required_results_data,
                                              deadline=deadline, value=task.value)
                    tasks.append(mutant_task)

                    # Calculate the task price with the mutated task
                    mutated_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit)
                    mutated_result.pretty_print()
                    mutation_results[f'Mutation {mutation_pos}'] = mutated_result.store(**{
                        'mutated task': task.name,
                        'task price': mutant_task.price,
                        'required storage': required_storage,
                        'required computation': required_computation,
                        'required results data': required_results_data,
                        'deadline': deadline,
                        'allocated': mutant_task.running_server is not None
                    })
                    mutation_pos += 1

                    # Remove the mutant task and read the task to the list of tasks and reset the model
                    tasks.remove(mutant_task)
                    reset_model(tasks, servers)

    # Save all of the results to a file
    with open(filename, 'w') as file:
        json.dump(mutation_results, file)
    print('Finished running')
def online_evaluation(model_dist: ModelDist, repeats: int = 20, time_steps: int = 200,
                      mean_arrival_rate: float = 1, std_arrival_rate: float = 2,
                      task_priority=UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
                      server_selection=ProductResources(), resource_allocation=SumPowPercentage()):
    """
    Evaluates the batch online

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_steps: Total number of time steps
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation arrival rate of tasks
    :param task_priority: The task prioritisation function
    :param server_selection: Server selection policy
    :param resource_allocation: Resource allocation policy

    NOTE(review): the three policy defaults are mutable objects shared across calls —
    presumably these policy instances are stateless; confirm before relying on it.
    """
    print(f'Evaluates difference in performance between batch and online algorithm for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    print(f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate} with std: {std_arrival_rate}')
    model_results = []
    filename = results_filename('online_resource_allocation', model_dist)
    greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, {resource_allocation.name}'
    batch_length = 1

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate, std_arrival_rate)
        algorithm_results = {'model': {'tasks': [task.save() for task in tasks],
                                       'servers': [server.save() for server in servers]}}
        non_elastic_tasks = generate_non_elastic_tasks(tasks)

        # Only tasks whose deadline exceeds the batch length can be scheduled in a batch
        valid_elastic_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_elastic_tasks = generate_batch_tasks(valid_elastic_tasks, batch_length, time_steps)

        valid_non_elastic_tasks = [task for task in non_elastic_tasks if batch_length < task.deadline]
        batched_non_elastic_tasks = generate_batch_tasks(valid_non_elastic_tasks, batch_length, time_steps)

        # Flatten the batched task lists so the whole model can be reset between solvers
        flattened_elastic_tasks = [task for tasks in batched_elastic_tasks for task in tasks]
        flattened_non_elastic_tasks = [task for tasks in batched_non_elastic_tasks for task in tasks]

        # Elastic optimal solved batch-by-batch
        elastic_optimal_result = online_batch_solver(batched_elastic_tasks, servers, batch_length, 'Elastic Optimal',
                                                     elastic_optimal_solver, time_limit=None)
        algorithm_results[elastic_optimal_result.algorithm] = elastic_optimal_result.store()
        reset_model(flattened_elastic_tasks, servers)

        # Non-elastic optimal solved batch-by-batch
        non_elastic_optimal_result = online_batch_solver(batched_non_elastic_tasks, servers, batch_length,
                                                         'Non-elastic Optimal', non_elastic_optimal_solver,
                                                         time_limit=None)
        algorithm_results[non_elastic_optimal_result.algorithm] = non_elastic_optimal_result.store()
        reset_model(flattened_non_elastic_tasks, servers)

        # Greedy algorithm with the provided policies, solved batch-by-batch
        greedy_result = online_batch_solver(batched_elastic_tasks, servers, batch_length, greedy_name,
                                            greedy_algorithm, task_priority=task_priority,
                                            server_selection=server_selection,
                                            resource_allocation=resource_allocation)
        algorithm_results[greedy_result.algorithm] = greedy_result.store()
        reset_model(flattened_elastic_tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def greedy_permutations(model_dist: ModelDist, repeats: int = 20, time_steps: int = 1000,
                        mean_arrival_rate: float = 2, std_arrival_rate: float = 2, batch_length: int = 1):
    """
    Evaluates the performance between greedy algorithms with different module functions in the
    online batched setting

    :param model_dist: The model distribution used to test with
    :param repeats: The number of testing repeats that are computed
    :param time_steps: The total number of time steps for tasks to arrive at
    :param mean_arrival_rate: The mean arrival rate of tasks
    :param std_arrival_rate: The standard deviation of the arrival rate for the task
    :param batch_length: The batch length of the testing setting
    """
    print(f'Evaluates performance between different greedy permutations')
    print(f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate}, std: {std_arrival_rate}')
    model_results = []
    filename = results_filename('online_greedy_permutations', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate, std_arrival_rate)
        algorithm_results = {'model': {'tasks': [task.save() for task in tasks],
                                       'servers': [server.save() for server in servers]}}

        # Only tasks whose deadline exceeds the batch length can be scheduled in a batch
        valid_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_tasks = generate_batch_tasks(valid_tasks, batch_length, time_steps)
        flattened_tasks = [task for tasks in batched_tasks for task in tasks]

        # Bug fix: the original list contained (ValuePriority(), ProductResources(), SumPowPercentage())
        # twice — the duplicate overwrote the same results key and wasted a run. Following the pattern of
        # every other priority (tested with both ProductResources() and ProductResources(True)), the
        # second ValuePriority entry now uses ProductResources(True).
        for task_priority, server_selection, resource_allocation in [
                (UtilityDeadlinePerResourcePriority(ResourceSumPriority()), ProductResources(), SumPowPercentage()),
                (UtilityDeadlinePerResourcePriority(ResourceSumPriority()), ProductResources(True),
                 SumPowPercentage()),
                (UtilityDeadlinePerResourcePriority(ResourceProductPriority()), ProductResources(),
                 SumPowPercentage()),
                (UtilityDeadlinePerResourcePriority(ResourceProductPriority()), ProductResources(True),
                 SumPowPercentage()),
                (ValuePriority(), ProductResources(), SumPowPercentage()),
                (ValuePriority(), ProductResources(True), SumPowPercentage()),
                (UtilityDeadlinePerResourcePriority(ResourceSumPriority()), SumResources(), SumPowPercentage()),
                (UtilityDeadlinePerResourcePriority(ResourceSumPriority()), SumResources(True), SumPowPercentage())]:
            greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, ' \
                          f'{resource_allocation.name}'
            greedy_result = online_batch_solver(batched_tasks, servers, batch_length, greedy_name, greedy_algorithm,
                                                task_priority=task_priority, server_selection=server_selection,
                                                resource_allocation=resource_allocation)
            algorithm_results[greedy_result.algorithm] = greedy_result.store()
            print(greedy_name)
            reset_model(flattened_tasks, servers)
        model_results.append(algorithm_results)

        # Save the results to the file (rewritten after every repeat so partial runs are kept)
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def greedy_evaluation(model_dist: ModelDist, repeats: int = 50, run_elastic_optimal: bool = True,
                      run_non_elastic_optimal: bool = True, run_server_relaxed_optimal: bool = True):
    """
    Compares the greedy algorithm permutations against the elastic, non-elastic and server
    relaxed optimal solutions.

    :param model_dist: The model distribution
    :param repeats: Number of model runs
    :param run_elastic_optimal: If to run the optimal elastic solver
    :param run_non_elastic_optimal: If to run the optimal non-elastic solver
    :param run_server_relaxed_optimal: If to run the relaxed elastic solver
    """
    print(f'Evaluates the greedy algorithms (plus elastic, non-elastic and server relaxed optimal solutions) '
          f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    printer, collected_results = PrettyPrinter(), []
    filename = results_filename('greedy', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, printer)

        if run_elastic_optimal:
            # Optimal solution for the elastic problem
            elastic_result = elastic_optimal(tasks, servers, time_limit=None)
            algorithm_results[elastic_result.algorithm] = elastic_result.store()
            elastic_result.pretty_print()
            reset_model(tasks, servers)

        if run_server_relaxed_optimal:
            # Server relaxed optimal solution
            relaxed_result = server_relaxed_elastic_optimal(tasks, servers, time_limit=None)
            algorithm_results[relaxed_result.algorithm] = relaxed_result.store()
            relaxed_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic_optimal:
            # Optimal solution for the non-elastic problem
            non_elastic_result = non_elastic_optimal(non_elastic_tasks, servers, time_limit=None)
            algorithm_results[non_elastic_result.algorithm] = non_elastic_result.store()
            non_elastic_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Every greedy policy permutation on the same model
        greedy_permutations(tasks, servers, algorithm_results)

        collected_results.append(algorithm_results)

        # Save the results to the file after each repeat
        with open(filename, 'w') as file:
            json.dump(collected_results, file)
    print('Finished running')