def allocate_tasks(tasks: List[ElasticTask], servers: List[Server], server_selection_policy: ServerSelection,
                   resource_allocation_policy: ResourceAllocation, debug_allocation: bool = False):
    """
    Allocate the tasks to the servers based on the server selection and resource allocation policies

    :param tasks: The list of tasks
    :param servers: The list of servers
    :param server_selection_policy: The server selection policy
    :param resource_allocation_policy: The resource allocation policy
    :param debug_allocation: If to debug the task allocation
    """
    # Loop through all of the tasks in order of value
    for task in tasks:
        # Select a server using the server selection policy
        allocated_server = server_selection_policy.select(task, servers)

        # If a server is found then allocate the task's resource speeds using the resource allocation policy
        if allocated_server:
            s, w, r = resource_allocation_policy.allocate(task, allocated_server)
            server_task_allocation(allocated_server, task, s, w, r)

    if debug_allocation:
        print_task_allocation(tasks)
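
# A minimal usage sketch of `allocate_tasks` as the core of a greedy algorithm. The `task_priority`,
# `server_selection_policy` and `resource_allocation_policy` objects are assumed to be supplied by the
# caller (only `SumPercentage` is referenced elsewhere in this file), and the sketch assumes `Result`
# accepts the same positional arguments used by `critical_value_auction` below. It is illustrative,
# not the canonical greedy entry point of the code base.
def greedy_sketch(tasks, servers, task_priority, server_selection_policy, resource_allocation_policy):
    start_time = time()

    # Rank the tasks by the priority function (highest first), then allocate them greedily
    ranked_tasks = sorted(tasks, key=lambda task: task_priority.evaluate(task), reverse=True)
    allocate_tasks(ranked_tasks, servers, server_selection_policy, resource_allocation_policy)

    return Result(f'Greedy {task_priority.name}, {server_selection_policy.name}, {resource_allocation_policy.name}',
                  tasks, servers, time() - start_time)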
def allocate_task(new_task, task_price, server, unallocated_tasks, task_speeds):
    """
    Allocates a task to a server

    :param new_task: The new task to allocate to the server
    :param task_price: The price for the new task
    :param server: The server that the task is allocated to
    :param unallocated_tasks: List of unallocated tasks
    :param task_speeds: Dictionary of task speeds
    """
    server.reset_allocations()

    # For each of the tasks, if the task is still allocated then reallocate it with its speeds,
    # otherwise reset it and record it as unallocated
    new_task.price = task_price
    for task, (loading, compute, sending, allocated) in task_speeds.items():
        if allocated:
            task.reset_allocation(forget_price=False)
            server_task_allocation(server, task, loading, compute, sending)
        else:
            task.reset_allocation()
            unallocated_tasks.append(task)
def greedy_task_price(new_task: ElasticTask, server: Server, price_density: PriceDensity,
                      resource_allocation_policy: ResourceAllocation, debug_revenue: bool = False):
    """
    Calculates the task price using the greedy algorithm

    :param new_task: The new task
    :param server: Server
    :param price_density: Price density function
    :param resource_allocation_policy: Resource allocation policy
    :param debug_revenue: If to debug the revenue
    :return: Tuple of the task price and the possible speeds
    """
    assert new_task.price == 0
    current_speeds = {task: (task.loading_speed, task.compute_speed, task.sending_speed)
                      for task in server.allocated_tasks}
    tasks = server.allocated_tasks[:]
    server_revenue = server.revenue
    reset_model(server.allocated_tasks, (server,), forget_prices=False)

    # Allocate the new task first, then re-allocate the existing tasks in order of price density
    s, w, r = resource_allocation_policy.allocate(new_task, server)
    server_task_allocation(server, new_task, s, w, r)

    for task in sorted(tasks, key=lambda task: price_density.evaluate(task), reverse=True):
        if server.can_run(task):
            s, w, r = resource_allocation_policy.allocate(task, server)
            server_task_allocation(server, task, s, w, r)

    # The task price is the change in server revenue plus the price change (at least the server's initial price)
    task_price = max(server_revenue - server.revenue + server.price_change, server.initial_price)
    debug(f'Original revenue: {server_revenue}, new revenue: {server.revenue}, '
          f'price change: {server.price_change}', debug_revenue)

    # Save the possible speeds of all of the tasks (including the new task)
    possible_speeds = {task: (task.loading_speed, task.compute_speed, task.sending_speed,
                              task.running_server is not None)
                       for task in tasks + [new_task]}

    # Restore the server's original allocation
    reset_model(current_speeds.keys(), (server,), forget_prices=False)
    new_task.reset_allocation()
    for task, (loading, compute, sending) in current_speeds.items():
        server_task_allocation(server, task, loading, compute, sending)

    return task_price, possible_speeds
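
# A minimal sketch of how `greedy_task_price` and `allocate_task` might be combined in a single round
# of an iterative auction: each server that could run the newly arrived task quotes a price and the
# cheapest server wins at its quoted speeds. This only illustrates the two helpers above; it is not
# the full auction implementation from the code base.
def auction_round_sketch(new_task, servers, price_density, resource_allocation_policy, unallocated_tasks):
    # Ask every server that could run the task (even when empty) for a price quote
    quotes = {server: greedy_task_price(new_task, server, price_density, resource_allocation_policy)
              for server in servers if server.can_run_empty(new_task)}
    if not quotes:
        unallocated_tasks.append(new_task)
        return

    # Allocate the task to the cheapest server at its quoted price and speeds
    cheapest_server = min(quotes, key=lambda server: quotes[server][0])
    task_price, task_speeds = quotes[cheapest_server]
    allocate_task(new_task, task_price, cheapest_server, unallocated_tasks, task_speeds)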
def elastic_optimal_solver(tasks: List[ElasticTask], servers: List[Server], time_limit: Optional[int]):
    """
    Elastic Optimal algorithm solver using cplex

    :param tasks: List of tasks
    :param servers: List of servers
    :param time_limit: Time limit for cplex
    :return: The results of the algorithm
    """
    assert time_limit is None or 0 < time_limit, f'Time limit: {time_limit}'
    model = CpoModel('Elastic Optimal')

    # The resource speed variables and the allocation variables
    loading_speeds, compute_speeds, sending_speeds, task_allocation = {}, {}, {}, {}

    max_bandwidth = max(server.bandwidth_capacity for server in servers)
    max_computation = max(server.computation_capacity for server in servers)

    # Only consider tasks that can be run on at least one server, even if that server is empty
    runnable_tasks = [task for task in tasks if any(server.can_run_empty(task) for server in servers)]

    # Loop over each task to create the variables and add the deadline constraints
    for task in runnable_tasks:
        loading_speeds[task] = model.integer_var(min=1, max=max_bandwidth - 1, name=f'{task.name} loading speed')
        compute_speeds[task] = model.integer_var(min=1, max=max_computation, name=f'{task.name} compute speed')
        sending_speeds[task] = model.integer_var(min=1, max=max_bandwidth - 1, name=f'{task.name} sending speed')

        model.add((task.required_storage / loading_speeds[task]) +
                  (task.required_computation / compute_speeds[task]) +
                  (task.required_results_data / sending_speeds[task]) <= task.deadline)

        # The task allocation variables and the allocation constraint
        for server in servers:
            task_allocation[(task, server)] = model.binary_var(name=f'{task.name} Task - {server.name} Server')
        model.add(sum(task_allocation[(task, server)] for server in servers) <= 1)

    # For each server, add the resource constraints
    for server in servers:
        model.add(sum(task.required_storage * task_allocation[(task, server)]
                      for task in runnable_tasks) <= server.available_storage)
        model.add(sum(compute_speeds[task] * task_allocation[(task, server)]
                      for task in runnable_tasks) <= server.available_computation)
        model.add(sum((loading_speeds[task] + sending_speeds[task]) * task_allocation[(task, server)]
                      for task in runnable_tasks) <= server.available_bandwidth)

    # The optimisation statement
    model.maximize(sum(task.value * task_allocation[(task, server)]
                       for task in runnable_tasks for server in servers))

    # Solve the cplex model with the time limit
    try:
        model_solution: CpoSolveResult = model.solve(log_output=None, TimeLimit=time_limit)
    except CpoSolverException as e:
        print('Solver Exception: ', e)
        return None

    # Check that the model is solved
    if model_solution.get_solve_status() != SOLVE_STATUS_FEASIBLE and \
            model_solution.get_solve_status() != SOLVE_STATUS_OPTIMAL:
        print('Elastic optimal solver failed', file=sys.stderr)
        print_model_solution(model_solution)
        print_model(tasks, servers)
        return None

    # Generate the allocation of the tasks to the servers
    try:
        for task in runnable_tasks:
            for server in servers:
                if model_solution.get_value(task_allocation[(task, server)]):
                    server_task_allocation(server, task,
                                           model_solution.get_value(loading_speeds[task]),
                                           model_solution.get_value(compute_speeds[task]),
                                           model_solution.get_value(sending_speeds[task]))
                    break

        if abs(model_solution.get_objective_values()[0] - sum(t.value for t in tasks if t.running_server)) > 0.1:
            print('Elastic optimal different objective values - '
                  f'cplex: {model_solution.get_objective_values()[0]} and '
                  f'running task values: {sum(task.value for task in tasks if task.running_server)}',
                  file=sys.stderr)

        return model_solution
    except (AssertionError, KeyError) as e:
        print('Error: ', e, file=sys.stderr)
        print_model_solution(model_solution)
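
# A minimal sketch of wrapping `elastic_optimal_solver` into a timed result, assuming `Result`
# accepts the positional arguments used by `critical_value_auction` below; the 'solve status'
# keyword is illustrative only.
def elastic_optimal_sketch(tasks, servers, time_limit=60):
    start_time = time()
    model_solution = elastic_optimal_solver(tasks, servers, time_limit)
    if model_solution is None:
        return None
    return Result('Elastic Optimal', tasks, servers, time() - start_time,
                  **{'solve status': model_solution.get_solve_status()})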
def non_elastic_optimal_solver(tasks: List[NonElasticTask], servers: List[Server], time_limit: Optional[int]):
    """
    Finds the optimal solution for the non-elastic case

    :param tasks: A list of tasks
    :param servers: A list of servers
    :param time_limit: The time limit to solve with
    :return: The results
    """
    assert time_limit is None or 0 < time_limit, f'Time limit: {time_limit}'
    model = CpoModel('vcg')

    # As there are no resource speed variables, only assign binary variables for the allocation
    allocations = {(task, server): model.binary_var(name=f'{task.name} task {server.name} server')
                   for task in tasks for server in servers}

    # Allocation constraint
    for task in tasks:
        model.add(sum(allocations[(task, server)] for server in servers) <= 1)

    # Server resource constraints
    for server in servers:
        model.add(sum(task.required_storage * allocations[(task, server)]
                      for task in tasks) <= server.available_storage)
        model.add(sum(task.compute_speed * allocations[(task, server)]
                      for task in tasks) <= server.available_computation)
        model.add(sum((task.loading_speed + task.sending_speed) * allocations[(task, server)]
                      for task in tasks) <= server.available_bandwidth)

    # Optimisation problem
    model.maximize(sum(task.value * allocations[(task, server)] for task in tasks for server in servers))

    # Solve the cplex model with the time limit
    model_solution = model.solve(log_output=None, TimeLimit=time_limit)

    # Check that the model is solved
    if model_solution.get_solve_status() != SOLVE_STATUS_FEASIBLE and \
            model_solution.get_solve_status() != SOLVE_STATUS_OPTIMAL:
        print('Non-elastic optimal failure', file=sys.stderr)
        print_model_solution(model_solution)
        return None

    # Allocate all of the tasks to the servers
    try:
        for task in tasks:
            for server in servers:
                if model_solution.get_value(allocations[(task, server)]):
                    server_task_allocation(server, task, task.loading_speed, task.compute_speed, task.sending_speed)
                    break

        if abs(model_solution.get_objective_values()[0] - sum(t.value for t in tasks if t.running_server)) > 0.1:
            print('Non-elastic optimal different objective values - '
                  f'cplex: {model_solution.get_objective_values()[0]} and '
                  f'running task values: {sum(task.value for task in tasks if task.running_server)}',
                  file=sys.stderr)
    except (KeyError, AssertionError) as e:
        print('Assertion error in non-elastic optimal algorithm: ', e, file=sys.stderr)
        print_model_solution(model_solution)
        return None

    return model_solution
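
# A minimal sketch of how the two optimal solvers might be compared on the same model instance.
# It assumes `non_elastic_tasks` is a NonElasticTask version of `tasks`; how such tasks are
# constructed depends on the rest of the code base and is not shown here.
def compare_optimal_solvers_sketch(tasks, non_elastic_tasks, servers, time_limit=60):
    elastic_optimal_solver(tasks, servers, time_limit)
    elastic_welfare = sum(task.value for task in tasks if task.running_server)
    reset_model(tasks, servers)

    non_elastic_optimal_solver(non_elastic_tasks, servers, time_limit)
    non_elastic_welfare = sum(task.value for task in non_elastic_tasks if task.running_server)
    reset_model(non_elastic_tasks, servers)

    # The elastic model relaxes the fixed resource speeds, so its social welfare should be at least as high
    print(f'Elastic social welfare: {elastic_welfare}, Non-elastic social welfare: {non_elastic_welfare}')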
def critical_value_auction(tasks: List[ElasticTask], servers: List[Server], value_density: TaskPriority,
                           server_selection_policy: ServerSelection,
                           resource_allocation_policy: ResourceAllocation,
                           debug_initial_allocation: bool = False, debug_critical_value: bool = False) -> Result:
    """
    Run the Critical value auction

    :param tasks: List of tasks
    :param servers: List of servers
    :param value_density: Value density function
    :param server_selection_policy: Server selection function
    :param resource_allocation_policy: Resource allocation function
    :param debug_initial_allocation: If to debug the initial allocation
    :param debug_critical_value: If to debug the critical value
    :return: The results from the auction
    """
    start_time = time()

    valued_tasks: Dict[ElasticTask, float] = {task: value_density.evaluate(task) for task in tasks}
    ranked_tasks: List[ElasticTask] = sorted(valued_tasks, key=lambda j: valued_tasks[j], reverse=True)

    # Runs the greedy algorithm
    allocate_tasks(ranked_tasks, servers, server_selection_policy, resource_allocation_policy)
    allocation_data: Dict[ElasticTask, Tuple[int, int, int, Server]] = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed, task.running_server)
        for task in ranked_tasks if task.running_server}

    if debug_initial_allocation:
        max_name_len = max(len(task.name) for task in tasks)
        print(f"{'Task':<{max_name_len}} | s | w | r | server")
        for task, (s, w, r, server) in allocation_data.items():
            print(f'{task.name:<{max_name_len}}|{s:3d}|{w:3d}|{r:3d}|{server.name}')

    reset_model(tasks, servers)

    # Loop through each allocated task and find its critical value
    for critical_task in allocation_data.keys():
        # Remove the task from the ranked tasks and save the original position
        critical_pos = ranked_tasks.index(critical_task)
        ranked_tasks.remove(critical_task)

        # Loop though the tasks in order, checking if the critical task can still be allocated at any point
        for task_pos, task in enumerate(ranked_tasks):
            # If any of the servers can still run the critical task then allocate the current task to a server
            if any(server.can_run(critical_task) for server in servers):
                server = server_selection_policy.select(task, servers)
                if server:  # There may not be a server that can allocate the task
                    s, w, r = resource_allocation_policy.allocate(task, server)
                    server_task_allocation(server, task, s, w, r)
            else:
                # The critical task can no longer be allocated, so the last allocated task's density is found
                # and the critical task's price is the inverse of the value density at that density.
                # If the critical task can always run then its price stays at zero, the default price,
                # so no changes need to be made.
                critical_task_density = valued_tasks[ranked_tasks[task_pos - 1]]
                critical_task.price = round(value_density.inverse(critical_task, critical_task_density), 3)
                break

        debug(f'{critical_task.name} Task critical value: {critical_task.price:.3f}', debug_critical_value)

        # Insert the task back into the ranked tasks at its original position and reset the model,
        # without forgetting the new critical task's price
        ranked_tasks.insert(critical_pos, critical_task)
        reset_model(tasks, servers, forget_prices=False)

    # Allocate the tasks and set the price to the critical value
    for task, (s, w, r, server) in allocation_data.items():
        server_task_allocation(server, task, s, w, r)

    algorithm_name = f'Critical Value Auction {value_density.name}, ' \
                     f'{server_selection_policy.name}, {resource_allocation_policy.name}'
    return Result(algorithm_name, tasks, servers, time() - start_time, is_auction=True,
                  **{'value density': value_density.name,
                     'server selection': server_selection_policy.name,
                     'resource allocation': resource_allocation_policy.name})
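
# A minimal usage sketch of the critical value auction. The concrete policy objects are assumptions:
# any TaskPriority and ServerSelection implementation from the code base could be passed in (only
# `SumPercentage` is referenced elsewhere in this file as a resource allocation policy).
def critical_value_auction_sketch(tasks, servers, value_density, server_selection_policy):
    result = critical_value_auction(tasks, servers, value_density, server_selection_policy, SumPercentage())

    # Each allocated task is charged its critical value rather than its declared value
    for task in tasks:
        if task.running_server:
            print(f'{task.name}: value {task.value}, critical value price {task.price}')
    return result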
def test_greedy_task_price():
    print()
    model = SyntheticModelDist(20, 3)
    tasks, servers = model.generate_oneshot()
    server = servers[0]
    resource_allocation_policy = SumPercentage()

    # Allocate up to 10 random tasks to the server with random prices
    for _ in range(10):
        task = tasks.pop(rnd.randint(0, len(tasks) - 1))
        if server.can_run(task):
            s, w, r = resource_allocation_policy.allocate(task, server)
            server_task_allocation(server, task, s, w, r, price=rnd.randint(1, 10))

    copy_tasks = [copy(task) for task in server.allocated_tasks]
    copy_server = copy(server)
    print(f'Server revenue: {server.revenue} - '
          f'Task prices: {" ".join([str(task.price) for task in server.allocated_tasks])}')

    # Calculate the price of a new task; this should leave the server's current allocation unchanged
    new_task = tasks.pop(0)
    task_price, speeds = greedy_task_price(new_task, server, PriceResourcePerDeadline(),
                                           resource_allocation_policy, debug_revenue=True)
    print(f'Task Price: {task_price}')

    assert len(copy_tasks) == len(server.allocated_tasks)
    assert all(task.loading_speed == copy_task.loading_speed and task.compute_speed == copy_task.compute_speed and
               task.sending_speed == copy_task.sending_speed and task.price == copy_task.price and
               task.name == copy_task.name and task.value == copy_task.value
               for copy_task, task in zip(copy_tasks, server.allocated_tasks))
    assert server.revenue == copy_server.revenue and server.available_storage == copy_server.available_storage and \
        server.available_computation == copy_server.available_computation and \
        server.available_bandwidth == copy_server.available_bandwidth

    # Allocate the new task at the calculated price
    unallocated_tasks = []
    allocate_task(new_task, task_price, server, unallocated_tasks, speeds)

    assert copy_server.revenue + 1 == server.revenue
    assert new_task.price == task_price
    assert all(task.loading_speed == 0 and task.compute_speed == 0 and task.sending_speed == 0 and task.price == 0
               for task in unallocated_tasks)
    for task in server.allocated_tasks:
        copy_task = next((copy_task for copy_task in copy_tasks if copy_task.name == task.name), None)
        if copy_task:
            assert task.loading_speed == copy_task.loading_speed and task.compute_speed == copy_task.compute_speed and \
                task.sending_speed == copy_task.sending_speed and task.price == copy_task.price and \
                task.value == copy_task.value and task.name == copy_task.name
        else:
            assert task.loading_speed == new_task.loading_speed and task.compute_speed == new_task.compute_speed and \
                task.sending_speed == new_task.sending_speed and task.price == new_task.price and \
                task.value == new_task.value and task.name == new_task.name
def vcg_solver(tasks: List[ElasticTask], servers: List[Server], solver: Callable,
               debug_running: bool = False) -> Optional[CpoSolveResult]:
    """
    VCG auction solver

    :param tasks: List of tasks
    :param servers: List of servers
    :param solver: Solver to find the solution
    :param debug_running: If to debug the running algorithm
    :return: The results of the optimal solution, else None
    """
    # Price information
    task_prices: Dict[ElasticTask, float] = {}

    # Find the optimal solution
    debug('Running optimal solution', debug_running)
    optimal_results = solver(tasks, servers)
    if optimal_results is None:
        print('Optimal solver failed')
        return None
    optimal_social_welfare = sum(task.value for task in tasks if task.running_server)
    debug(f'Optimal social welfare: {optimal_social_welfare}', debug_running)

    # Save the task and server information from the optimal solution
    allocated_tasks = [task for task in tasks if task.running_server]
    task_allocation: Dict[ElasticTask, Tuple[int, int, int, Server]] = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed, task.running_server)
        for task in allocated_tasks}
    debug(f"Allocated tasks: {', '.join([task.name for task in allocated_tasks])}", debug_running)

    # For each allocated task, find the sum of the values when the task doesn't exist
    for task in allocated_tasks:
        # Reset the model and remove the task from the task list
        reset_model(tasks, servers)
        tasks_prime = list_copy_remove(tasks, task)

        # Find the optimal solution where the task doesn't exist
        debug(f'Solving for without task {task.name}', debug_running)
        prime_results = solver(tasks_prime, servers)
        if prime_results is None:
            print(f'Failed for task: {task.name}')
            return None
        else:
            task_prices[task] = optimal_social_welfare - sum(task.value for task in tasks_prime
                                                             if task.running_server)
            debug(f'{task.name} Task: £{task_prices[task]:.1f}, Value: {task.value}', debug_running)

    # Reset the model and allocate the tasks with their information from the original optimal solution
    reset_model(tasks, servers)
    for task, (s, w, r, server) in task_allocation.items():
        server_task_allocation(server, task, s, w, r, price=task_prices[task])

    return optimal_results
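
# A minimal sketch of how `vcg_solver` might be combined with the optimal solvers above to build
# elastic and non-elastic VCG auctions. The fixed `time_limit` passed through the lambda and the
# `Result` arguments are assumptions; the real entry points may expose these differently.
def elastic_vcg_auction_sketch(tasks, servers, time_limit=60):
    start_time = time()
    results = vcg_solver(tasks, servers, lambda ts, ss: elastic_optimal_solver(ts, ss, time_limit))
    if results is None:
        return None
    return Result('Elastic VCG', tasks, servers, time() - start_time, is_auction=True)


def non_elastic_vcg_auction_sketch(non_elastic_tasks, servers, time_limit=60):
    start_time = time()
    results = vcg_solver(non_elastic_tasks, servers, lambda ts, ss: non_elastic_optimal_solver(ts, ss, time_limit))
    if results is None:
        return None
    return Result('Non-elastic VCG', non_elastic_tasks, servers, time() - start_time, is_auction=True)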