def foreknowledge_non_elastic_optimal(
        tasks: List[NonElasticTask],
        servers: List[Server],
        time_limit: Optional[int] = 15) -> Optional[Result]:
    """
    Runs the foreknowledge non-elastic optimal CPLEX solver with a time limit

    :param tasks: List of Non-elastic tasks
    :param servers: List of servers
    :param time_limit: Cplex time limit
    :return: Optional results
    """
    model_solution = non_elastic_optimal_solver(tasks,
                                                servers,
                                                time_limit=time_limit)
    if model_solution:
        return Result(
            'Foreknowledge Non-elastic Optimal', tasks, servers,
            round(model_solution.get_solve_time(), 2), **{
                'solve status': model_solution.get_solve_status(),
                'cplex objective': model_solution.get_objective_values()[0]
            })
    else:
        print('Foreknowledge Non-elastic optimal error', file=sys.stderr)
        return Result('Foreknowledge Non-elastic Optimal',
                      tasks,
                      servers,
                      0,
                      limited=True)
def server_relaxed_elastic_optimal(
        tasks: List[ElasticTask],
        servers: List[Server],
        time_limit: Optional[int] = 15) -> Optional[Result]:
    """
    Runs the server-relaxed task allocation solver, pooling all servers into a single SuperServer

    :param tasks: List of tasks
    :param servers: List of servers
    :param time_limit: The time limit for the solver
    :return: Optional relaxed results
    """
    super_server = SuperServer(servers)
    model_solution = elastic_optimal_solver(tasks, [super_server], time_limit)
    if model_solution:
        return Result(
            'Server Relaxed Elastic Optimal', tasks, [super_server],
            round(model_solution.get_solve_time(), 2), **{
                'solve status': model_solution.get_solve_status(),
                'cplex objective': model_solution.get_objective_values()[0]
            })
    else:
        print('Server Relaxed Elastic Optimal error', file=sys.stderr)
        return Result('Server Relaxed Elastic Optimal',
                      tasks,
                      servers,
                      0,
                      limited=True)
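# Pooling every server's capacity into a single server is a relaxation: any
# allocation that is feasible on the separate servers is also feasible on the
# pool, so the relaxed optimum upper-bounds the true optimum. A minimal sketch
# of the pooling assumed here (a hypothetical stand-in; the repo's SuperServer
# presumably does the equivalent):
class PooledServer:
    """Hypothetical single server whose capacities are the sums of the originals."""

    def __init__(self, servers):
        self.storage_capacity = sum(server.storage_capacity for server in servers)
        self.computation_capacity = sum(server.computation_capacity for server in servers)
        self.bandwidth_capacity = sum(server.bandwidth_capacity for server in servers)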
def elastic_vcg_auction(tasks: List[ElasticTask],
                        servers: List[Server],
                        time_limit: Optional[int] = 5,
                        debug_results: bool = False) -> Optional[Result]:
    """
    VCG auction algorithm

    :param tasks: List of tasks
    :param servers: List of servers
    :param time_limit: The time limit of the optimal solver
    :param debug_results: If to debug results
    :return: The results of the VCG auction
    """
    optimal_solver_fn = functools.partial(elastic_optimal_solver,
                                          time_limit=time_limit)

    global_model_solution = vcg_solver(tasks, servers, optimal_solver_fn,
                                       debug_results)
    if global_model_solution:
        return Result('Elastic VCG Auction',
                      tasks,
                      servers,
                      round(global_model_solution.get_solve_time(), 2),
                      is_auction=True,
                      **{
                          'solve status':
                          global_model_solution.get_solve_status(),
                          'cplex objective':
                          global_model_solution.get_objective_values()[0]
                      })
    else:
        print('Elastic VCG Auction error', file=sys.stderr)
        return Result('Elastic VCG Auction', tasks, servers, 0, limited=True)
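# The VCG payment rule charges each winner the externality it imposes on the
# other tasks: solve the welfare maximisation once with every task and once
# with the winner removed, and the price is the difference in the others'
# welfare. A self-contained brute-force sketch on a toy single-resource
# problem (ToyTask and the helpers are hypothetical stand-ins, not the repo's
# ElasticTask or vcg_solver):
import itertools
from typing import Dict, List, NamedTuple, Tuple


class ToyTask(NamedTuple):
    """Hypothetical task with a value and a single resource demand."""
    name: str
    value: int
    demand: int


def toy_best_allocation(tasks: List[ToyTask], capacity: int) -> Tuple[int, Tuple[ToyTask, ...]]:
    """Brute-force the welfare-maximising feasible subset (a toy knapsack)."""
    best, best_subset = 0, ()
    for size in range(len(tasks) + 1):
        for subset in itertools.combinations(tasks, size):
            if sum(task.demand for task in subset) <= capacity:
                welfare = sum(task.value for task in subset)
                if best < welfare:
                    best, best_subset = welfare, subset
    return best, best_subset


def toy_vcg_prices(tasks: List[ToyTask], capacity: int) -> Dict[str, int]:
    """Price of a winner = the others' welfare without it - their welfare with it."""
    total_welfare, winners = toy_best_allocation(tasks, capacity)
    prices = {}
    for task in winners:
        others = [other for other in tasks if other is not task]
        welfare_without, _ = toy_best_allocation(others, capacity)
        prices[task.name] = welfare_without - (total_welfare - task.value)
    return prices


# e.g. toy_vcg_prices([ToyTask('a', 10, 4), ToyTask('b', 6, 3), ToyTask('c', 5, 3)], capacity=6)
#      allocates b and c and returns {'b': 5, 'c': 4}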
def minimal_resources_elastic_optimal_solver(tasks: List[ElasticTask],
                                             servers: List[Server],
                                             solver_time_limit: int = 3,
                                             minimise_time_limit: int = 2):
    """
    Minimises the resources used by the elastic optimal solver

    :param tasks: List of tasks
    :param servers: List of servers
    :param solver_time_limit: Solver time limit
    :param minimise_time_limit: Minimise solver time limit
    :return: Elastic optimal results with minimised resource usage
    """
    valid_servers = [
        server for server in servers if 1 <= server.available_computation
        and 1 <= server.available_bandwidth
    ]
    model_solution = elastic_optimal_solver(tasks, valid_servers,
                                            solver_time_limit)
    if model_solution:
        # Find the minimum resources that could be allocated
        minimal_allocated_resources_solver(tasks, valid_servers,
                                           minimise_time_limit)
        return Result(
            'Elastic Optimal', tasks, servers,
            round(model_solution.get_solve_time(), 2), **{
                'solve status': model_solution.get_solve_status(),
                'cplex objective': model_solution.get_objective_values()[0]
            })
    else:
        print('Elastic Optimal error', file=sys.stderr)
        return Result('Elastic Optimal', tasks, servers, 0, limited=True)
def elastic_optimal(tasks: List[ElasticTask],
                    servers: List[Server],
                    time_limit: Optional[int] = 15) -> Optional[Result]:
    """
    Runs the optimal task allocation solver within the given time limit for the list of tasks and servers

    :param tasks: List of tasks
    :param servers: List of servers
    :param time_limit: The time limit for the cplex solver
    :return: Optimal results if a solution is found
    """
    model_solution = elastic_optimal_solver(tasks, servers, time_limit)
    if model_solution:
        return Result(
            'Elastic Optimal', tasks, servers,
            round(model_solution.get_solve_time(), 2), **{
                'solve status': model_solution.get_solve_status(),
                'cplex objective': model_solution.get_objective_values()[0]
            })
    else:
        print('Elastic Optimal error', file=sys.stderr)
        return Result('Elastic Optimal', tasks, servers, 0, limited=True)
def greedy_algorithm(tasks: List[ElasticTask],
                     servers: List[Server],
                     task_priority: TaskPriority,
                     server_selection: ServerSelection,
                     resource_allocation: ResourceAllocation,
                     debug_task_values: bool = False,
                     debug_task_allocation: bool = False) -> Result:
    """
    A greedy algorithm that allocates tasks to servers, aiming to maximise the total utility;
        the allocation is stored on the tasks and servers themselves

    :param tasks: List of tasks
    :param servers: List of servers
    :param task_priority: The task priority function
    :param server_selection: The server selection policy function
    :param resource_allocation: The resource allocation policy function
    :param debug_task_values: If to debug the task values
    :param debug_task_allocation: If to debug the task allocation
    :return: The results of the greedy algorithm
    """
    start_time = time()

    # Sorted list of task and task priority
    task_values = sorted(tasks,
                         key=lambda task: task_priority.evaluate(task),
                         reverse=True)
    if debug_task_values:
        print_task_values(
            sorted(((task, task_priority.evaluate(task)) for task in tasks),
                   key=lambda jv: jv[1],
                   reverse=True))

    # Run the allocation of the task with the sorted task by value
    allocate_tasks(task_values,
                   servers,
                   server_selection,
                   resource_allocation,
                   debug_allocation=debug_task_allocation)

    # The algorithm name
    algorithm_name = f'Greedy {task_priority.name}, {server_selection.name}, {resource_allocation.name}'
    return Result(
        algorithm_name, tasks, servers,
        time() - start_time, **{
            'task priority': task_priority.name,
            'server selection': server_selection.name,
            'resource allocation': resource_allocation.name
        })
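# The three policy objects decompose the greedy heuristic into independent
# choices: the order in which tasks are considered, the server each task is
# sent to, and the resources it is given there. A stripped-down sketch of the
# same shape with a single resource and fixed toy policies (hypothetical, not
# the repo's TaskPriority / ServerSelection / ResourceAllocation classes):
from typing import Callable, Dict, List, NamedTuple


class GreedyToyTask(NamedTuple):
    """Hypothetical task with a value and a single resource demand."""
    name: str
    value: int
    demand: int


def toy_greedy_allocate(tasks: List[GreedyToyTask], capacities: Dict[str, int],
                        priority: Callable[[GreedyToyTask], float]) -> Dict[str, str]:
    """Consider tasks in descending priority; place each on the server with
    the most spare capacity (a stand-in for the server selection policy)."""
    allocation: Dict[str, str] = {}
    for task in sorted(tasks, key=priority, reverse=True):
        server = max(capacities, key=capacities.get)  # server selection step
        if task.demand <= capacities[server]:
            capacities[server] -= task.demand  # resource allocation step
            allocation[task.name] = server
    return allocation


# e.g. toy_greedy_allocate([GreedyToyTask('a', 10, 4), GreedyToyTask('b', 6, 3)],
#                          {'s1': 6, 's2': 4}, priority=lambda task: task.value / task.demand)
#      allocates a to s1 and b to s2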
def optimal_decentralised_iterative_auction(tasks: List[ElasticTask], servers: List[Server], time_limit: int = 5,
                                            debug_allocation: bool = False) -> Result:
    """
    Runs the optimal decentralised iterative auction

    :param tasks: List of tasks
    :param servers: List of servers
    :param time_limit: The time limit for the DIA solver
    :param debug_allocation: If to debug allocation
    :return: The results of the auction
    """
    solver = functools.partial(optimal_task_price, time_limit=time_limit)
    rounds, task_rounds, solve_time = decentralised_iterative_solver(tasks, servers, solver, debug_allocation)

    return Result('Optimal DIA', tasks, servers, solve_time, is_auction=True,
                  **{'server price change': {server.name: server.price_change for server in servers},
                     'server initial price': {server.name: server.initial_price for server in servers},
                     'rounds': rounds,
                     'task rounds': {task.name: task_round for task, task_round in task_rounds.items()}})
def greedy_decentralised_iterative_auction(tasks: List[ElasticTask], servers: List[Server], price_density: PriceDensity,
                                           resource_allocation: ResourceAllocation,
                                           debug_allocation: bool = False) -> Result:
    """
    Runs the greedy decentralised iterative auction

    :param tasks: List of tasks
    :param servers: List of servers
    :param price_density: Price density policy
    :param resource_allocation: Resource allocation policy
    :param debug_allocation: If to debug allocation
    :return: The results of the auction
    """
    solver = functools.partial(greedy_task_price, price_density=price_density,
                               resource_allocation_policy=resource_allocation)
    rounds, task_rounds, solve_time = decentralised_iterative_solver(tasks, servers, solver, debug_allocation)

    return Result('Greedy DIA', tasks, servers, solve_time, is_auction=True,
                  **{'server price change': {server.name: server.price_change for server in servers},
                     'server initial price': {server.name: server.initial_price for server in servers},
                     'price density': price_density.name, 'resource allocation': resource_allocation.name,
                     'rounds': rounds,
                     'task rounds': {task.name: task_round for task, task_round in task_rounds.items()}})
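# Both DIA variants above plug a per-task pricing function into the same round
# loop: an unallocated task asks every server for a price, takes the cheapest
# offer it values above the quote, and servers raise their prices as capacity
# fills (in the repo's solver a task can presumably also be outbid and
# returned to the pool, which is omitted here). A conceptual, self-contained
# sketch under those assumptions:
from typing import Dict, List, NamedTuple


class AuctionToyTask(NamedTuple):
    """Hypothetical task with a value and a single resource demand."""
    name: str
    value: int
    demand: int


def toy_iterative_auction(tasks: List[AuctionToyTask], capacities: Dict[str, int],
                          price_step: int = 1) -> Dict[str, str]:
    """One pass of a decentralised auction: cheapest-offer selection with
    rising per-server unit prices."""
    unit_prices = {server: 0 for server in capacities}
    spare = dict(capacities)
    allocation: Dict[str, str] = {}
    for task in tasks:
        offers = {server: unit_prices[server] * task.demand
                  for server in spare if task.demand <= spare[server]}
        if not offers:
            continue  # no server has room for this task
        server = min(offers, key=offers.get)
        if offers[server] <= task.value:
            allocation[task.name] = server
            spare[server] -= task.demand
            unit_prices[server] += price_step  # the server raises its price
    return allocation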
def branch_bound_algorithm(tasks: List[ElasticTask],
                           servers: List[Server],
                           feasibility=elastic_feasible_allocation,
                           debug_new_candidate: bool = False,
                           debug_checking_allocation: bool = False,
                           debug_update_lower_bound: bool = False,
                           debug_feasibility: bool = False) -> Result:
    """
    Branch and bound based algorithm

    :param tasks: A list of tasks
    :param servers: A list of servers
    :param feasibility: Feasibility function
    :param debug_new_candidate: If to debug new candidates
    :param debug_checking_allocation: If to debug checking each allocation
    :param debug_update_lower_bound: If to debug lower bound updates
    :param debug_feasibility: If to debug allocation feasibility
    :return: The results from the search
    """
    start_time = time()

    # The best values for the lower bound, allocation and speeds
    best_lower_bound: float = 0
    best_allocation: Optional[Dict[Server, List[ElasticTask]]] = None
    best_speeds: Optional[Dict[ElasticTask, Tuple[int, int, int]]] = None

    # Comparison and evaluation functions for the candidate priority queue
    def compare(candidate_1, candidate_2):
        """
        Compare two candidates

        :param candidate_1: Candidate 1
        :param candidate_2: Candidate 2
        :return: The comparison between the two
        """
        return Comparison.compare(candidate_1[0], candidate_2[0])

    def evaluate(candidate):
        """
        Evaluate the candidate

        :param candidate: The candidate
        :return: String for the candidate
        """
        return str(candidate[0])

    candidates = PriorityQueue(compare, evaluate)
    candidates.push_all(
        generate_candidates({server: []
                             for server in servers},
                            tasks,
                            servers,
                            0,
                            0,
                            sum(task.value for task in tasks),
                            debug_new_candidates=debug_new_candidate))

    # While candidates exist
    while candidates.size > 0:
        actual_lower_bound = max(candidate[0]
                                 for candidate in candidates.queue)
        lower_bound, upper_bound, allocation, pos = candidates.pop()
        assert actual_lower_bound == lower_bound

        if best_lower_bound < upper_bound:
            if debug_checking_allocation:
                print(
                    f'Checking - Lower bound: {lower_bound}, Upper bound: {upper_bound}, pos: {pos}'
                )
                # print_allocation(allocation)

            # Check if the allocation is feasible
            task_speeds = feasibility(allocation)
            if debug_feasibility:
                print(f'Allocation feasibility: {task_speeds is not None}')

            if task_speeds:
                # Update the lower bound if better
                if best_lower_bound < lower_bound:
                    if debug_update_lower_bound:
                        print(f'Update - New Lower bound: {lower_bound}')

                    best_allocation = allocation
                    best_speeds = task_speeds
                    best_lower_bound = lower_bound

                # Generate the new candidates as the allocation was successful
                if pos < len(tasks):
                    candidates.push_all(
                        generate_candidates(
                            allocation,
                            tasks,
                            servers,
                            pos,
                            lower_bound,
                            upper_bound,
                            debug_new_candidates=debug_new_candidate))

    # Search is finished so allocate the tasks
    assert best_allocation is not None and best_speeds is not None, 'No feasible allocation was found'
    for server, allocated_tasks in best_allocation.items():
        for allocated_task in allocated_tasks:
            allocated_task.allocate(best_speeds[allocated_task][0],
                                    best_speeds[allocated_task][1],
                                    best_speeds[allocated_task][2], server)
            server.allocate_task(allocated_task)

    return Result('Branch & Bound', tasks, servers, time() - start_time)
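# The search above is a best-first branch and bound: each candidate carries a
# lower and an upper bound on the achievable value, the queue pops the most
# promising candidate, and anything whose upper bound cannot beat the best
# feasible lower bound found so far is pruned. The same skeleton on a toy 0/1
# knapsack, using heapq in place of the repo's PriorityQueue (a sketch, not
# the repo's generate_candidates logic):
import heapq
from typing import List, Tuple


def toy_branch_and_bound(values: List[int], weights: List[int], capacity: int) -> int:
    """Best-first search where a candidate is (-upper_bound, pos, value, weight)
    and the upper bound is the value so far plus every undecided item."""
    best = 0
    heap: List[Tuple[int, int, int, int]] = [(-sum(values), 0, 0, 0)]
    while heap:
        negated_upper, pos, value, weight = heapq.heappop(heap)
        if -negated_upper <= best:
            continue  # prune: even the optimistic bound cannot beat the incumbent
        best = max(best, value)  # the partial solution itself is feasible
        if pos == len(values):
            continue
        remaining = sum(values[pos + 1:])  # optimistic value of the undecided items
        if weight + weights[pos] <= capacity:  # branch 1: take item pos
            heapq.heappush(heap, (-(value + values[pos] + remaining), pos + 1,
                                  value + values[pos], weight + weights[pos]))
        # branch 2: skip item pos
        heapq.heappush(heap, (-(value + remaining), pos + 1, value, weight))
    return best


# e.g. toy_branch_and_bound([10, 6, 5], [4, 3, 3], capacity=6) returns 11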
def critical_value_auction(tasks: List[ElasticTask],
                           servers: List[Server],
                           value_density: TaskPriority,
                           server_selection_policy: ServerSelection,
                           resource_allocation_policy: ResourceAllocation,
                           debug_initial_allocation: bool = False,
                           debug_critical_value: bool = False) -> Result:
    """
    Runs the critical value auction

    :param tasks: List of tasks
    :param servers: List of servers
    :param value_density: Value density function
    :param server_selection_policy: Server selection function
    :param resource_allocation_policy: Resource allocation function
    :param debug_initial_allocation: If to debug the initial allocation
    :param debug_critical_value: If to debug the critical value
    :return: The results from the auction
    """
    start_time = time()

    valued_tasks: Dict[ElasticTask, float] = {
        task: value_density.evaluate(task)
        for task in tasks
    }
    ranked_tasks: List[ElasticTask] = sorted(valued_tasks,
                                             key=lambda j: valued_tasks[j],
                                             reverse=True)

    # Runs the greedy algorithm
    allocate_tasks(ranked_tasks, servers, server_selection_policy,
                   resource_allocation_policy)
    allocation_data: Dict[ElasticTask, Tuple[int, int, int, Server]] = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed,
               task.running_server)
        for task in ranked_tasks if task.running_server
    }

    if debug_initial_allocation:
        max_name_len = max(len(task.name) for task in tasks)
        print(f"{'Task':<{max_name_len}} | s | w | r | server")
        for task, (s, w, r, server) in allocation_data.items():
            print(f'{task.name:<{max_name_len}}|{s:3d}|{w:3d}|{r:3d}|{server.name}')

    reset_model(tasks, servers)

    # Loop through each task allocated and find the critical value for the task
    for critical_task in allocation_data.keys():
        # Remove the task from the ranked tasks and save the original position
        critical_pos = ranked_tasks.index(critical_task)
        ranked_tasks.remove(critical_task)

        # Loop through the tasks in order, checking if the critical task can still be allocated at each point
        for task_pos, task in enumerate(ranked_tasks):
            # If any of the servers can allocate the critical task then allocate the current task to a server
            if any(server.can_run(critical_task) for server in servers):
                server = server_selection_policy.select(task, servers)
                if server:  # There may not be a server that can allocate the task
                    s, w, r = resource_allocation_policy.allocate(task, server)
                    server_task_allocation(server, task, s, w, r)
            else:
                # The critical task can no longer be allocated, so the previous task's density is taken
                #   and the critical value is the inverse of the value density at that density.
                #   If the task can always run then its price stays at zero, the default, so no change is needed
                critical_task_density = valued_tasks[ranked_tasks[task_pos - 1]]
                critical_task.price = round(
                    value_density.inverse(critical_task,
                                          critical_task_density), 3)
                break

        debug(
            f'{critical_task.name} Task critical value: {critical_task.price:.3f}',
            debug_critical_value)

        # Reinsert the task into the ranked tasks at its original position and reset the model,
        #   keeping the new critical task's price
        ranked_tasks.insert(critical_pos, critical_task)
        reset_model(tasks, servers, forget_prices=False)

    # Allocate the tasks and set the price to the critical value
    for task, (s, w, r, server) in allocation_data.items():
        server_task_allocation(server, task, s, w, r)

    algorithm_name = f'Critical Value Auction {value_density.name}, ' \
                     f'{server_selection_policy.name}, {resource_allocation_policy.name}'
    return Result(algorithm_name,
                  tasks,
                  servers,
                  time() - start_time,
                  is_auction=True,
                  **{
                      'value density': value_density.name,
                      'server selection': server_selection_policy.name,
                      'resource allocation': resource_allocation_policy.name
                  })
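# The critical value is the smallest bid at which a task would still win under
# the same monotone greedy allocation; charging exactly that amount is what
# makes the auction truthful. A brute-force, self-contained illustration in a
# single-resource setting (hypothetical stand-ins; the function above instead
# finds the blocking task's density and inverts it):
from typing import List, NamedTuple


class BidToyTask(NamedTuple):
    """Hypothetical task with a value (bid) and a single resource demand."""
    name: str
    value: int
    demand: int


def toy_greedy_winners(tasks: List[BidToyTask], capacity: int) -> List[str]:
    """First fit in descending value density, the usual greedy ranking."""
    winners, spare = [], capacity
    for task in sorted(tasks, key=lambda task: task.value / task.demand, reverse=True):
        if task.demand <= spare:
            winners.append(task.name)
            spare -= task.demand
    return winners


def toy_critical_value(task: BidToyTask, others: List[BidToyTask], capacity: int) -> int:
    """Lower the bid one unit at a time until the task stops winning."""
    for bid in range(task.value, -1, -1):
        if task.name not in toy_greedy_winners(others + [task._replace(value=bid)], capacity):
            return bid + 1
    return 0  # the task wins at any bid, so the default price of zero stands


# e.g. with tasks a=(10, 4), b=(6, 3), c=(5, 3) and capacity 6, only a wins and
#      toy_critical_value returns 9: below a bid of 9, b outranks a and a no longer fits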
def online_batch_solver(batched_tasks: List[List[ElasticTask]],
                        servers: List[Server], batch_length: int,
                        solver_name: str, solver, **solver_args) -> Result:
    """
    Generic online batch solver

    :param batched_tasks: List of batch tasks
    :param servers: List of servers
    :param batch_length: Batch length
    :param solver_name: Solver name
    :param solver: Solver function
    :param solver_args: Solver function arguments
    :return: Online results
    """
    start_time = time()
    server_social_welfare = {server: 0 for server in servers}
    server_storage_usage = {server: [] for server in servers}
    server_computation_usage = {server: [] for server in servers}
    server_bandwidth_usage = {server: [] for server in servers}
    server_num_tasks_allocated = {server: [] for server in servers}

    for batch_num, batch_tasks in enumerate(batched_tasks):
        solver(batch_tasks, servers, **solver_args)

        for server in servers:
            # Save the current information for the server
            server_social_welfare[server] += sum(
                task.value for task in batch_tasks
                if task.running_server is server)
            server_storage_usage[server].append(
                resource_usage(server, 'storage'))
            server_computation_usage[server].append(
                resource_usage(server, 'computation'))
            server_bandwidth_usage[server].append(
                resource_usage(server, 'bandwidth'))
            server_num_tasks_allocated[server].append(
                len(server.allocated_tasks))

            # Get the current batch time step and the next batch time step
            current_time_step, next_time_step = batch_length * batch_num, batch_length * (
                batch_num + 1)

            # Keep only the allocated tasks that are still running at the next batch time step
            server.allocated_tasks = [
                task for task in server.allocated_tasks
                if next_time_step <= task.auction_time + task.deadline
            ]
            # Calculate how much of the batch the task will be allocated for
            # batch_multiplier = {task: batch_length if next_time_step <= task.auction_time + task.deadline else
            #                  (task.auction_time + task.deadline - next_time_step) for task in server.allocated_tasks}
            # assert all(0 < multiplier <= batch_length for multiplier in batch_multiplier.values()), \
            #     list(batch_multiplier.values())

            # Update the server available resources
            server.available_storage = server.storage_capacity - \
                sum(task.required_storage for task in server.allocated_tasks)
            assert 0 <= server.available_storage <= server.storage_capacity, server.available_storage

            server.available_computation = server.computation_capacity - \
                ceil(sum(task.compute_speed for task in server.allocated_tasks))
            assert 0 <= server.available_computation <= server.computation_capacity, server.available_computation

            server.available_bandwidth = server.bandwidth_capacity - \
                ceil(sum((task.loading_speed + task.sending_speed) for task in server.allocated_tasks))
            assert 0 <= server.available_bandwidth <= server.bandwidth_capacity, server.available_bandwidth

    flatten_tasks = [task for tasks in batched_tasks for task in tasks]
    return Result(
        solver_name,
        flatten_tasks,
        servers,
        time() - start_time,
        limited=True,
        **{
            'server social welfare':
            {server.name: server_social_welfare[server]
             for server in servers},
            'server storage used':
            {server.name: server_storage_usage[server]
             for server in servers},
            'server computation used': {
                server.name: server_computation_usage[server]
                for server in servers
            },
            'server bandwidth used': {
                server.name: server_bandwidth_usage[server]
                for server in servers
            },
            'server num tasks allocated': {
                server.name: server_num_tasks_allocated[server]
                for server in servers
            }
        })
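# The solver never sees the whole stream: batched_tasks is the arrival stream
# pre-sliced into windows of batch_length time steps, presumably keyed on each
# task's auction_time (the field used above). A hypothetical helper that
# builds that structure from a flat task list:
from typing import List, NamedTuple


class ArrivingTask(NamedTuple):
    """Hypothetical stand-in for ElasticTask; only the arrival time matters here."""
    name: str
    auction_time: int


def batch_tasks_by_arrival(tasks: List[ArrivingTask], batch_length: int) -> List[List[ArrivingTask]]:
    """Batch n holds the tasks arriving in [n * batch_length, (n + 1) * batch_length)."""
    last_arrival = max(task.auction_time for task in tasks)
    batches: List[List[ArrivingTask]] = [[] for _ in range(last_arrival // batch_length + 1)]
    for task in tasks:
        batches[task.auction_time // batch_length].append(task)
    return batches


# e.g. batch_tasks_by_arrival([ArrivingTask('a', 0), ArrivingTask('b', 3), ArrivingTask('c', 7)], 4)
#      returns [[a, b], [c]]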