# --- Code example #1 ---
def test_minimise_resources():
    """
    Run the elastic optimal solver online, checking after each batch that every
    server's recorded availability matches its capacity minus the resources
    consumed by previously allocated tasks, then minimise resource allocation.
    """
    model_dist = SyntheticModelDist(num_servers=8)
    tasks, servers = model_dist.generate_online(20, 4, 2)

    def custom_solver(_tasks: List[ElasticTask],
                      _servers: List[Server],
                      solver_time_limit: int = 3,
                      minimise_time_limit: int = 2):
        """
        A custom solver for the elastic optimal solver which then checks that resource allocation is valid then
            minimises resource allocation

        :param _tasks: List of tasks for the time interval
        :param _servers: List of servers
        :param solver_time_limit: elastic resource allocation time limit
        :param minimise_time_limit: Minimise resource allocation time limit
        """
        # Fix: filter and snapshot the `_servers` argument rather than the
        # enclosing `servers` closure, so the solver uses exactly the servers
        # it is given by `online_batch_solver` (identical here, but the
        # closure made the parameter silently unused).
        valid_servers = [
            server for server in _servers if 1 <= server.available_computation
            and 1 <= server.available_bandwidth
        ]
        # Snapshot availability before solving so it can be validated below.
        server_availability = {
            server: (server.available_computation, server.available_bandwidth)
            for server in _servers
        }
        elastic_optimal_solver(_tasks, valid_servers, solver_time_limit)

        # Each server's recorded availability must equal its capacity minus
        # the resources held by tasks allocated in earlier batches.
        for server, (compute_availability,
                     bandwidth_availability) in server_availability.items():
            server_old_tasks = [
                task for task in server.allocated_tasks if task not in _tasks
            ]
            max_bandwidth = server.bandwidth_capacity - sum(
                task.loading_speed + task.sending_speed
                for task in server_old_tasks)
            max_computation = server.computation_capacity - sum(
                task.compute_speed for task in server_old_tasks)
            assert compute_availability == max_computation, \
                f'Availability: {compute_availability}, actual: {max_computation}'
            assert bandwidth_availability == max_bandwidth, \
                f'Availability: {bandwidth_availability}, actual: {max_bandwidth}'

        # Shrink the allocated resources of the newly placed tasks.
        minimal_allocated_resources_solver(_tasks, valid_servers,
                                           minimise_time_limit)

    batched_tasks = generate_batch_tasks(tasks, 1, 20)
    optimal_result = online_batch_solver(batched_tasks,
                                         servers,
                                         1,
                                         'Online Elastic Optimal',
                                         custom_solver,
                                         solver_time_limit=2)
    print(f'Optimal - Social welfare: {optimal_result.social_welfare}')
    reset_model([], servers)
# --- Code example #2 ---
def test_branch_bound():
    """Solve a one-shot model with branch and bound, then with the elastic optimal solver."""
    model = SyntheticModelDist(4, 2)
    tasks, servers = model.generate_oneshot()

    # Branch and bound, printing lower-bound updates as it searches.
    bb_result = branch_bound_algorithm(tasks,
                                       servers,
                                       debug_update_lower_bound=True)
    bb_result.pretty_print()

    reset_model(tasks, servers)

    # Solve the identical instance with the elastic optimal solver for comparison.
    opt_result = elastic_optimal(tasks, servers, time_limit=200)
    opt_result.pretty_print()
# --- Code example #3 ---
def test_online_non_elastic_task():
    """Check that every batched non-elastic task can complete within its deadline."""
    model_dist = SyntheticModelDist(num_servers=8)
    tasks, servers = model_dist.generate_online(20, 4, 2)
    non_elastic_tasks = [
        NonElasticTask(task, SumSpeedPowResourcePriority()) for task in tasks
    ]

    for batch in generate_batch_tasks(non_elastic_tasks, 5, 20):
        for task in batch:
            s, w, r = task.loading_speed, task.compute_speed, task.sending_speed
            # Cross-multiplied form of:
            #   storage/s + computation/w + results/r <= deadline
            time_taken = (task.required_storage * w * r
                          + s * task.required_computation * r
                          + s * w * task.required_results_data)
            assert time_taken <= task.deadline * s * w * r
# --- Code example #4 ---
def test_online_model_generation(model_dist=None,
                                 time_steps: int = 250,
                                 batch_lengths: Iterable[int] = (1, 2, 4, 5),
                                 mean_arrival_rate: int = 4,
                                 std_arrival_rate: float = 2):
    """
    Check that `generate_batch_tasks` splits an online model into the expected
    number of batches, preserves all valid tasks, and places each task in the
    batch covering its auction time.

    :param model_dist: Model distribution; defaults to SyntheticModelDist(num_servers=8)
    :param time_steps: Number of online time steps to generate
    :param batch_lengths: Batch lengths to test
    :param mean_arrival_rate: Mean task arrival rate per time step
    :param std_arrival_rate: Std dev of the task arrival rate
    """
    # Fix: build the default model lazily. A default of
    # `SyntheticModelDist(num_servers=8)` in the signature is evaluated once at
    # import time and shared between every call (default-argument pitfall).
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)
    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    print(
        f'Number of tasks per time step: '
        f'{[len([task for task in tasks if task.auction_time == time_step]) for time_step in range(time_steps)]}'
    )

    for batch_length in batch_lengths:
        # Tasks whose deadline is shorter than the batch length can never run.
        valid_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_tasks = generate_batch_tasks(valid_tasks, batch_length,
                                             time_steps)
        print(
            f'Number of time steps: {time_steps}, batch length: {batch_length}, '
            f'number of batches: {len(batched_tasks)}')

        assert len(batched_tasks) == ceil(time_steps / batch_length)
        assert sum(len(batch_tasks)
                   for batch_tasks in batched_tasks) == len(valid_tasks)
        assert all(0 < task.value for _tasks in batched_tasks
                   for task in _tasks)
        # Fix: the failure message must list exactly the tasks violating the
        # assertion (deadline <= 0, not just deadline < 0).
        assert all(0 < task.deadline for _tasks in batched_tasks for task in _tasks), \
            [str(task) for _tasks in batched_tasks for task in _tasks if task.deadline <= 0]
        # Every task must land in the batch covering its auction time.
        assert all(
            batch_num * batch_length <= task.auction_time < (batch_num + 1) *
            batch_length for batch_num, _tasks in enumerate(batched_tasks)
            for task in _tasks)
# --- Code example #5 ---
def test_optimal_vs_greedy_dia(repeats: int = 5):
    """
    Compare the optimal and greedy decentralised iterative auctions, printing
    solve time and social welfare for each repeat.

    :param repeats: Number of generated instances to compare
    """
    print()
    model = SyntheticModelDist(7, 1)

    print(' Optimal    | Greedy')
    print('Time  | SW  | Time   | SW')
    for _ in range(repeats):
        tasks, servers = model.generate_oneshot()
        set_server_heuristics(servers, price_change=5)

        optimal = optimal_decentralised_iterative_auction(tasks,
                                                          servers,
                                                          time_limit=1)

        reset_model(tasks, servers)
        greedy = greedy_decentralised_iterative_auction(
            tasks, servers, PriceResourcePerDeadline(), SumPercentage())

        print(f'{optimal.solve_time} | {optimal.social_welfare} | '
              f'{greedy.solve_time} | {greedy.social_welfare}')
# --- Code example #6 ---
def test_greedy_policies():
    """
    Run the greedy algorithm for every combination of task priority, server
    selection, and resource allocation policy, then rank the policies by their
    mean social welfare.
    """
    print()
    model = SyntheticModelDist(20, 3)
    tasks, servers = model.generate_oneshot()

    policy_results = {}

    print('Policies')
    for value_density in task_priority_functions:
        for server_selection_policy in server_selection_functions:
            for resource_allocation_policy in resource_allocation_functions:
                reset_model(tasks, servers)

                result = greedy_algorithm(tasks, servers, value_density,
                                          server_selection_policy,
                                          resource_allocation_policy)
                print(
                    f'\t{result.algorithm} - {result.data["solve time"]} secs')
                # Group results by algorithm name.
                policy_results.setdefault(result.algorithm, []).append(result)

    print('\n\nSorted policies by social welfare')
    # Attach the mean social welfare and mean solve time to each algorithm.
    for algorithm, results in policy_results.items():
        mean_sw = float(np.mean([r.social_welfare for r in results]))
        mean_time = float(np.mean([r.solve_time for r in results]))
        policy_results[algorithm] = (results, mean_sw, mean_time)
    print(f'Algorithm | Avg SW | Avg Time | Social Welfare')
    for algorithm, (results, avg_sw,
                    avg_time) in sorted(policy_results.items(),
                                        key=lambda item: item[1][1]):
        welfare_list = " ".join([str(result.social_welfare) for result in results])
        print(f'{algorithm} | {avg_sw} | {avg_time} | [{welfare_list}]')
# --- Code example #7 ---
def test_batch_lengths(model_dist=None,
                       batch_lengths: Iterable[int] = (1, 5, 10, 15),
                       time_steps: int = 100,
                       mean_arrival_rate: int = 4,
                       std_arrival_rate: float = 2):
    """
    Run the greedy algorithm over an online model with several batch lengths,
    scaling server capacities with the batch length, and print the social
    welfare achieved for each.

    :param model_dist: Model distribution; defaults to SyntheticModelDist(num_servers=8)
    :param batch_lengths: Batch lengths to evaluate
    :param time_steps: Number of online time steps to generate
    :param mean_arrival_rate: Mean task arrival rate per time step
    :param std_arrival_rate: Std dev of the task arrival rate
    """
    # Fix: build the default model lazily. A default of
    # `SyntheticModelDist(num_servers=8)` in the signature is evaluated once at
    # import time and shared between every call (default-argument pitfall).
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)
    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    original_server_capacities = {
        server: (server.computation_capacity, server.bandwidth_capacity)
        for server in servers
    }
    results = []
    # Batch greedy algorithm
    for batch_length in batch_lengths:
        batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
        # Fix: don't shadow the outer `tasks` list inside the comprehension.
        flattened_tasks = [task for batch in batched_tasks for task in batch]

        # Scale the server capacities with the batch length
        for server in servers:
            compute_capacity, bandwidth_capacity = original_server_capacities[server]
            server.computation_capacity = compute_capacity * batch_length
            server.bandwidth_capacity = bandwidth_capacity * batch_length

        task_priority = UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority())
        server_selection_policy = SumResources()
        resource_allocation_policy = SumPowPercentage()
        name = f'Greedy {task_priority.name}, {server_selection_policy.name}, ' \
               f'{resource_allocation_policy.name}'
        greedy_result = online_batch_solver(
            batched_tasks,
            servers,
            batch_length,
            name,
            greedy_algorithm,
            task_priority=task_priority,
            server_selection_policy=server_selection_policy,
            resource_allocation_policy=resource_allocation_policy)
        results.append(greedy_result)
        print(
            f'Batch length: {batch_length}, social welfare percent: {greedy_result.percentage_social_welfare}, '
            f'social welfare: {greedy_result.social_welfare}')
        reset_model(flattened_tasks, servers)
# --- Code example #8 ---
def test_greedy_task_price():
    """
    Check that `greedy_task_price` computes a new task's price without mutating
    the server's existing allocations, and that `allocate_task` then applies
    the allocation correctly.
    """
    print()
    model = SyntheticModelDist(20, 3)
    tasks, servers = model.generate_oneshot()

    server = servers[0]

    resource_allocation_policy = SumPercentage()
    # Allocate up to 10 random tasks to the server at random prices.
    for _ in range(10):
        task = tasks.pop(rnd.randint(0, len(tasks) - 1))
        if server.can_run(task):
            s, w, r = resource_allocation_policy.allocate(task, server)
            server_task_allocation(server,
                                   task,
                                   s,
                                   w,
                                   r,
                                   price=rnd.randint(1, 10))

    # Snapshot the server state to compare against after pricing.
    copy_tasks = [copy(task) for task in server.allocated_tasks]
    copy_server = copy(server)
    print(
        f'Server revenue: {server.revenue} - '
        f'Task prices : {" ".join([str(task.price) for task in server.allocated_tasks])}'
    )

    new_task = tasks.pop(0)
    task_price, speeds = greedy_task_price(new_task,
                                           server,
                                           PriceResourcePerDeadline(),
                                           resource_allocation_policy,
                                           debug_revenue=True)
    print(f'Task Price: {task_price}')

    # Pricing must leave the existing allocations untouched.
    assert len(copy_tasks) == len(server.allocated_tasks)
    # Fix: the original asserted a bare list comprehension, which is always
    # truthy, so the per-task comparison was never actually checked; wrap the
    # generator in all().
    assert all(
        task.loading_speed == copy_task.loading_speed
        and task.compute_speed == copy_task.compute_speed
        and task.sending_speed == copy_task.sending_speed
        and task.price == copy_task.price and task.name == copy_task.name
        and task.value == copy_task.value
        for copy_task, task in zip(copy_tasks, server.allocated_tasks))
    assert server.revenue == copy_server.revenue and server.available_storage == copy_server.available_storage and \
           server.available_computation == copy_server.available_computation and \
           server.available_bandwidth == copy_server.available_bandwidth

    unallocated_tasks = []
    allocate_task(new_task, task_price, server, unallocated_tasks, speeds)

    # Allocating the priced task increases revenue by one and zeroes out any
    # tasks that had to be evicted.
    assert copy_server.revenue + 1 == server.revenue
    assert new_task.price == task_price
    assert all(task.loading_speed == 0 and task.compute_speed == 0
               and task.sending_speed == 0 and task.price == 0
               for task in unallocated_tasks)
    for task in server.allocated_tasks:
        copy_task = next(
            (copy_task
             for copy_task in copy_tasks if copy_task.name == task.name), None)
        if copy_task:
            # Task survived the re-allocation: it must be unchanged.
            assert task.loading_speed == copy_task.loading_speed and task.compute_speed == copy_task.compute_speed and \
                   task.sending_speed == copy_task.sending_speed and task.price == copy_task.price and \
                   task.value == copy_task.value and task.name == copy_task.name
        else:
            # Otherwise it must be the newly allocated task.
            assert task.loading_speed == new_task.loading_speed and task.compute_speed == new_task.compute_speed and \
                   task.sending_speed == new_task.sending_speed and task.price == new_task.price and \
                   task.value == new_task.value and task.name == new_task.name
# --- Code example #9 ---
def test_cp_optimality():
    """Solve a one-shot model with the elastic optimal solver and print the stored result."""
    model = SyntheticModelDist(20, 3)
    tasks, servers = model.generate_oneshot()

    result = elastic_optimal(tasks, servers, time_limit=10)
    print(result.store())