Example #1
0
def test_task_batching(model_dist=None,
                       time_steps: int = 10,
                       mean_arrival_rate: int = 4,
                       std_arrival_rate: float = 2):
    """
    Generates online tasks, groups them into batches of length 1, 2 and 3 and
    prints the auction times per batch and the deadlines of corresponding
    tasks across the three batch lengths.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param time_steps: Total number of time steps for task generation
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    """
    # The default model distribution is created per call; the previous
    # default-argument instance was evaluated once at import time and
    # shared across every call to this function
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)

    batched_tasks = generate_batch_tasks(tasks, 3, time_steps)
    for pos, batch_task in enumerate(batched_tasks):
        print(
            f'Time step: {3 * pos} - [{", ".join([str(task.auction_time) for task in batch_task])}]'
        )

    def flatten(tss):
        """
        Flatten all of the task from time based (2 dimensional) to 1 dimensional

        :param tss: Time series of batched tasks
        :return: List of tasks
        """
        return [t for ts in tss for t in ts]

    batch1_tasks = flatten(generate_batch_tasks(tasks, 1, time_steps))
    batch2_tasks = flatten(generate_batch_tasks(tasks, 2, time_steps))
    batch3_tasks = flatten(generate_batch_tasks(tasks, 3, time_steps))

    # Tasks appear in the same order in each flattened batch list, so zip
    # pairs up the same task under the three different batch lengths
    for task_1, task_2, task_3 in zip(batch1_tasks, batch2_tasks,
                                      batch3_tasks):
        print(
            f'Task: {task_1.name}, deadlines: [{task_1.deadline}, {task_2.deadline}, {task_3.deadline}]'
        )
Example #2
0
def test_online_server_capacities(model_dist=None,
                                  time_steps: int = 50,
                                  batch_length: int = 3,
                                  mean_arrival_rate: int = 4,
                                  std_arrival_rate: float = 2,
                                  capacities: float = 0.3):
    """
    Runs the greedy online batch solver with every server capacity scaled
    down by ``capacities`` and prints the resulting social welfare.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param time_steps: Total number of time steps for task generation
    :param batch_length: Length of each task batch
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    :param capacities: Multiplicative factor applied to each server capacity
    """
    # Create the default distribution per call instead of sharing a single
    # default-argument instance evaluated at import time
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    # Scale down all server capacities (truncated to integers)
    for server in servers:
        server.storage_capacity = int(server.storage_capacity * capacities)
        server.computation_capacity = int(server.computation_capacity *
                                          capacities)
        server.bandwidth_capacity = int(server.bandwidth_capacity * capacities)
    batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
    print(
        f'Tasks per batch time step: [{", ".join([str(len(batch_tasks)) for batch_tasks in batched_tasks])}]'
    )
    result = online_batch_solver(
        batched_tasks,
        servers,
        batch_length,
        'Greedy',
        greedy_algorithm,
        task_priority=UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority()),
        server_selection_policy=SumResources(),
        resource_allocation_policy=SumPowPercentage())
    print(f'Social welfare percentage: {result.percentage_social_welfare}')
    print(result.data)
Example #3
0
def test_online_model_generation(model_dist=None,
                                 time_steps: int = 250,
                                 batch_lengths: Iterable[int] = (1, 2, 4, 5),
                                 mean_arrival_rate: int = 4,
                                 std_arrival_rate: float = 2):
    """
    Checks the invariants of online task generation and batching: batch
    count, total task count, positive values/deadlines and that each task's
    auction time falls inside its batch's time window.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param time_steps: Total number of time steps for task generation
    :param batch_lengths: Batch lengths to check
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    """
    # Create the default distribution per call instead of sharing a single
    # default-argument instance evaluated at import time
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    print(
        f'Number of tasks per time step: '
        f'{[len([task for task in tasks if task.auction_time == time_step]) for time_step in range(time_steps)]}'
    )

    for batch_length in batch_lengths:
        # Only tasks whose deadline exceeds the batch length can be solved
        valid_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_tasks = generate_batch_tasks(valid_tasks, batch_length,
                                             time_steps)
        print(
            f'Number of time steps: {time_steps}, batch length: {batch_length}, '
            f'number of batches: {len(batched_tasks)}')

        assert len(batched_tasks) == ceil(time_steps / batch_length)
        assert sum(len(batch_tasks)
                   for batch_tasks in batched_tasks) == len(valid_tasks)
        assert all(0 < task.value for _tasks in batched_tasks
                   for task in _tasks)
        # The failure message matches the assertion's failing condition
        # (deadline <= 0, not just strictly negative)
        assert all(0 < task.deadline for _tasks in batched_tasks for task in _tasks), \
            [str(task) for _tasks in batched_tasks for task in _tasks if task.deadline <= 0]
        # Each task's auction time must lie within its batch's time window
        assert all(
            batch_num * batch_length <= task.auction_time < (batch_num + 1) *
            batch_length for batch_num, _tasks in enumerate(batched_tasks)
            for task in _tasks)
Example #4
0
def test_optimal_solutions(model_dist=None,
                           time_steps: int = 20,
                           mean_arrival_rate: int = 4,
                           std_arrival_rate: float = 2):
    """
    Compares the non-elastic optimal solver (batch length 1) with the greedy
    algorithm (batch length 4) on the same online model, printing the social
    welfare of each.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param time_steps: Total number of time steps for task generation
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    """
    # Create the default distribution per call instead of sharing a single
    # default-argument instance evaluated at import time
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    non_elastic_tasks = [
        NonElasticTask(task, SumSpeedPowResourcePriority()) for task in tasks
    ]

    non_elastic_batched_tasks = generate_batch_tasks(non_elastic_tasks, 1,
                                                     time_steps)
    non_elastic_optimal_result = online_batch_solver(
        non_elastic_batched_tasks,
        servers,
        1,
        'Non-elastic Optimal',
        non_elastic_optimal_solver,
        time_limit=2)
    print(
        f'\nNon-elastic Optimal - Social welfare: {non_elastic_optimal_result.social_welfare}'
    )
    # Clear server allocations before running the next algorithm
    reset_model([], servers)

    batched_tasks = generate_batch_tasks(tasks, 4, time_steps)
    greedy_result = online_batch_solver(
        batched_tasks,
        servers,
        4,
        'Greedy',
        greedy_algorithm,
        task_priority=UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority()),
        server_selection_policy=SumResources(),
        resource_allocation_policy=SumPowPercentage())
    print(f'Greedy - Social welfare: {greedy_result.social_welfare}')
Example #5
0
def test_minimise_resources():
    """
    Runs the elastic optimal solver through a custom solver that validates
    server resource availability bookkeeping before minimising the allocated
    resources, then prints the resulting social welfare.
    """
    model_dist = SyntheticModelDist(num_servers=8)
    tasks, servers = model_dist.generate_online(20, 4, 2)

    def custom_solver(_tasks: List[ElasticTask],
                      _servers: List[Server],
                      solver_time_limit: int = 3,
                      minimise_time_limit: int = 2):
        """
        A custom solver for the elastic optimal solver which then checks that resource allocation is valid then
            minimises resource allocation

        :param _tasks: List of tasks for the time interval
        :param _servers: List of servers
        :param solver_time_limit: elastic resource allocation time limit
        :param minimise_time_limit: Minimise resource allocation time limit
        """
        # Fixed: previously iterated the enclosing `servers` variable instead
        # of the `_servers` parameter, silently ignoring the argument
        valid_servers = [
            server for server in _servers if 1 <= server.available_computation
            and 1 <= server.available_bandwidth
        ]
        # Snapshot the availabilities before solving so they can be checked
        # against the capacities recomputed from old allocations afterwards
        server_availability = {
            server: (server.available_computation, server.available_bandwidth)
            for server in _servers
        }
        elastic_optimal_solver(_tasks, valid_servers, solver_time_limit)

        for server, (compute_availability,
                     bandwidth_availability) in server_availability.items():
            # Tasks allocated in earlier intervals, not part of this batch
            server_old_tasks = [
                task for task in server.allocated_tasks if task not in _tasks
            ]
            max_bandwidth = server.bandwidth_capacity - sum(
                task.loading_speed + task.sending_speed
                for task in server_old_tasks)
            max_computation = server.computation_capacity - sum(
                task.compute_speed for task in server_old_tasks)
            assert compute_availability == max_computation, \
                f'Availability: {compute_availability}, actual: {max_computation}'
            assert bandwidth_availability == max_bandwidth, \
                f'Availability: {bandwidth_availability}, actual: {max_bandwidth}'

        minimal_allocated_resources_solver(_tasks, valid_servers,
                                           minimise_time_limit)

    batched_tasks = generate_batch_tasks(tasks, 1, 20)
    optimal_result = online_batch_solver(batched_tasks,
                                         servers,
                                         1,
                                         'Online Elastic Optimal',
                                         custom_solver,
                                         solver_time_limit=2)
    print(f'Optimal - Social welfare: {optimal_result.social_welfare}')
    reset_model([], servers)
Example #6
0
def test_online_non_elastic_task():
    """
    Checks that every batched non-elastic task can finish loading, computing
    and sending its results within its deadline at its fixed speeds.
    """
    model_dist = SyntheticModelDist(num_servers=8)
    tasks, servers = model_dist.generate_online(20, 4, 2)
    non_elastic_tasks = [
        NonElasticTask(task, SumSpeedPowResourcePriority()) for task in tasks
    ]

    for batch in generate_batch_tasks(non_elastic_tasks, 5, 20):
        for task in batch:
            loading, compute, sending = (task.loading_speed,
                                         task.compute_speed,
                                         task.sending_speed)
            # Cross-multiplied form of storage/loading + computation/compute
            # + results/sending <= deadline, avoiding any division
            total = (task.required_storage * compute * sending +
                     loading * task.required_computation * sending +
                     loading * compute * task.required_results_data)
            assert total <= task.deadline * loading * compute * sending
Example #7
0
def test_batch_length(model_dist=None,
                      batch_lengths=(1, 2, 3),
                      time_steps: int = 20,
                      mean_arrival_rate: int = 4,
                      std_arrival_rate: float = 2):
    """
    Runs the greedy online batch solver for several batch lengths (scaling
    server capacities by the batch length) and prints the social welfare and
    allocated tasks for each.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param batch_lengths: Batch lengths to test
    :param time_steps: Total number of time steps for task generation
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    """
    # Create the default distribution per call instead of sharing a single
    # default-argument instance evaluated at import time
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    # Remember the unscaled capacities so each batch length starts fresh
    original_server_capacities = {
        server: (server.computation_capacity, server.bandwidth_capacity)
        for server in servers
    }

    # Batch greedy algorithm
    for batch_length in batch_lengths:
        batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
        # Renamed the comprehension variable; it previously shadowed `tasks`
        flattened_tasks = [task for batch in batched_tasks for task in batch]

        # Update the server capacities proportionally to the batch length
        for server in servers:
            server.computation_capacity = original_server_capacities[server][
                0] * batch_length
            server.bandwidth_capacity = original_server_capacities[server][
                1] * batch_length

        greedy_result = online_batch_solver(
            batched_tasks,
            servers,
            batch_length,
            '',
            greedy_algorithm,
            task_priority=UtilityDeadlinePerResourcePriority(
                SqrtResourcesPriority()),
            server_selection_policy=SumResources(),
            resource_allocation_policy=SumPowPercentage())
        print(
            f'Batch length: {batch_length} - social welfare: {greedy_result.social_welfare}, '
            f'percentage run: {greedy_result.percentage_tasks_allocated}')
        tasks_allocated = [
            task.name for task in flattened_tasks
            if task.running_server is not None
        ]
        print(
            f'Tasks allocated ({len(tasks_allocated)}): [{", ".join(tasks_allocated)}]'
        )
        reset_model(flattened_tasks, servers)
Example #8
0
def test_batch_lengths(model_dist=None,
                       batch_lengths: Iterable[int] = (1, 5, 10, 15),
                       time_steps: int = 100,
                       mean_arrival_rate: int = 4,
                       std_arrival_rate: float = 2):
    """
    Runs the greedy online batch solver for several batch lengths (scaling
    server capacities by the batch length) and prints the social welfare and
    its percentage for each.

    :param model_dist: Model distribution used to generate the online tasks
        and servers (defaults to ``SyntheticModelDist(num_servers=8)``)
    :param batch_lengths: Batch lengths to test
    :param time_steps: Total number of time steps for task generation
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation of the task arrival rate
    """
    # Create the default distribution per call instead of sharing a single
    # default-argument instance evaluated at import time
    if model_dist is None:
        model_dist = SyntheticModelDist(num_servers=8)

    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    # Remember the unscaled capacities so each batch length starts fresh
    original_server_capacities = {
        server: (server.computation_capacity, server.bandwidth_capacity)
        for server in servers
    }
    results = []  # Collected per batch length (currently only printed)
    # Batch greedy algorithm
    for batch_length in batch_lengths:
        batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
        # Renamed the comprehension variable; it previously shadowed `tasks`
        flattened_tasks = [task for batch in batched_tasks for task in batch]

        # Update the server capacities proportionally to the batch length
        for server in servers:
            server.computation_capacity = original_server_capacities[server][
                0] * batch_length
            server.bandwidth_capacity = original_server_capacities[server][
                1] * batch_length

        task_priority = UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority())
        server_selection_policy = SumResources()
        resource_allocation_policy = SumPowPercentage()
        name = f'Greedy {task_priority.name}, {server_selection_policy.name}, ' \
               f'{resource_allocation_policy.name}'
        greedy_result = online_batch_solver(
            batched_tasks,
            servers,
            batch_length,
            name,
            greedy_algorithm,
            task_priority=task_priority,
            server_selection_policy=server_selection_policy,
            resource_allocation_policy=resource_allocation_policy)
        results.append(greedy_result)
        print(
            f'Batch length: {batch_length}, social welfare percent: {greedy_result.percentage_social_welfare}, '
            f'social welfare: {greedy_result.social_welfare}')
        reset_model(flattened_tasks, servers)
Example #9
0
def online_evaluation(model_dist: ModelDist,
                      repeats: int = 20,
                      time_steps: int = 200,
                      mean_arrival_rate: float = 1,
                      std_arrival_rate: float = 2,
                      task_priority=None,
                      server_selection=None,
                      resource_allocation=None):
    """
    Evaluates the batch online

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_steps: Total number of time steps
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation arrival rate of tasks
    :param task_priority: The task prioritisation function (defaults to
        ``UtilityDeadlinePerResourcePriority(ResourceSumPriority())``)
    :param server_selection: Server selection policy (defaults to
        ``ProductResources()``)
    :param resource_allocation: Resource allocation policy (defaults to
        ``SumPowPercentage()``)
    """
    # Default policies are created per call; the previous default-argument
    # instances were evaluated once at import time and shared across calls
    if task_priority is None:
        task_priority = UtilityDeadlinePerResourcePriority(
            ResourceSumPriority())
    if server_selection is None:
        server_selection = ProductResources()
    if resource_allocation is None:
        resource_allocation = SumPowPercentage()

    print(
        f'Evaluates difference in performance between batch and online algorithm for {model_dist.name} model with '
        f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    print(
        f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate} with std: {std_arrival_rate}'
    )
    model_results = []

    filename = results_filename('online_resource_allocation', model_dist)
    greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, {resource_allocation.name}'
    batch_length = 1

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps,
                                                    mean_arrival_rate,
                                                    std_arrival_rate)
        algorithm_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        non_elastic_tasks = generate_non_elastic_tasks(tasks)

        # Only tasks whose deadline exceeds the batch length can be solved
        valid_elastic_tasks = [
            task for task in tasks if batch_length < task.deadline
        ]
        batched_elastic_tasks = generate_batch_tasks(valid_elastic_tasks,
                                                     batch_length, time_steps)

        valid_non_elastic_tasks = [
            task for task in non_elastic_tasks if batch_length < task.deadline
        ]
        batched_non_elastic_tasks = generate_batch_tasks(
            valid_non_elastic_tasks, batch_length, time_steps)

        # Flatten the tasks
        flattened_elastic_tasks = [
            task for tasks in batched_elastic_tasks for task in tasks
        ]
        flattened_non_elastic_tasks = [
            task for tasks in batched_non_elastic_tasks for task in tasks
        ]

        elastic_optimal_result = online_batch_solver(batched_elastic_tasks,
                                                     servers,
                                                     batch_length,
                                                     'Elastic Optimal',
                                                     elastic_optimal_solver,
                                                     time_limit=None)
        algorithm_results[
            elastic_optimal_result.algorithm] = elastic_optimal_result.store()
        reset_model(flattened_elastic_tasks, servers)

        non_elastic_optimal_result = online_batch_solver(
            batched_non_elastic_tasks,
            servers,
            batch_length,
            'Non-elastic Optimal',
            non_elastic_optimal_solver,
            time_limit=None)
        algorithm_results[non_elastic_optimal_result.
                          algorithm] = non_elastic_optimal_result.store()
        reset_model(flattened_non_elastic_tasks, servers)

        # Loop over all of the greedy policies permutations
        greedy_result = online_batch_solver(
            batched_elastic_tasks,
            servers,
            batch_length,
            greedy_name,
            greedy_algorithm,
            task_priority=task_priority,
            server_selection=server_selection,
            resource_allocation=resource_allocation)
        algorithm_results[greedy_result.algorithm] = greedy_result.store()
        reset_model(flattened_elastic_tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file after every repeat so partial runs
        # are preserved
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Example #10
0
def greedy_permutations(model_dist: ModelDist,
                        repeats: int = 20,
                        time_steps: int = 1000,
                        mean_arrival_rate: float = 2,
                        std_arrival_rate: float = 2,
                        batch_length: int = 1):
    """
    Evaluates the performance between greedy algorithms with different module functions

    :param model_dist: The model distribution used to test with
    :param repeats: The number of testing repeats that are computed
    :param time_steps: The total number of time steps for tasks to arrive at
    :param mean_arrival_rate: The mean arrival rate of tasks
    :param std_arrival_rate: The standard deviation of the arrival rate for the task
    :param batch_length: The batch length of the testing setting
    """
    print(f'Evaluates performance between different greedy permutations')
    print(
        f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate}, std: {std_arrival_rate}'
    )
    model_results = []

    filename = results_filename('online_greedy_permutations', model_dist)
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps,
                                                    mean_arrival_rate,
                                                    std_arrival_rate)
        algorithm_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }

        # Only tasks whose deadline exceeds the batch length can be solved
        valid_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_tasks = generate_batch_tasks(valid_tasks, batch_length,
                                             time_steps)
        flattened_tasks = [task for tasks in batched_tasks for task in tasks]

        # Removed a duplicate (ValuePriority, ProductResources,
        # SumPowPercentage) entry: its identical name overwrote the first
        # run's entry in algorithm_results, wasting a full solver run
        for task_priority, server_selection, resource_allocation in [
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             ProductResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             ProductResources(True), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceProductPriority()),
             ProductResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceProductPriority()),
             ProductResources(True), SumPowPercentage()),
            (ValuePriority(), ProductResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             SumResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             SumResources(True), SumPowPercentage())
        ]:
            greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, ' \
                          f'{resource_allocation.name}'
            greedy_result = online_batch_solver(
                batched_tasks,
                servers,
                batch_length,
                greedy_name,
                greedy_algorithm,
                task_priority=task_priority,
                server_selection=server_selection,
                resource_allocation=resource_allocation)
            algorithm_results[greedy_result.algorithm] = greedy_result.store()
            print(greedy_name)
            reset_model(flattened_tasks, servers)

        model_results.append(algorithm_results)

        # Save the results to the file after every repeat so partial runs
        # are preserved
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')