Code example #1
def dia_social_welfare_test(model_dist: ModelDist,
                            repeat: int,
                            repeats: int = 20):
    """
    Evaluates the results using the optimality

    :param model_dist: The model distribution
    :param repeat: The repeat of the testing
    :param repeats: The number of repeats
    """
    data = []
    filename = results_filename('testing', model_dist)
    for _ in range(repeats):
        tasks, servers = model_dist.generate_oneshot()
        model_results = {}

        optimal_result = elastic_optimal(tasks, servers, 30)
        model_results[optimal_result.algorithm] = optimal_result.store()
        reset_model(tasks, servers)

        for pos in range(5):
            set_server_heuristics(servers, price_change=3, initial_price=25)
            dia_result = optimal_decentralised_iterative_auction(
                tasks, servers, 2)
            model_results[f'DIA {pos}'] = dia_result.store()  # store() so the result is JSON serialisable
            reset_model(tasks, servers)

        data.append(model_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(data, file)
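
A minimal usage sketch of the test above, assuming the SyntheticModelDist constructor that appears in the later examples; the repeat index here is purely illustrative:

model_dist = SyntheticModelDist(num_tasks=20, num_servers=4)
dia_social_welfare_test(model_dist, repeat=0, repeats=5)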
Code example #2
def server_sizing(repeats: int = 20):
    """
    Evaluates how different server resource sizes (storage, computation and bandwidth means) affect the results

    :param repeats: The number of repeats
    """
    model_dist = AlibabaModelDist(20, 4)
    pretty_printer, server_scales = PrettyPrinter(), {}

    for mean_storage, mean_computation, mean_bandwidth in ((400, 50, 120), (400, 60, 150), (400, 70, 160)):
        model_dist.model['server distributions'] = [{
            "name": "custom",
            "probability": 1,
            "storage mean": mean_storage, "storage std": 30,
            "computation mean": mean_computation, "computation std": 8,
            "bandwidth mean": mean_bandwidth, "bandwidth std": 15
        }]
        model_results = []
        for _ in range(repeats):
            tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

            non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers, time_limit=60)
            algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store()
            reset_model(non_elastic_tasks, servers)

            greedy_permutations(tasks, servers, algorithm_results)

            model_results.append(algorithm_results)

        server_scales[f'{mean_storage}, {mean_computation}, {mean_bandwidth}'] = model_results

        with open('server_scaling_3.json', 'w') as file:
            json.dump(server_scales, file)
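
The json.dump above rewrites server_scaling_3.json after each server scale. A minimal sketch, assuming the file layout written above, of reading the results back for analysis:

import json

with open('server_scaling_3.json') as file:
    server_scales = json.load(file)
for scale, scale_results in server_scales.items():
    print(f'{scale}: {len(scale_results)} repeats')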
Code example #3
def non_uniform_server_heuristics(model_dist: ModelDist,
                                  repeats: int = 20,
                                  time_limit: int = 2,
                                  random_repeats: int = 10,
                                  price_change_mean: int = 4,
                                  price_change_std: int = 2,
                                  initial_price_mean: int = 25,
                                  initial_price_std: int = 4):
    """
    Evaluates the effect of the server heuristics when they are non-uniform (all servers don't use the same value)

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the decentralised iterative auction
    :param random_repeats: The number of random repeats for each model generated
    :param price_change_mean: The mean price change value
    :param price_change_std: The standard deviation of the price change value
    :param initial_price_mean: The mean initial price value
    :param initial_price_std: The standard deviation of the initial price value
    """
    print(
        f'DIA non-uniform heuristic investigation with initial price mean: {initial_price_mean} and '
        f'std: {initial_price_std}, price change mean: {price_change_mean} and price change std: {price_change_std}, '
        f'using {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_non_uniform_heuristic', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        set_server_heuristics(servers,
                              price_change=price_change_mean,
                              initial_price=initial_price_mean)
        dia_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit)
        algorithm_results['normal'] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        for random_repeat in range(random_repeats):
            for server in servers:
                server.price_change = max(
                    1, int(gauss(price_change_mean, price_change_std)))
                server.initial_price = max(
                    1, int(gauss(initial_price_mean, initial_price_std)))

            dia_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit)
            algorithm_results[f'repeat {random_repeat}'] = dia_result.store()
            dia_result.pretty_print()
            reset_model(tasks, servers)
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #4
def server_resource_ratio(model_dist: ModelDist, repeats: int = 25, run_elastic: bool = True,
                          run_non_elastic: bool = True, non_elastic_time_limit: Optional[int] = None,
                          ratios: Iterable[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    """
    Evaluates the difference in social welfare when the ratio of computational to bandwidth capacity is changed between
        different algorithms: greedy, elastic optimal, non-elastic optimal and server relaxed optimal

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param run_elastic: Whether to run the optimal elastic solver
    :param run_non_elastic: Whether to run the optimal non-elastic solver
    :param non_elastic_time_limit: The non-elastic optimal time limit
    :param ratios: List of ratios to test
    """
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('resource_ratio', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers, non_elastic_tasks, ratio_results = generate_evaluation_model(model_dist, pretty_printer)

        server_total_resources = {server: server.computation_capacity + server.bandwidth_capacity
                                  for server in servers}
        for ratio in ratios:
            algorithm_results = {}
            # Update server capacities
            for server in servers:
                server.update_capacities(int(server_total_resources[server] * ratio),
                                         int(server_total_resources[server] * (1 - ratio)))

            if run_elastic:
                # Finds the elastic optimal solution
                elastic_optimal_results = elastic_optimal(tasks, servers, time_limit=None)
                algorithm_results[elastic_optimal_results.algorithm] = elastic_optimal_results.store(ratio=ratio)
                pretty_printer.pprint(algorithm_results[elastic_optimal_results.algorithm])
                reset_model(tasks, servers)

            if run_non_elastic:
                # Find the non-elastic optimal solution
                non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers, time_limit=non_elastic_time_limit)
                algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store(ratio=ratio)
                non_elastic_results.pretty_print()
                reset_model(non_elastic_tasks, servers)

            # Loop over all of the greedy policies permutations
            greedy_permutations(tasks, servers, algorithm_results)

            ratio_results[f'ratio {ratio}'] = algorithm_results
        model_results.append(ratio_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
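
A worked sketch of the capacity split used above, with illustrative numbers that are not from the source: a server with 100 total resources evaluated at ratio 0.3.

total_resources, ratio = 100, 0.3
computation_capacity = int(total_resources * ratio)      # 30
bandwidth_capacity = int(total_resources * (1 - ratio))  # 70
assert (computation_capacity, bandwidth_capacity) == (30, 70)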
Code example #5
def test_minimise_resources():
    model_dist = SyntheticModelDist(num_servers=8)
    tasks, servers = model_dist.generate_online(20, 4, 2)

    def custom_solver(_tasks: List[ElasticTask],
                      _servers: List[Server],
                      solver_time_limit: int = 3,
                      minimise_time_limit: int = 2):
        """
        A custom solver that runs the elastic optimal solver, checks that the server resource availability is
            valid and then minimises the allocated resources

        :param _tasks: List of tasks for the time interval
        :param _servers: List of servers
        :param solver_time_limit: elastic resource allocation time limit
        :param minimise_time_limit: Minimise resource allocation time limit
        """
        valid_servers = [
            server for server in _servers if 1 <= server.available_computation
            and 1 <= server.available_bandwidth
        ]
        server_availability = {
            server: (server.available_computation, server.available_bandwidth)
            for server in _servers
        }
        elastic_optimal_solver(_tasks, valid_servers, solver_time_limit)

        for server, (compute_availability,
                     bandwidth_availability) in server_availability.items():
            server_old_tasks = [
                task for task in server.allocated_tasks if task not in _tasks
            ]
            max_bandwidth = server.bandwidth_capacity - sum(
                task.loading_speed + task.sending_speed
                for task in server_old_tasks)
            max_computation = server.computation_capacity - sum(
                task.compute_speed for task in server_old_tasks)
            assert compute_availability == max_computation, \
                f'Availability: {compute_availability}, actual: {max_computation}'
            assert bandwidth_availability == max_bandwidth, \
                f'Availability: {bandwidth_availability}, actual: {max_bandwidth}'

        minimal_allocated_resources_solver(_tasks, valid_servers,
                                           minimise_time_limit)

    batched_tasks = generate_batch_tasks(tasks, 1, 20)
    optimal_result = online_batch_solver(batched_tasks,
                                         servers,
                                         1,
                                         'Online Elastic Optimal',
                                         custom_solver,
                                         solver_time_limit=2)
    print(f'Optimal - Social welfare: {optimal_result.social_welfare}')
    reset_model([], servers)
Code example #6
def greedy_permutations(tasks: List[ElasticTask],
                        servers: List[Server],
                        results: Dict[str, Result],
                        prefix: str = ''):
    """
    Runs the greedy algorithm for every permutation of the task priority, server selection and resource
        allocation functions, storing each result under a prefixed algorithm name

    :param tasks: List of tasks
    :param servers: List of servers
    :param results: Dictionary in which to store each result
    :param prefix: Prefix for the result keys
    """
    for task_priority in task_priority_functions:
        for server_selection in server_selection_functions:
            for resource_allocation in resource_allocation_functions:
                result = greedy_algorithm(tasks, servers, task_priority,
                                          server_selection,
                                          resource_allocation)
                results[f'{prefix}{result.algorithm}'] = result.store()
                reset_model(tasks, servers)
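
The triple loop above is a Cartesian product over the three policy-function lists; an equivalent flattened formulation (a sketch, behaviour unchanged) using itertools.product:

from itertools import product

def greedy_permutations_flat(tasks, servers, results, prefix=''):
    # Same Cartesian product as the nested loops in greedy_permutations
    for task_priority, server_selection, resource_allocation in product(
            task_priority_functions, server_selection_functions, resource_allocation_functions):
        result = greedy_algorithm(tasks, servers, task_priority, server_selection, resource_allocation)
        results[f'{prefix}{result.algorithm}'] = result.store()
        reset_model(tasks, servers)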
Code example #7
def test_model_tasks(num_servers: int = 8):
    greedy_results, non_elastic_results = [], []
    for num_tasks in range(24, 60, 4):
        model = SyntheticModelDist(num_tasks, num_servers)
        tasks, servers = model.generate_oneshot()
        non_elastic_tasks = [
            NonElasticTask(task, SumSpeedPowResourcePriority())
            for task in tasks
        ]

        greedy_results.append([
            num_tasks,
            greedy_algorithm(tasks, servers,
                             UtilityDeadlinePerResourcePriority(),
                             SumResources(), SumPercentage())
        ])
        reset_model(tasks, servers)
        non_elastic_results.append(
            [num_tasks,
             non_elastic_optimal(non_elastic_tasks, servers, 3)])

    def print_results(results):
        """
        Print the results of an algorithm

        :param results: List of results
        """
        print(
            'Num of Tasks | Percent Tasks | Social Welfare % | Storage usage | Comp usage | Bandwidth usage'
        )
        for task_num, result in results:
            # noinspection PyTypeChecker
            print(
                f' {task_num:11} | {result.percentage_tasks_allocated:^13} | '
                f'{result.percentage_social_welfare:^22} | '
                f'{round(np.mean(list(result.server_storage_used.values())), 3):^13} | '
                f'{round(np.mean(list(result.server_computation_used.values())), 3):^10} | '
                f'{round(np.mean(list(result.server_bandwidth_used.values())), 3):10}'
            )

    print('\n\n\tGreedy algorithm')
    print_results(greedy_results)
    print('\n\tNon-elastic optimal results')
    print_results(non_elastic_results)

    print('\nNum of Tasks | Difference | Greedy SW | Non-elastic SW')
    for (num_tasks, greedy_result), (_, non_elastic_result) in zip(
            greedy_results, non_elastic_results):
        print(
            f' {num_tasks:11} | {non_elastic_result.social_welfare - greedy_result.social_welfare:10.3f} | '
            f'{greedy_result.social_welfare:9.3f} | {non_elastic_result.social_welfare:8.3f}'
        )
Code example #8
def test_branch_bound():
    model = SyntheticModelDist(4, 2)
    tasks, servers = model.generate_oneshot()

    branch_bound_result = branch_bound_algorithm(tasks,
                                                 servers,
                                                 debug_update_lower_bound=True)
    branch_bound_result.pretty_print()

    reset_model(tasks, servers)

    optimal_result = elastic_optimal(tasks, servers, time_limit=200)
    optimal_result.pretty_print()
Code example #9
def dia_heuristic_grid_search(model_dist: ModelDist,
                              repeats: int = 50,
                              time_limit: int = 4,
                              initial_prices: Iterable[int] = (0, 4, 8, 12),
                              price_changes: Iterable[int] = (1, 2, 4, 6)):
    """
    Evaluates the difference in results when the decentralised iterative auction uses different price change and
        initial price variables

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the DIA Auction
    :param initial_prices: The initial price for auctions
    :param price_changes: The price change of the servers
    """
    print(
        f'DIA Heuristic grid search with initial prices: {initial_prices}, price changes: {price_changes} '
        f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_heuristic_grid_search', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        for initial_price in initial_prices:
            for price_change in price_changes:
                set_server_heuristics(servers,
                                      price_change=price_change,
                                      initial_price=initial_price)

                results = optimal_decentralised_iterative_auction(
                    tasks, servers, time_limit)
                algorithm_results[
                    f'IP: {initial_price}, PC: {price_change}'] = results.store(
                        **{
                            'initial price': initial_price,
                            'price change': price_change
                        })
                results.pretty_print()
                reset_model(tasks, servers)

        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #10
def evolve_greedy_policies(model_dist: ModelDist,
                           iterations: int = 30,
                           population_size: int = 5):
    """
    Evolves the greedy policy to find the best policies

    :param model_dist: Model distribution
    :param iterations: Number of evolutions
    :param population_size: The population size
    """
    print(f'Evolves the greedy policies for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')

    eval_tasks, eval_servers = model_dist.generate_oneshot()
    lower_bound = greedy_algorithm(eval_tasks, eval_servers, ValuePriority(),
                                   ProductResources(),
                                   SumSpeed()).social_welfare
    print(f'Lower bound is {lower_bound}')
    reset_model(eval_tasks, eval_servers)

    evolution_strategy = CMAEvolutionStrategy(
        11 * [1], 0.2, {'population size': population_size})
    for iteration in range(iterations):
        suggestions = evolution_strategy.ask()
        tasks, servers = model_dist.generate_oneshot()

        solutions = []
        for i, suggestion in enumerate(suggestions):
            solutions.append(
                greedy_algorithm(
                    tasks, servers,
                    TaskPriorityEvoStrategy(i, *suggestion[:5]),
                    ServerSelectionEvoStrategy(i, *suggestion[5:8]),
                    ResourceAllocationEvoStrategy(
                        i, *suggestion[8:11])).social_welfare)
            reset_model(tasks, servers)

        evolution_strategy.tell(suggestions, solutions)
        evolution_strategy.disp()

        if iteration % 2 == 0:
            evaluation = greedy_algorithm(
                eval_tasks, eval_servers,
                TaskPriorityEvoStrategy(0, *suggestions[0][:5]),
                ServerSelectionEvoStrategy(0, *suggestions[0][5:8]),
                ResourceAllocationEvoStrategy(0, *suggestions[0][8:11]))
            print(f'Iter: {iteration} - {evaluation.social_welfare}')

    pprint.pprint(evolution_strategy.result())
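
Each CMA-ES suggestion above is an 11-dimensional vector that the loop splits across the three greedy policy components; a sketch of the slicing convention, with the parameter counts taken from the code above:

suggestion = [1.0] * 11
task_priority_params = suggestion[:5]          # TaskPriorityEvoStrategy arguments
server_selection_params = suggestion[5:8]      # ServerSelectionEvoStrategy arguments
resource_allocation_params = suggestion[8:11]  # ResourceAllocationEvoStrategy arguments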
Code example #11
def auction_evaluation(model_dist: ModelDist, repeats: int = 50, dia_time_limit: int = 3,
                       run_elastic: bool = True, run_non_elastic: bool = True):
    """
    Evaluation of different auction algorithms

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param dia_time_limit: Decentralised iterative auction time limit
    :param run_elastic: Whether to run the elastic VCG auction
    :param run_non_elastic: Whether to run the non-elastic VCG auction
    """
    print(f'Evaluates the auction algorithms (cva, dia, elastic vcg, non-elastic vcg) for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('auctions', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

        if run_elastic:
            # Elastic VCG Auctions
            vcg_result = elastic_vcg_auction(tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic:
            # Non-elastic VCG auction
            vcg_result = non_elastic_vcg_auction(non_elastic_tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Decentralised Iterative auction
        dia_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=dia_time_limit)
        algorithm_results[dia_result.algorithm] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        # Critical Value Auction
        for task_priority in task_priority_functions:
            for server_selection_policy in server_selection_functions:
                for resource_allocation_policy in resource_allocation_functions:
                    critical_value_result = critical_value_auction(tasks, servers, task_priority,
                                                                   server_selection_policy, resource_allocation_policy)
                    algorithm_results[critical_value_result.algorithm] = critical_value_result.store()
                    critical_value_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #12
def test_batch_length(model_dist=SyntheticModelDist(num_servers=8),
                      batch_lengths=(1, 2, 3),
                      time_steps: int = 20,
                      mean_arrival_rate: int = 4,
                      std_arrival_rate: float = 2):
    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    original_server_capacities = {
        server: (server.computation_capacity, server.bandwidth_capacity)
        for server in servers
    }

    # Batch greedy algorithm
    for batch_length in batch_lengths:
        batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
        flattened_tasks = [task for tasks in batched_tasks for task in tasks]

        # Update the server capacities, scaled by the batch length
        for server in servers:
            computation_capacity, bandwidth_capacity = original_server_capacities[server]
            server.computation_capacity = computation_capacity * batch_length
            server.bandwidth_capacity = bandwidth_capacity * batch_length

        greedy_result = online_batch_solver(
            batched_tasks,
            servers,
            batch_length,
            '',
            greedy_algorithm,
            task_priority=UtilityDeadlinePerResourcePriority(
                SqrtResourcesPriority()),
            server_selection_policy=SumResources(),
            resource_allocation_policy=SumPowPercentage())
        print(
            f'Batch length: {batch_length} - social welfare: {greedy_result.social_welfare}, '
            f'percentage run: {greedy_result.percentage_tasks_allocated}')
        tasks_allocated = [
            task.name for task in flattened_tasks
            if task.running_server is not None
        ]
        print(
            f'Tasks allocated ({len(tasks_allocated)}): [{", ".join(tasks_allocated)}]'
        )
        reset_model(flattened_tasks, servers)
Code example #13
def test_batch_lengths(model_dist=SyntheticModelDist(num_servers=8),
                       batch_lengths: Iterable[int] = (1, 5, 10, 15),
                       time_steps: int = 100,
                       mean_arrival_rate: int = 4,
                       std_arrival_rate: float = 2):
    print()
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    original_server_capacities = {
        server: (server.computation_capacity, server.bandwidth_capacity)
        for server in servers
    }
    results = []
    # Batch greedy algorithm
    for batch_length in batch_lengths:
        batched_tasks = generate_batch_tasks(tasks, batch_length, time_steps)
        flattened_tasks = [task for tasks in batched_tasks for task in tasks]

        # Update the server capacities, scaled by the batch length
        for server in servers:
            computation_capacity, bandwidth_capacity = original_server_capacities[server]
            server.computation_capacity = computation_capacity * batch_length
            server.bandwidth_capacity = bandwidth_capacity * batch_length

        task_priority = UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority())
        server_selection_policy = SumResources()
        resource_allocation_policy = SumPowPercentage()
        name = f'Greedy {task_priority.name}, {server_selection_policy.name}, ' \
               f'{resource_allocation_policy.name}'
        greedy_result = online_batch_solver(
            batched_tasks,
            servers,
            batch_length,
            name,
            greedy_algorithm,
            task_priority=task_priority,
            server_selection_policy=server_selection_policy,
            resource_allocation_policy=resource_allocation_policy)
        results.append(greedy_result)
        print(
            f'Batch length: {batch_length}, social welfare percent: {greedy_result.percentage_social_welfare}, '
            f'social welfare: {greedy_result.social_welfare}')
        reset_model(flattened_tasks, servers)
Code example #14
def test_optimal_time_limit(model_dist: ModelDist,
                            time_limits: Sequence[int] = (10, 30, 60, 5 * 60,
                                                          15 * 60, 60 * 60,
                                                          24 * 60 * 60)):
    tasks, servers = model_dist.generate_oneshot()

    print('Models')
    print_model(tasks, servers)

    for time_limit in time_limits:
        result = elastic_optimal_solver(tasks, servers, time_limit)
        reset_model(tasks, servers)

        print(
            f'\tTime limit: {time_limit}, solve status: {result.data["solve status"]}, '
            f'social welfare: {result.social_welfare}, solve time: {result.solve_time}')
        if result.data['solve status'] == 'Optimal':
            break
Code example #15
def test_optimal_solutions(model_dist=SyntheticModelDist(num_servers=8),
                           time_steps: int = 20,
                           mean_arrival_rate: int = 4,
                           std_arrival_rate: float = 2):
    tasks, servers = model_dist.generate_online(time_steps, mean_arrival_rate,
                                                std_arrival_rate)
    non_elastic_tasks = [
        NonElasticTask(task, SumSpeedPowResourcePriority()) for task in tasks
    ]

    # batched_tasks = generate_batch_tasks(tasks, 1, time_steps)
    # optimal_result = online_batch_solver(batched_tasks, servers, 1, 'Online Elastic Optimal',
    #                                      minimal_allocated_resources_solver, solver_time_limit=2)
    # print(f'Optimal - Social welfare: {optimal_result.social_welfare}')
    # reset_model([], servers)

    non_elastic_batched_tasks = generate_batch_tasks(non_elastic_tasks, 1,
                                                     time_steps)
    non_elastic_optimal_result = online_batch_solver(
        non_elastic_batched_tasks,
        servers,
        1,
        'Non-elastic Optimal',
        non_elastic_optimal_solver,
        time_limit=2)
    print(
        f'\nNon-elastic Optimal - Social welfare: {non_elastic_optimal_result.social_welfare}'
    )
    reset_model([], servers)

    batched_tasks = generate_batch_tasks(tasks, 4, time_steps)
    greedy_result = online_batch_solver(
        batched_tasks,
        servers,
        4,
        'Greedy',
        greedy_algorithm,
        task_priority=UtilityDeadlinePerResourcePriority(
            SqrtResourcesPriority()),
        server_selection_policy=SumResources(),
        resource_allocation_policy=SumPowPercentage())
    print(f'Greedy - Social welfare: {greedy_result.social_welfare}')
Code example #16
def greedy_task_price(new_task: ElasticTask, server: Server, price_density: PriceDensity,
                      resource_allocation_policy: ResourceAllocation, debug_revenue: bool = False):
    """
    Calculates the task price using the greedy algorithm

    :param new_task: The new task
    :param server: Server
    :param price_density: Price density function
    :param resource_allocation_policy: Resource allocation policy
    :param debug_revenue: Whether to debug the revenue
    :return: Tuple of task price and possible speeds
    """
    assert new_task.price == 0
    current_speeds = {task: (task.loading_speed, task.compute_speed, task.sending_speed)
                      for task in server.allocated_tasks}
    tasks = server.allocated_tasks[:]
    server_revenue = server.revenue
    reset_model(server.allocated_tasks, (server,), forget_prices=False)

    s, w, r = resource_allocation_policy.allocate(new_task, server)
    server_task_allocation(server, new_task, s, w, r)

    for task in sorted(tasks, key=lambda task: price_density.evaluate(task), reverse=True):
        if server.can_run(task):
            s, w, r = resource_allocation_policy.allocate(task, server)
            server_task_allocation(server, task, s, w, r)

    task_price = max(server_revenue - server.revenue + server.price_change, server.initial_price)
    debug(f'Original revenue: {server_revenue}, new revenue: {server.revenue}, price change: {server.price_change}',
          debug_revenue)
    possible_speeds = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed, task.running_server is not None)
        for task in tasks + [new_task]}

    reset_model(current_speeds.keys(), (server,), forget_prices=False)
    new_task.reset_allocation()

    for task, (loading, compute, sending) in current_speeds.items():
        server_task_allocation(server, task, loading, compute, sending)

    return task_price, possible_speeds
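
The pricing rule above charges the greater of the server's marginal revenue loss plus its price change and its initial price. A worked sketch with illustrative numbers that are not from the source:

server_revenue, new_revenue = 40, 32  # revenue before and after re-packing the server
price_change, initial_price = 3, 25
task_price = max(server_revenue - new_revenue + price_change, initial_price)
assert task_price == 25  # the initial price dominates the marginal loss of 8 plus the price change of 3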
Code example #17
def dia_repeat(model_dist: ModelDist,
               repeats: int = 25,
               auction_repeats: int = 5,
               time_limit: int = 2,
               price_change: int = 3,
               initial_price: int = 25):
    """
    Tests the decentralised iterative auction by repeating the auction to see if the same local / global maximum is
        achieved

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param auction_repeats: The number of auction repeats
    :param time_limit: The auction time limit
    :param price_change: Price change
    :param initial_price: The initial price
    """
    print('Evaluation of DIA by repeating the auction')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('repeat_dia', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, repeat_results = generate_evaluation_model(
            model_dist, pretty_printer)
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        for auction_repeat in range(auction_repeats):
            reset_model(tasks, servers)
            auction_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit=time_limit)
            auction_result.pretty_print()
            repeat_results[f'repeat {auction_repeat}'] = auction_result.store()

        model_results.append(repeat_results)
        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #18
def test_optimal_vs_greedy_dia(repeats: int = 5):
    print()
    model = SyntheticModelDist(7, 1)

    print(' Optimal    | Greedy')
    print('Time  | SW  | Time   | SW')
    for repeat in range(repeats):
        tasks, servers = model.generate_oneshot()
        set_server_heuristics(servers, price_change=5)

        optimal_result = optimal_decentralised_iterative_auction(tasks,
                                                                 servers,
                                                                 time_limit=1)

        reset_model(tasks, servers)
        greedy_result = greedy_decentralised_iterative_auction(
            tasks, servers, PriceResourcePerDeadline(), SumPercentage())

        print(
            f'{optimal_result.solve_time} | {optimal_result.social_welfare} | '
            f'{greedy_result.solve_time} | {greedy_result.social_welfare}')
Code example #19
def test_greedy_policies():
    print()
    model = SyntheticModelDist(20, 3)
    tasks, servers = model.generate_oneshot()

    policy_results = {}

    print('Policies')
    for value_density in task_priority_functions:
        for server_selection_policy in server_selection_functions:
            for resource_allocation_policy in resource_allocation_functions:
                reset_model(tasks, servers)

                result = greedy_algorithm(tasks, servers, value_density,
                                          server_selection_policy,
                                          resource_allocation_policy)
                print(
                    f'\t{result.algorithm} - {result.data["solve time"]} secs')
                if result.algorithm in policy_results:
                    policy_results[result.algorithm].append(result)
                else:
                    policy_results[result.algorithm] = [result]

    print('\n\nSorted policies by social welfare')
    for algorithm, results in policy_results.items():
        policy_results[algorithm] = (
            results,
            float(np.mean([r.social_welfare for r in results])),
            float(np.mean([r.solve_time for r in results])))
    print('Algorithm | Avg SW | Avg Time | Social Welfare')
    for algorithm, (results, avg_sw,
                    avg_time) in sorted(policy_results.items(),
                                        key=lambda r: r[1][1]):
        print(
            f'{algorithm} | {avg_sw} | {avg_time} | [{" ".join([str(result.social_welfare) for result in results])}]'
        )
Code example #20
def lower_bound_testing(model_dist: ModelDist, repeats: int = 50):
    """
    Compares the lower bound of the greedy algorithm to the best greedy algorithm

    :param model_dist: Model distribution
    :param repeats: The number of repeats
    """
    print(f'Evaluates the greedy algorithm for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('lower_bound', model_dist)

    lb_task_functions = task_priority_functions + [ValuePriority()]
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        # Loop over all of the greedy policies permutations
        for task_priority in lb_task_functions:
            for server_selection in server_selection_functions:
                for resource_allocation in resource_allocation_functions:
                    greedy_result = greedy_algorithm(tasks, servers,
                                                     task_priority,
                                                     server_selection,
                                                     resource_allocation)
                    algorithm_results[
                        greedy_result.algorithm] = greedy_result.store()
                    greedy_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #21
def foreknowledge_evaluation(model_dist: AlibabaModelDist, repeats: int = 50, run_elastic: bool = False):
    """
    Evaluates the benefit of foreknowledge of tasks compared to the requested tasks

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param run_elastic: Whether to run the elastic optimal solvers
    """
    filename = results_filename('foreknowledge', model_dist)
    model_results = []
    for _ in range(repeats):
        servers = [model_dist.generate_server(server_id) for server_id in range(model_dist.num_servers)]
        foreknowledge_tasks, requested_tasks = model_dist.generate_foreknowledge_requested_tasks(
            servers, model_dist.num_tasks)
        non_elastic_foreknowledge_tasks = generate_non_elastic_tasks(foreknowledge_tasks)
        non_elastic_requested_tasks = generate_non_elastic_tasks(requested_tasks)

        algorithm_results = {
            'model': {'foreknowledge tasks': [foreknowledge_task.save() for foreknowledge_task in foreknowledge_tasks],
                      'requested tasks': [requested_task.save() for requested_task in requested_tasks],
                      'servers': [server.save() for server in servers]}}

        if run_elastic:
            results = elastic_optimal(foreknowledge_tasks, servers, time_limit=None)
            algorithm_results['foreknowledge elastic optimal'] = results.store()
            reset_model(foreknowledge_tasks, servers)

            results = elastic_optimal(requested_tasks, servers, time_limit=None)
            algorithm_results['requested elastic optimal'] = results.store()
            reset_model(requested_tasks, servers)

        results = non_elastic_optimal(non_elastic_foreknowledge_tasks, servers, time_limit=None)
        algorithm_results['foreknowledge non-elastic optimal'] = results.store()
        reset_model(non_elastic_foreknowledge_tasks, servers)

        results = non_elastic_optimal(non_elastic_requested_tasks, servers, time_limit=None)
        algorithm_results['requested non-elastic optimal'] = results.store()
        reset_model(non_elastic_requested_tasks, servers)

        greedy_permutations(foreknowledge_tasks, servers, algorithm_results, 'foreknowledge ')
        greedy_permutations(requested_tasks, servers, algorithm_results, 'requested ')

        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished')
Code example #22
def test_optimal_solution():
    model_dist = SyntheticModelDist(num_tasks=20, num_servers=4)
    tasks, servers = model_dist.generate_oneshot()
    non_elastic_tasks = generate_non_elastic_tasks(tasks)

    greedy_result = greedy_algorithm(tasks, servers,
                                     UtilityDeadlinePerResourcePriority(),
                                     SumResources(), SumPercentage())
    print(f'\nGreedy - {greedy_result.social_welfare}')
    reset_model(tasks, servers)

    optimal_result = elastic_optimal(tasks, servers, 5)
    print(f'Optimal - {optimal_result.social_welfare}')
    reset_model(tasks, servers)

    server_relaxed_result = server_relaxed_elastic_optimal(tasks, servers, 5)
    print(f'Server relaxed - {server_relaxed_result.social_welfare}')
    reset_model(tasks, servers)

    non_elastic_optimal_result = non_elastic_optimal(non_elastic_tasks,
                                                     servers, 5)
    print(f'Non-elastic Optimal - {non_elastic_optimal_result.social_welfare}')
    reset_model(non_elastic_tasks, servers)
Code example #23
def greedy_evaluation(model_dist: ModelDist,
                      repeats: int = 50,
                      run_elastic_optimal: bool = True,
                      run_non_elastic_optimal: bool = True,
                      run_server_relaxed_optimal: bool = True):
    """
    Evaluation of different greedy algorithms

    :param model_dist: The model distribution
    :param repeats: Number of model runs
    :param run_elastic_optimal: Whether to run the optimal elastic solver
    :param run_non_elastic_optimal: Whether to run the optimal non-elastic solver
    :param run_server_relaxed_optimal: Whether to run the server relaxed elastic solver
    """
    print(
        f'Evaluates the greedy algorithms (plus elastic, non-elastic and server relaxed optimal solutions) '
        f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('greedy', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        if run_elastic_optimal:
            # Find the optimal solution
            elastic_optimal_result = elastic_optimal(tasks,
                                                     servers,
                                                     time_limit=None)
            algorithm_results[elastic_optimal_result.algorithm] = elastic_optimal_result.store()
            elastic_optimal_result.pretty_print()
            reset_model(tasks, servers)

        if run_server_relaxed_optimal:
            # Find the relaxed solution
            relaxed_result = server_relaxed_elastic_optimal(tasks,
                                                            servers,
                                                            time_limit=None)
            algorithm_results[relaxed_result.algorithm] = relaxed_result.store()
            relaxed_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic_optimal:
            # Find the non-elastic solution
            non_elastic_optimal_result = non_elastic_optimal(non_elastic_tasks,
                                                             servers,
                                                             time_limit=None)
            algorithm_results[non_elastic_optimal_result.algorithm] = non_elastic_optimal_result.store()
            non_elastic_optimal_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Loop over all of the greedy policies permutations
        greedy_permutations(tasks, servers, algorithm_results)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
Code example #24
def mutation_grid_search(model_dist: ModelDist,
                         percent: float = 0.10,
                         time_limit: int = 3,
                         price_change: int = 3,
                         initial_price: int = 30):
    """
    Attempts a grid search version of the mutation testing above where a single task is mutated in every possible way
        within a particular percentage range, to check that the random testing is not missing anything

    :param model_dist: The model distribution to generate servers and tasks
    :param percent: The percentage by which mutations can occur within
    :param time_limit: The time limit for the optimal decentralised iterative auction
    :param price_change: The price change of the servers
    :param initial_price: The initial price for the servers
    """
    print('Completes a grid search of a task known to achieve better results')
    filename = results_filename('mutation_grid_search', model_dist)
    positive_percent, negative_percent = 1 + percent, 1 - percent

    # Generate the tasks and servers
    tasks, servers = model_dist.generate_oneshot()
    set_server_heuristics(servers,
                          price_change=price_change,
                          initial_price=initial_price)

    # The mutation results
    mutation_results = {
        'model': {
            'tasks': [task.save() for task in tasks],
            'servers': [server.save() for server in servers]
        }
    }

    no_mutation_dia = optimal_decentralised_iterative_auction(
        tasks, servers, time_limit=time_limit)
    no_mutation_dia.pretty_print()
    task = next(task for task in tasks if task.running_server is not None)
    mutation_results['no mutation'] = no_mutation_dia.store(
        **{
            'allocated': task.running_server is not None,
            'task price': task.price
        })

    # The mutated task is effectively randomly selected (given the tasks are randomly generated); calculate the
    #   number of grid search permutations over its resource requirements and deadline
    permutations = ((int(task.required_storage * positive_percent) + 1) - task.required_storage) * \
                   ((int(task.required_computation * positive_percent) + 1) - task.required_computation) * \
                   ((int(task.required_results_data * positive_percent) + 1) - task.required_results_data) * \
                   ((task.deadline + 1) - int(task.deadline * negative_percent))
    print(
        f'Number of permutations: {permutations}, original solve time: {no_mutation_dia.solve_time}, '
        f'estimated time: {round(permutations * no_mutation_dia.solve_time / 60, 1)} minutes'
    )
    reset_model(tasks, servers)
    mutation_pos = 0
    # Loop over all of the permutations that the task requirement resources have up to the mutate percentage
    for required_storage in range(
            task.required_storage,
            int(task.required_storage * positive_percent) + 1):
        for required_computation in range(
                task.required_computation,
                int(task.required_computation * positive_percent) + 1):
            for required_results_data in range(
                    task.required_results_data,
                    int(task.required_results_data * positive_percent) + 1):
                for deadline in range(int(task.deadline * negative_percent),
                                      task.deadline + 1):
                    # Create the new mutated task and create new tasks list with the mutant task replacing the task
                    mutant_task = ElasticTask(
                        f'mutated {task.name}',
                        required_storage=required_storage,
                        required_computation=required_computation,
                        required_results_data=required_results_data,
                        deadline=deadline,
                        value=task.value)
                    tasks.append(mutant_task)

                    # Calculate the task price with the mutated task
                    mutated_result = optimal_decentralised_iterative_auction(
                        tasks, servers, time_limit)
                    mutated_result.pretty_print()
                    mutation_results[
                        f'Mutation {mutation_pos}'] = mutated_result.store(
                            **{
                                'mutated task': task.name,
                                'task price': mutant_task.price,
                                'required storage': required_storage,
                                'required computation': required_computation,
                                'required results data': required_results_data,
                                'deadline': deadline,
                                'allocated': mutant_task.running_server
                                is not None
                            })
                    mutation_pos += 1

                    # Remove the mutant task from the task list and reset the model
                    tasks.remove(mutant_task)
                    reset_model(tasks, servers)

                    # Save all of the results to a file
                    with open(filename, 'w') as file:
                        json.dump(mutation_results, file)
    print('Finished running')
Code example #25
def vcg_solver(tasks: List[ElasticTask],
               servers: List[Server],
               solver: Callable,
               debug_running: bool = False) -> Optional[CpoSolveResult]:
    """
    VCG auction solver

    :param tasks: List of tasks
    :param servers: List of servers
    :param solver: Solver to find solution
    :param debug_running: Whether to debug the running algorithm
    :return: The optimal solver results, else None if the solver failed
    """
    # Price information
    task_prices: Dict[ElasticTask, float] = {}

    # Find the optimal solution
    debug('Running optimal solution', debug_running)
    optimal_results = solver(tasks, servers)
    if optimal_results is None:
        print(f'Optimal solver failed')
        return None
    optimal_social_welfare = sum(task.value for task in tasks
                                 if task.running_server)
    debug(f'Optimal social welfare: {optimal_social_welfare}', debug_running)

    # Save the task and server information from the optimal solution
    allocated_tasks = [task for task in tasks if task.running_server]
    task_allocation: Dict[ElasticTask, Tuple[int, int, int, Server]] = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed,
               task.running_server)
        for task in allocated_tasks
    }

    debug(
        f"Allocated tasks: {', '.join([task.name for task in allocated_tasks])}",
        debug_running)

    # For each allocated task, find the sum of values if the task doesn't exist
    for task in allocated_tasks:
        # Reset the model and remove the task from the task list
        reset_model(tasks, servers)
        tasks_prime = list_copy_remove(tasks, task)

        # Find the optimal solution where the task doesn't exist
        debug(f'Solving without task {task.name}', debug_running)
        prime_results = solver(tasks_prime, servers)
        if prime_results is None:
            print(f'Failed for task: {task.name}')
            return None
        else:
            task_prices[task] = optimal_social_welfare - sum(
                prime_task.value for prime_task in tasks_prime if prime_task.running_server)
            debug(
                f'{task.name} Task: £{task_prices[task]:.1f}, Value: {task.value} ',
                debug_running)

    # Reset the model and reallocate all of the tasks with their info from the original optimal solution
    reset_model(tasks, servers)
    for task, (s, w, r, server) in task_allocation.items():
        server_task_allocation(server, task, s, w, r, price=task_prices[task])

    return optimal_results
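
A minimal usage sketch; wrapping elastic_optimal_solver (seen in the earlier examples with a time limit argument) in a lambda is an assumption about a suitable solver callable:

solve_results = vcg_solver(tasks, servers,
                           solver=lambda ts, ss: elastic_optimal_solver(ts, ss, 60),
                           debug_running=True)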
Code example #26
def critical_value_auction(tasks: List[ElasticTask],
                           servers: List[Server],
                           value_density: TaskPriority,
                           server_selection_policy: ServerSelection,
                           resource_allocation_policy: ResourceAllocation,
                           debug_initial_allocation: bool = False,
                           debug_critical_value: bool = False) -> Result:
    """
    Run the Critical value auction

    :param tasks: List of tasks
    :param servers: List of servers
    :param value_density: Value density function
    :param server_selection_policy: Server selection function
    :param resource_allocation_policy: Resource allocation function
    :param debug_initial_allocation: Whether to debug the initial allocation
    :param debug_critical_value: Whether to debug the critical value
    :return: The results from the auction
    """
    start_time = time()

    valued_tasks: Dict[ElasticTask, float] = {
        task: value_density.evaluate(task)
        for task in tasks
    }
    ranked_tasks: List[ElasticTask] = sorted(valued_tasks,
                                             key=lambda j: valued_tasks[j],
                                             reverse=True)

    # Runs the greedy algorithm
    allocate_tasks(ranked_tasks, servers, server_selection_policy,
                   resource_allocation_policy)
    allocation_data: Dict[ElasticTask, Tuple[int, int, int, Server]] = {
        task: (task.loading_speed, task.compute_speed, task.sending_speed,
               task.running_server)
        for task in ranked_tasks if task.running_server
    }

    if debug_initial_allocation:
        max_name_len = max(len(task.name) for task in tasks)
        print(f"{'Task':<{max_name_len}} | s | w | r | server")
        for task, (s, w, r, server) in allocation_data.items():
            print(f'{task.name:<{max_name_len}}|{s:3}|{w:3}|{r:3}|{server.name}')

    reset_model(tasks, servers)

    # Loop through each task allocated and find the critical value for the task
    for critical_task in allocation_data.keys():
        # Remove the task from the ranked tasks and save the original position
        critical_pos = ranked_tasks.index(critical_task)
        ranked_tasks.remove(critical_task)

        # Loop though the tasks in order checking if the task can be allocated at any point
        for task_pos, task in enumerate(ranked_tasks):
            # If any of the servers can allocate the critical task then allocate the current task to a server
            if any(server.can_run(critical_task) for server in servers):
                server = server_selection_policy.select(task, servers)
                if server:  # There may not be a server that can allocate the task
                    s, w, r = resource_allocation_policy.allocate(task, server)
                    server_task_allocation(server, task, s, w, r)
            else:
                # If the critical task can't be allocated then the density of the last allocated task is found
                #   and the critical value is the inverse of the value density at that density.
                #   If the task can always run then the price stays at zero, the default, so no changes are needed
                critical_task_density = valued_tasks[ranked_tasks[task_pos - 1]]
                critical_task.price = round(
                    value_density.inverse(critical_task, critical_task_density), 3)
                break

        debug(
            f'{critical_task.name} Task critical value: {critical_task.price:.3f}',
            debug_critical_value)

        # Re-insert the task into the ranked tasks at its original position and reset the model, without
        #   forgetting the new critical task price
        ranked_tasks.insert(critical_pos, critical_task)
        reset_model(tasks, servers, forget_prices=False)

    # Allocate the tasks and set the price to the critical value
    for task, (s, w, r, server) in allocation_data.items():
        server_task_allocation(server, task, s, w, r)

    algorithm_name = f'Critical Value Auction {value_density.name}, ' \
                     f'{server_selection_policy.name}, {resource_allocation_policy.name}'
    return Result(algorithm_name,
                  tasks,
                  servers,
                  time() - start_time,
                  is_auction=True,
                  **{
                      'value density': value_density.name,
                      'server selection': server_selection_policy.name,
                      'resource allocation': resource_allocation_policy.name
                  })
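
A minimal usage sketch, reusing policy instances that appear with greedy_algorithm elsewhere in these examples; the particular policy choices are illustrative:

auction_result = critical_value_auction(tasks, servers,
                                        UtilityDeadlinePerResourcePriority(),
                                        SumResources(), SumPercentage())
print(f'Critical value auction social welfare: {auction_result.social_welfare}')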
Code example #27
def value_only_mutation(model_dist: ModelDist,
                        repeats: int = 25,
                        time_limit: int = 2,
                        price_change: int = 3,
                        initial_price: int = 25,
                        model_mutations: int = 15,
                        value_mutations: Iterable[int] = (1, 2, 3, 4)):
    """
    Evaluates the value only mutation of tasks

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: DIA time limit
    :param price_change: Server price change
    :param initial_price: Server initial price
    :param model_mutations: The number of model mutation attempts
    :param value_mutations: The value difference to do testing with
    """
    print('Evaluates the value mutation of tasks')
    pretty_printer, model_results = PrettyPrinter(), []

    filename = results_filename('value_mutation', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        mutation_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Copy the task list so that tasks can be drawn at random for mutation
        to_mutate_tasks = tasks[:]
        reset_model(tasks, servers)

        # Each iteration mutates one task's value and compares the auction result to the unmutated result
        for model_mutation in range(min(model_mutations,
                                        len(to_mutate_tasks))):
            # Choose a random task to mutate
            task: ElasticTask = to_mutate_tasks.pop(
                rnd.randint(0, len(to_mutate_tasks) - 1))
            task_value = task.value

            task_mutation_results = {}
            for value in value_mutations:
                task.value = task_value - value

                # Find the result with the mutated task
                mutant_result = optimal_decentralised_iterative_auction(
                    tasks, servers, time_limit)
                task_mutation_results[f'value {value}'] = mutant_result.store(
                    **{
                        'price': task.price,
                        'allocated': task.running_server is not None,
                        'value': task.value
                    })
                pretty_printer.pprint(task_mutation_results[f'value {value}'])
                reset_model(tasks, servers)

            task.value = task_value
            mutation_results[f'task {task.name}'] = task_mutation_results

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
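
A minimal driver sketch for the experiment above, assuming `AlibabaModelDist` (the `ModelDist` subclass used elsewhere in this collection) takes the number of tasks and servers; the small argument values are only there to keep the run short.

if __name__ == '__main__':
    # Assumption: AlibabaModelDist(num_tasks, num_servers) is available
    value_only_mutation(AlibabaModelDist(20, 4), repeats=2,
                        model_mutations=5, value_mutations=(1, 2))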
Code example #28
def greedy_permutations(model_dist: ModelDist,
                        repeats: int = 20,
                        time_steps: int = 1000,
                        mean_arrival_rate: float = 2,
                        std_arrival_rate: float = 2,
                        batch_length: int = 1):
    """
    Evaluates the relative performance of greedy algorithms with different policy functions

    :param model_dist: The model distribution used to test with
    :param repeats: The number of testing repeats that are computed
    :param time_steps: The total number of time steps over which tasks arrive
    :param mean_arrival_rate: The mean arrival rate of tasks
    :param std_arrival_rate: The standard deviation of the task arrival rate
    :param batch_length: The batch length of the testing setting
    """
    print('Evaluates performance between different greedy permutations')
    print(
        f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate}, std: {std_arrival_rate}'
    )
    model_results = []

    filename = results_filename('online_greedy_permutations', model_dist)
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps,
                                                    mean_arrival_rate,
                                                    std_arrival_rate)
        algorithm_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }

        valid_tasks = [task for task in tasks if batch_length < task.deadline]
        batched_tasks = generate_batch_tasks(valid_tasks, batch_length,
                                             time_steps)
        flattened_tasks = [task for batch in batched_tasks for task in batch]

        for task_priority, server_selection, resource_allocation in [
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             ProductResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             ProductResources(True), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceProductPriority()),
             ProductResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceProductPriority()),
             ProductResources(True), SumPowPercentage()),
            (ValuePriority(), ProductResources(), SumPowPercentage()),
            (ValuePriority(), ProductResources(True), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             SumResources(), SumPowPercentage()),
            (UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
             SumResources(True), SumPowPercentage())
        ]:
            greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, ' \
                          f'{resource_allocation.name}'
            greedy_result = online_batch_solver(
                batched_tasks,
                servers,
                batch_length,
                greedy_name,
                greedy_algorithm,
                task_priority=task_priority,
                server_selection=server_selection,
                resource_allocation=resource_allocation)
            algorithm_results[greedy_result.algorithm] = greedy_result.store()
            print(greedy_name)
            reset_model(flattened_tasks, servers)

        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
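
The hard-coded tuple list above pairs each priority function with weighted and unweighted server selection. A sketch of the same idea built with `itertools.product`, which may be easier to extend; note it enumerates every priority/selection pairing (12 combinations), a superset of the eight hand-picked tuples above, and reusing policy instances assumes they are stateless.

import itertools

task_priorities = [UtilityDeadlinePerResourcePriority(ResourceSumPriority()),
                   UtilityDeadlinePerResourcePriority(ResourceProductPriority()),
                   ValuePriority()]
server_selections = [ProductResources(), ProductResources(True),
                     SumResources(), SumResources(True)]
# One triplet per (priority, selection) pairing, all with the same allocation policy
policy_triplets = [(priority, selection, SumPowPercentage())
                   for priority, selection in itertools.product(task_priorities,
                                                                server_selections)]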
Code example #29
def full_task_mutation(model_dist: ModelDist,
                       repeats: int = 25,
                       time_limit: int = 2,
                       price_change: int = 3,
                       initial_price: int = 25,
                       model_mutations: int = 15,
                       mutate_percent: float = 0.15):
    """
    Evaluates the effect of task mutations on whether the mutated task is allocated and, if so, the difference in
        price between the mutated and the original task

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: The time limit for the decentralised iterative auction results
    :param price_change: The price change of the servers
    :param initial_price: The initial price of tasks for the servers
    :param model_mutations: The number of model mutations to attempt
    :param mutate_percent: The maximum percentage by which a task's attributes can be mutated
    """
    print('Evaluates whether mutating a task can result in a lower price')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('task_mutation', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        mutation_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Save the task prices and server revenues
        task_prices = {task: task.price for task in tasks}
        allocated_tasks = {
            task: task.running_server is not None
            for task in tasks
        }
        to_mutate_tasks = list(allocated_tasks.keys())  # TODO: restrict to allocated tasks in future testing
        reset_model(tasks, servers)

        # Each iteration mutates one task and compares the auction result to the unmutated result
        for model_mutation in range(min(model_mutations,
                                        len(to_mutate_tasks))):
            # Choose a random task and mutate it
            task: ElasticTask = to_mutate_tasks.pop(
                rnd.randint(0, len(to_mutate_tasks) - 1))
            mutant_task = task.mutate(mutate_percent)

            # Replace the task with the mutant task in the task list
            list_item_replacement(tasks, task, mutant_task)
            assert mutant_task in tasks
            assert task not in tasks

            # Find the result with the mutated task
            mutant_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit)
            mutation_results[f'mutation {model_mutation}'] = mutant_result.store(**{
                'task price': task_prices[task],
                'task allocated': allocated_tasks[task],
                'mutant price': mutant_task.price,
                'mutant task allocated': mutant_task.running_server is not None,
                'mutant task name': task.name,
                'mutant task deadline': mutant_task.deadline,
                'mutant task value': mutant_task.value,
                'mutant task storage': mutant_task.required_storage,
                'mutant task computation': mutant_task.required_computation,
                'mutant task results data': mutant_task.required_results_data,
            })
            pretty_printer.pprint(
                mutation_results[f'mutation {model_mutation}'])

            # Replace the mutant task with the task in the task list
            list_item_replacement(tasks, mutant_task, task)
            assert mutant_task not in tasks
            assert task in tasks

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
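
A minimal driver sketch, with the same `AlibabaModelDist` assumption as above; sweeping `mutate_percent` would show how mutation aggressiveness affects the price difference (the values here are illustrative).

if __name__ == '__main__':
    # Assumption: AlibabaModelDist(num_tasks, num_servers) is available
    full_task_mutation(AlibabaModelDist(20, 4), repeats=2,
                       model_mutations=5, mutate_percent=0.1)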
Code example #30
def online_evaluation(model_dist: ModelDist,
                      repeats: int = 20,
                      time_steps: int = 200,
                      mean_arrival_rate: float = 1,
                      std_arrival_rate: float = 2,
                      task_priority=UtilityDeadlinePerResourcePriority(
                          ResourceSumPriority()),
                      server_selection=ProductResources(),
                      resource_allocation=SumPowPercentage()):
    """
    Evaluates the online batch algorithms (elastic optimal, non-elastic optimal and greedy)

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_steps: Total number of time steps
    :param mean_arrival_rate: Mean arrival rate of tasks
    :param std_arrival_rate: Standard deviation arrival rate of tasks
    :param task_priority: The task prioritisation function
    :param server_selection: Server selection policy
    :param resource_allocation: Resource allocation policy
    """
    print(
        f'Evaluates difference in performance between batch and online algorithm for {model_dist.name} model with '
        f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    print(
        f'Settings - Time steps: {time_steps}, mean arrival rate: {mean_arrival_rate} with std: {std_arrival_rate}'
    )
    model_results = []

    filename = results_filename('online_resource_allocation', model_dist)
    greedy_name = f'Greedy {task_priority.name}, {server_selection.name}, {resource_allocation.name}'
    batch_length = 1

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers = model_dist.generate_online(time_steps,
                                                    mean_arrival_rate,
                                                    std_arrival_rate)
        algorithm_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        non_elastic_tasks = generate_non_elastic_tasks(tasks)

        valid_elastic_tasks = [
            task for task in tasks if batch_length < task.deadline
        ]
        batched_elastic_tasks = generate_batch_tasks(valid_elastic_tasks,
                                                     batch_length, time_steps)

        valid_non_elastic_tasks = [
            task for task in non_elastic_tasks if batch_length < task.deadline
        ]
        batched_non_elastic_tasks = generate_batch_tasks(
            valid_non_elastic_tasks, batch_length, time_steps)

        # Flatten the batched tasks
        flattened_elastic_tasks = [
            task for batch in batched_elastic_tasks for task in batch
        ]
        flattened_non_elastic_tasks = [
            task for batch in batched_non_elastic_tasks for task in batch
        ]

        elastic_optimal_result = online_batch_solver(batched_elastic_tasks,
                                                     servers,
                                                     batch_length,
                                                     'Elastic Optimal',
                                                     elastic_optimal_solver,
                                                     time_limit=None)
        algorithm_results[elastic_optimal_result.algorithm] = elastic_optimal_result.store()
        reset_model(flattened_elastic_tasks, servers)

        non_elastic_optimal_result = online_batch_solver(
            batched_non_elastic_tasks,
            servers,
            batch_length,
            'Non-elastic Optimal',
            non_elastic_optimal_solver,
            time_limit=None)
        algorithm_results[non_elastic_optimal_result.algorithm] = non_elastic_optimal_result.store()
        reset_model(flattened_non_elastic_tasks, servers)

        # Run the greedy algorithm with the chosen policies
        greedy_result = online_batch_solver(
            batched_elastic_tasks,
            servers,
            batch_length,
            greedy_name,
            greedy_algorithm,
            task_priority=task_priority,
            server_selection=server_selection,
            resource_allocation=resource_allocation)
        algorithm_results[greedy_result.algorithm] = greedy_result.store()
        reset_model(flattened_elastic_tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
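
A minimal sketch showing how the default greedy policies could be swapped for one of the alternatives evaluated in Code example #28 (same `AlibabaModelDist` assumption as the earlier sketches; the short horizon keeps the optimal solver calls cheap).

if __name__ == '__main__':
    # Assumption: AlibabaModelDist(num_tasks, num_servers) is available
    online_evaluation(AlibabaModelDist(20, 4), repeats=2, time_steps=100,
                      task_priority=UtilityDeadlinePerResourcePriority(
                          ResourceProductPriority()),
                      server_selection=SumResources(True),
                      resource_allocation=SumPowPercentage())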