Example No. 1
def non_uniform_server_heuristics(model_dist: ModelDist,
                                  repeats: int = 20,
                                  time_limit: int = 2,
                                  random_repeats: int = 10,
                                  price_change_mean: int = 4,
                                  price_change_std: int = 2,
                                  initial_price_mean: int = 25,
                                  initial_price_std: int = 4):
    """
    Evaluates the effect of the server heuristics when they are non-uniform (servers do not all use the same value)

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the decentralised iterative auction
    :param random_repeats: The number of random repeats for each model generated
    :param price_change_mean: The mean price change value
    :param price_change_std: The standard deviation of the price change value
    :param initial_price_mean: The mean initial price value
    :param initial_price_std: The standard deviation of the initial price value
    """
    print(
        f'DIA non-uniform heuristic investigation with initial price mean: {initial_price_mean} and '
        f'std: {initial_price_std}, price change mean: {price_change_mean} and price change std: {price_change_std}, '
        f'using {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_non_uniform_heuristic', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        set_server_heuristics(servers,
                              price_change=price_change_mean,
                              initial_price=initial_price_mean)
        dia_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit)
        algorithm_results['normal'] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        for random_repeat in range(random_repeats):
            for server in servers:
                server.price_change = max(
                    1, int(gauss(price_change_mean, price_change_std)))
                server.initial_price = max(
                    1, int(gauss(initial_price_mean, initial_price_std)))

            dia_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit)
            algorithm_results[f'repeat {random_repeat}'] = dia_result.store()
            dia_result.pretty_print()
            reset_model(tasks, servers)
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
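
The set_server_heuristics helper used above is not shown on this page. A minimal sketch of its assumed behaviour, giving every server the same price_change and initial_price (which the per-server Gaussian loop above then overrides), would be:

def set_server_heuristics(servers, price_change: int, initial_price: int):
    # Assumed behaviour (not from the source): apply uniform heuristic values
    # to every server; the non-uniform loop above resamples them with gauss().
    for server in servers:
        server.price_change = price_change
        server.initial_price = initial_price
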
def server_sizing(repeats: int = 20):
    """
    Evaluates a small set of custom server resource distributions to investigate server sizing

    :param repeats: The number of repeats
    """
    model_dist = AlibabaModelDist(20, 4)
    pretty_printer, server_scales = PrettyPrinter(), {}

    for mean_storage, mean_computation, mean_bandwidth in ((400, 50, 120), (400, 60, 150), (400, 70, 160)):
        model_dist.model['server distributions'] = [{
            "name": "custom",
            "probability": 1,
            "storage mean": mean_storage, "storage std": 30,
            "computation mean": mean_computation, "computation std": 8,
            "bandwidth mean": mean_bandwidth, "bandwidth std": 15
        }]
        model_results = []
        for _ in range(repeats):
            tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

            non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers, time_limit=60)
            algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store()
            reset_model(non_elastic_tasks, servers)

            greedy_permutations(tasks, servers, algorithm_results)

            model_results.append(algorithm_results)

        server_scales[f'{mean_storage}, {mean_computation}, {mean_bandwidth}'] = model_results

        with open('server_scaling_3.json', 'w') as file:
            json.dump(server_scales, file)
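
How the model generator consumes the 'server distributions' entry is not shown here. A hypothetical illustration, assuming each capacity is drawn from a Gaussian with the listed mean and standard deviation, is:

from random import gauss

# Hypothetical sampling of a single server from the first custom entry above
# (an assumption, not taken from the source); capacities are clamped to >= 1.
dist = {'storage mean': 400, 'storage std': 30,
        'computation mean': 50, 'computation std': 8,
        'bandwidth mean': 120, 'bandwidth std': 15}
server_capacities = {
    'storage': max(1, int(gauss(dist['storage mean'], dist['storage std']))),
    'computation': max(1, int(gauss(dist['computation mean'], dist['computation std']))),
    'bandwidth': max(1, int(gauss(dist['bandwidth mean'], dist['bandwidth std']))),
}
print(server_capacities)
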
Example No. 3
def auction_evaluation(model_dist: ModelDist, repeats: int = 50, dia_time_limit: int = 3,
                       run_elastic: bool = True, run_non_elastic: bool = True):
    """
    Evaluation of different auction algorithms

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param dia_time_limit: Decentralised iterative auction time limit
    :param run_elastic: Whether to run the elastic VCG auction
    :param run_non_elastic: Whether to run the non-elastic VCG auction
    """
    print(f'Evaluates the auction algorithms (CVA, DIA, elastic VCG, non-elastic VCG) for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('auctions', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

        if run_elastic:
            # Elastic VCG Auctions
            vcg_result = elastic_vcg_auction(tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic:
            # Non-elastic VCG auction
            vcg_result = non_elastic_vcg_auction(non_elastic_tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Decentralised Iterative auction
        dia_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=dia_time_limit)
        algorithm_results[dia_result.algorithm] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        # Critical Value Auction
        for task_priority in task_priority_functions:
            for server_selection_policy in server_selection_functions:
                for resource_allocation_policy in resource_allocation_functions:
                    critical_value_result = critical_value_auction(tasks, servers, task_priority,
                                                                   server_selection_policy, resource_allocation_policy)
                    algorithm_results[critical_value_result.algorithm] = critical_value_result.store()
                    critical_value_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
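
A possible invocation, assuming AlibabaModelDist(num_tasks, num_servers) as used in the server_sizing example above is importable in the same module:

# Illustrative call only: 20 tasks, 4 servers, 5 repeats, skipping the slower
# elastic VCG auction (the function defaults to repeats=50 with both VCGs on).
auction_evaluation(AlibabaModelDist(20, 4), repeats=5, dia_time_limit=3,
                   run_elastic=False)
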
Example No. 4
def server_resource_ratio(model_dist: ModelDist, repeats: int = 25, run_elastic: bool = True,
                          run_non_elastic: bool = True, non_elastic_time_limit: Optional[int] = None,
                          ratios: Iterable[float] = (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    """
    Evaluates the difference in social welfare when the ratio of computational to bandwidth capacity is changed between
        different algorithms: greedy, elastic optimal, non-elastic optimal and server relaxed optimal

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param run_elastic: Whether to run the optimal elastic solver
    :param run_non_elastic: Whether to run the optimal non-elastic solver
    :param non_elastic_time_limit: The non-elastic optimal time limit
    :param ratios: List of ratios to test
    """
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('resource_ratio', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        # Generate the tasks and servers
        tasks, servers, non_elastic_tasks, ratio_results = generate_evaluation_model(model_dist, pretty_printer)

        server_total_resources = {server: server.computation_capacity + server.bandwidth_capacity
                                  for server in servers}
        for ratio in ratios:
            algorithm_results = {}
            # Update server capacities
            for server in servers:
                server.update_capacities(int(server_total_resources[server] * ratio),
                                         int(server_total_resources[server] * (1 - ratio)))

            if run_elastic:
                # Finds the elastic optimal solution
                elastic_optimal_results = elastic_optimal(tasks, servers, time_limit=None)
                algorithm_results[elastic_optimal_results.algorithm] = elastic_optimal_results.store(ratio=ratio)
                pretty_printer.pprint(algorithm_results[elastic_optimal_results.algorithm])
                reset_model(tasks, servers)

            if run_non_elastic:
                # Find the non-elastic optimal solution
                non_elastic_results = non_elastic_optimal(non_elastic_tasks, servers, time_limit=non_elastic_time_limit)
                algorithm_results[non_elastic_results.algorithm] = non_elastic_results.store(ratio=ratio)
                non_elastic_results.pretty_print()
                reset_model(non_elastic_tasks, servers)

            # Loop over all of the greedy policies permutations
            greedy_permutations(tasks, servers, algorithm_results)

            ratio_results[f'ratio {ratio}'] = algorithm_results
        model_results.append(ratio_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
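
The ratio sweep splits each server's fixed resource budget between computation and bandwidth. For example, with a computation capacity of 50 and a bandwidth capacity of 150, the total budget is 200 and the values passed to update_capacities are:

total = 50 + 150  # server_total_resources for one example server
for ratio in (0.1, 0.3, 0.5):
    computation, bandwidth = int(total * ratio), int(total * (1 - ratio))
    print(f'ratio {ratio}: computation={computation}, bandwidth={bandwidth}')
# ratio 0.1: computation=20, bandwidth=180
# ratio 0.3: computation=60, bandwidth=140
# ratio 0.5: computation=100, bandwidth=100
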
Example No. 5
def dia_heuristic_grid_search(model_dist: ModelDist,
                              repeats: int = 50,
                              time_limit: int = 4,
                              initial_prices: Iterable[int] = (0, 4, 8, 12),
                              price_changes: Iterable[int] = (1, 2, 4, 6)):
    """
    Evaluates the difference in results when the decentralised iterative auction uses different price changes and
        initial price values

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the DIA Auction
    :param initial_prices: The initial price for auctions
    :param price_changes: The price change of the servers
    """
    print(
        f'DIA Heuristic grid search with initial prices: {initial_prices}, price changes: {price_changes} '
        f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_heuristic_grid_search', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        for initial_price in initial_prices:
            for price_change in price_changes:
                set_server_heuristics(servers,
                                      price_change=price_change,
                                      initial_price=initial_price)

                results = optimal_decentralised_iterative_auction(
                    tasks, servers, time_limit)
                algorithm_results[
                    f'IP: {initial_price}, PC: {price_change}'] = results.store(
                        **{
                            'initial price': initial_price,
                            'price change': price_change
                        })
                results.pretty_print()
                reset_model(tasks, servers)

        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
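
With the default arguments the grid search solves len(initial_prices) * len(price_changes) auctions per repeat:

initial_prices, price_changes, repeats = (0, 4, 8, 12), (1, 2, 4, 6), 50
auctions_per_repeat = len(initial_prices) * len(price_changes)   # 16
print(auctions_per_repeat, auctions_per_repeat * repeats)        # 16 800
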
def dia_repeat(model_dist: ModelDist,
               repeats: int = 25,
               auction_repeats: int = 5,
               time_limit: int = 2,
               price_change: int = 3,
               initial_price: int = 25):
    """
    Tests the Decentralised iterative auction by repeating the auction to see if the same local / global maximum is
        reached

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param auction_repeats: The number of auction repeats
    :param time_limit: The auction time limit
    :param price_change: Price change
    :param initial_price: The initial price
    """
    print('Evaluation of DIA by repeating the auction')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('repeat_dia', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, repeat_results = generate_evaluation_model(
            model_dist, pretty_printer)
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        for auction_repeat in range(auction_repeats):
            reset_model(tasks, servers)
            auction_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit=time_limit)
            auction_result.pretty_print()
            repeat_results[f'repeat {auction_repeat}'] = auction_result.store()

        model_results.append(repeat_results)
        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def lower_bound_testing(model_dist: ModelDist, repeats: int = 50):
    """
    Tests to compare the lower bound of the greedy algorithm against the best greedy algorithm

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    """
    print(f'Evaluates the greedy algorithm for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('lower_bound', model_dist)

    lb_task_functions = task_priority_functions + [ValuePriority()]
    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        # Loop over all of the greedy policies permutations
        for task_priority in lb_task_functions:
            for server_selection in server_selection_functions:
                for resource_allocation in resource_allocation_functions:
                    greedy_result = greedy_algorithm(tasks, servers,
                                                     task_priority,
                                                     server_selection,
                                                     resource_allocation)
                    algorithm_results[greedy_result.algorithm] = greedy_result.store()
                    greedy_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
def greedy_evaluation(model_dist: ModelDist,
                      repeats: int = 50,
                      run_elastic_optimal: bool = True,
                      run_non_elastic_optimal: bool = True,
                      run_server_relaxed_optimal: bool = True):
    """
    Evaluation of different greedy algorithms

    :param model_dist: The model distribution
    :param repeats: Number of model runs
    :param run_elastic_optimal: Whether to run the optimal elastic solver
    :param run_non_elastic_optimal: Whether to run the optimal non-elastic solver
    :param run_server_relaxed_optimal: Whether to run the relaxed elastic solver
    """
    print(
        f'Evaluates the greedy algorithms (plus elastic, non-elastic and server relaxed optimal solutions) '
        f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('greedy', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        if run_elastic_optimal:
            # Find the optimal solution
            elastic_optimal_result = elastic_optimal(tasks,
                                                     servers,
                                                     time_limit=None)
            algorithm_results[elastic_optimal_result.algorithm] = elastic_optimal_result.store()
            elastic_optimal_result.pretty_print()
            reset_model(tasks, servers)

        if run_server_relaxed_optimal:
            # Find the relaxed solution
            relaxed_result = server_relaxed_elastic_optimal(tasks,
                                                            servers,
                                                            time_limit=None)
            algorithm_results[relaxed_result.algorithm] = relaxed_result.store()
            relaxed_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic_optimal:
            # Find the non-elastic solution
            non_elastic_optimal_result = non_elastic_optimal(non_elastic_tasks,
                                                             servers,
                                                             time_limit=None)
            algorithm_results[non_elastic_optimal_result.algorithm] = non_elastic_optimal_result.store()
            non_elastic_optimal_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Loop over all of the greedy policies permutations
        greedy_permutations(tasks, servers, algorithm_results)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
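
The greedy_permutations helper used in several examples above is not shown on this page. A minimal sketch of its assumed behaviour, mirroring the explicit triple loop in lower_bound_testing, would be:

def greedy_permutations(tasks, servers, algorithm_results):
    # Assumed behaviour (not from the source): run the greedy algorithm for
    # every combination of task priority, server selection and resource
    # allocation policy, storing each result under its algorithm name and
    # resetting the model between runs.
    for task_priority in task_priority_functions:
        for server_selection in server_selection_functions:
            for resource_allocation in resource_allocation_functions:
                greedy_result = greedy_algorithm(tasks, servers, task_priority,
                                                 server_selection, resource_allocation)
                algorithm_results[greedy_result.algorithm] = greedy_result.store()
                greedy_result.pretty_print()
                reset_model(tasks, servers)
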