# Standard library imports used by the evaluation functions below. Project-specific helpers
# (ModelDist, SyntheticModelDist, ElasticTask, PrettyPrinter, results_filename,
# generate_evaluation_model, set_server_heuristics, reset_model, list_item_replacement, the
# auction / optimal solvers and the policy classes and function lists) are assumed to be
# imported from the surrounding project.
import json
import random as rnd
from random import gauss
from typing import Iterable


def non_uniform_server_heuristics(model_dist: ModelDist,
                                  repeats: int = 20,
                                  time_limit: int = 2,
                                  random_repeats: int = 10,
                                  price_change_mean: int = 4,
                                  price_change_std: int = 2,
                                  initial_price_mean: int = 25,
                                  initial_price_std: int = 4):
    """
    Evaluates the effect of the server heuristics when they are non-uniform (all servers don't use the same value)

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the decentralised iterative auction
    :param random_repeats: The number of random repeats for each model generated
    :param price_change_mean: The mean price change value
    :param price_change_std: The standard deviation of the price change value
    :param initial_price_mean: The mean initial price value
    :param initial_price_std: The standard deviation of the initial price value
    """
    print(
        f'DIA non-uniform heuristic investigation with initial price mean: {initial_price_mean} and '
        f'std: {initial_price_std}, price change mean: {price_change_mean} and price change std: {price_change_std}, '
        f'using {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_non_uniform_heuristic', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        set_server_heuristics(servers,
                              price_change=price_change_mean,
                              initial_price=initial_price_mean)
        dia_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit)
        algorithm_results['normal'] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        for random_repeat in range(random_repeats):
            for server in servers:
                server.price_change = max(
                    1, int(gauss(price_change_mean, price_change_std)))
                server.initial_price = max(
                    1, int(gauss(initial_price_mean, initial_price_std)))

            dia_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit)
            algorithm_results[f'repeat {random_repeat}'] = dia_result.store()
            dia_result.pretty_print()
            reset_model(tasks, servers)
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
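

# A minimal usage sketch (hypothetical, not part of the original module): how the evaluation
# above might be driven on a small synthetic model. Assumes SyntheticModelDist (used in
# test_optimal_vs_greedy_dia below) takes the number of tasks and servers as its arguments.
def _example_non_uniform_run():
    model = SyntheticModelDist(20, 4)
    non_uniform_server_heuristics(model, repeats=2, random_repeats=3)

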
def dia_social_welfare_test(model_dist: ModelDist,
                            repeat: int,
                            repeats: int = 20):
    """
    Evaluates the DIA social welfare results against the optimal solution

    :param model_dist: The model distribution
    :param repeat: The repeat of the testing
    :param repeats: The number of repeats
    """
    data = []
    filename = results_filename('testing', model_dist)
    for _ in range(repeats):
        tasks, servers = model_dist.generate_oneshot()
        model_results = {}

        optimal_result = elastic_optimal(tasks, servers, 30)
        model_results[optimal_result.algorithm] = optimal_result.store()
        reset_model(tasks, servers)

        for pos in range(5):
            set_server_heuristics(servers, price_change=3, initial_price=25)
            dia_result = optimal_decentralised_iterative_auction(
                tasks, servers, 2)
            model_results[f'DIA {pos}'] = dia_result.store()
            reset_model(tasks, servers)

        data.append(model_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(data, file)


def auction_evaluation(model_dist: ModelDist, repeats: int = 50, dia_time_limit: int = 3,
                       run_elastic: bool = True, run_non_elastic: bool = True):
    """
    Evaluation of different auction algorithms

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param dia_time_limit: Decentralised iterative auction time limit
    :param run_elastic: Whether to run the elastic VCG auction
    :param run_non_elastic: Whether to run the non-elastic VCG auction
    """
    print(f'Evaluates the auction algorithms (CVA, DIA, elastic VCG, non-elastic VCG) for {model_dist.name} model with '
          f'{model_dist.num_tasks} tasks and {model_dist.num_servers} servers')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('auctions', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(model_dist, pretty_printer)

        if run_elastic:
            # Elastic VCG Auctions
            vcg_result = elastic_vcg_auction(tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(tasks, servers)

        if run_non_elastic:
            # Non-elastic VCG auction
            vcg_result = non_elastic_vcg_auction(non_elastic_tasks, servers, time_limit=None)
            algorithm_results[vcg_result.algorithm] = vcg_result.store()
            vcg_result.pretty_print()
            reset_model(non_elastic_tasks, servers)

        # Decentralised Iterative auction
        dia_result = optimal_decentralised_iterative_auction(tasks, servers, time_limit=dia_time_limit)
        algorithm_results[dia_result.algorithm] = dia_result.store()
        dia_result.pretty_print()
        reset_model(tasks, servers)

        # Critical Value Auction
        for task_priority in task_priority_functions:
            for server_selection_policy in server_selection_functions:
                for resource_allocation_policy in resource_allocation_functions:
                    critical_value_result = critical_value_auction(tasks, servers, task_priority,
                                                                   server_selection_policy, resource_allocation_policy)
                    algorithm_results[critical_value_result.algorithm] = critical_value_result.store()
                    critical_value_result.pretty_print()
                    reset_model(tasks, servers)

        # Add the results to the data
        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
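

# A hypothetical helper (not part of the original module): the critical value auction above is
# run over the full policy grid, so this reports how many combinations each repeat evaluates,
# using the task_priority_functions, server_selection_functions and
# resource_allocation_functions lists assumed to be defined in the surrounding project.
def cva_policy_combinations() -> int:
    total = (len(task_priority_functions) * len(server_selection_functions) *
             len(resource_allocation_functions))
    print(f'Critical value auction policy combinations per repeat: {total}')
    return total

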
def dia_heuristic_grid_search(model_dist: ModelDist,
                              repeats: int = 50,
                              time_limit: int = 4,
                              initial_prices: Iterable[int] = (0, 4, 8, 12),
                              price_changes: Iterable[int] = (1, 2, 4, 6)):
    """
    Evaluates the difference in results when the decentralised iterative auction uses different price change and
        initial price values

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param time_limit: The time limit for the DIA Auction
    :param initial_prices: The initial price for auctions
    :param price_changes: The price change of the servers
    """
    print(
        f'DIA Heuristic grid search with initial prices: {initial_prices}, price changes: {price_changes} '
        f'for {model_dist.name} model with {model_dist.num_tasks} tasks and {model_dist.num_servers} servers'
    )
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('dia_heuristic_grid_search', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, algorithm_results = generate_evaluation_model(
            model_dist, pretty_printer)

        for initial_price in initial_prices:
            for price_change in price_changes:
                set_server_heuristics(servers,
                                      price_change=price_change,
                                      initial_price=initial_price)

                results = optimal_decentralised_iterative_auction(
                    tasks, servers, time_limit)
                algorithm_results[
                    f'IP: {initial_price}, PC: {price_change}'] = results.store(
                        **{
                            'initial price': initial_price,
                            'price change': price_change
                        })
                results.pretty_print()
                reset_model(tasks, servers)

        model_results.append(algorithm_results)

        # Save the results to the file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
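

# A hypothetical analysis helper (not part of the original module): loads the saved
# 'dia_heuristic_grid_search' results and reports which (initial price, price change) pair
# gives the highest mean value of `metric`. It relies on the 'initial price' and
# 'price change' keys stored above; the default 'social welfare' field name is an assumption
# about what Result.store() records.
def best_heuristic_pair(filename: str, metric: str = 'social welfare'):
    with open(filename) as file:
        model_results = json.load(file)

    metric_values = {}
    for algorithm_results in model_results:
        for result in algorithm_results.values():
            if isinstance(result, dict) and 'initial price' in result and 'price change' in result:
                pair = (result['initial price'], result['price change'])
                metric_values.setdefault(pair, []).append(result[metric])

    best_pair = max(metric_values, key=lambda pair: sum(metric_values[pair]) / len(metric_values[pair]))
    print(f'Best (initial price, price change): {best_pair}')
    return best_pair

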
def dia_repeat(model_dist: ModelDist,
               repeats: int = 25,
               auction_repeats: int = 5,
               time_limit: int = 2,
               price_change: int = 3,
               initial_price: int = 25):
    """
    Tests the decentralised iterative auction by repeating the auction to see if the same local / global maximum is
        achieved

    :param model_dist: The model distribution
    :param repeats: The number of repeats
    :param auction_repeats: The number of auction repeats
    :param time_limit: The auction time limit
    :param price_change: Price change
    :param initial_price: The initial price
    """
    print(f'Evaluation of DIA by repeating the auction')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('repeat_dia', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers, non_elastic_tasks, repeat_results = generate_evaluation_model(
            model_dist, pretty_printer)
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        for auction_repeat in range(auction_repeats):
            reset_model(tasks, servers)
            auction_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit=time_limit)
            auction_result.pretty_print()
            repeat_results[f'repeat {auction_repeat}'] = auction_result.store()

        model_results.append(repeat_results)
        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
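

# A hypothetical analysis helper (not part of the original module): loads the saved
# 'repeat_dia' results and prints the spread of `metric` across the repeated auctions of each
# model, showing how consistently DIA reaches the same local / global maximum. The default
# 'social welfare' field name is an assumption about what Result.store() records.
def dia_repeat_spread(filename: str, metric: str = 'social welfare'):
    with open(filename) as file:
        model_results = json.load(file)

    for pos, repeat_results in enumerate(model_results):
        values = [result[metric] for name, result in repeat_results.items()
                  if name.startswith('repeat')]
        print(f'Model {pos}: min {min(values)}, max {max(values)}, spread {max(values) - min(values)}')

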
def test_optimal_vs_greedy_dia(repeats: int = 5):
    """
    Compares the solve time and social welfare of the optimal and greedy decentralised
        iterative auctions on a small synthetic model

    :param repeats: The number of repeats
    """
    print()
    model = SyntheticModelDist(7, 1)

    print(' Optimal    | Greedy')
    print('Time  | SW  | Time   | SW')
    for repeat in range(repeats):
        tasks, servers = model.generate_oneshot()
        set_server_heuristics(servers, price_change=5)

        optimal_result = optimal_decentralised_iterative_auction(tasks,
                                                                 servers,
                                                                 time_limit=1)

        reset_model(tasks, servers)
        greedy_result = greedy_decentralised_iterative_auction(
            tasks, servers, PriceResourcePerDeadline(), SumPercentage())

        print(
            f'{optimal_result.solve_time} | {optimal_result.social_welfare} | '
            f'{greedy_result.solve_time} | {greedy_result.social_welfare}')
def full_task_mutation(model_dist: ModelDist,
                       repeats: int = 25,
                       time_limit: int = 2,
                       price_change: int = 3,
                       initial_price: int = 25,
                       model_mutations: int = 15,
                       mutate_percent: float = 0.15):
    """
    Evaluates the effectiveness of task mutation: whether the mutated task is allocated and, if so, the difference in
        price between the mutated and original task

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: The time limit for the decentralised iterative auction results
    :param price_change: The price change of the servers
    :param initial_price: The initial price of tasks for the servers
    :param model_mutations: The number of model mutations to attempt
    :param mutate_percent: The percentage by which a task can be mutated
    """
    print('Evaluates the possibility of a task mutation resulting in a lower price')
    pretty_printer, model_results = PrettyPrinter(), []
    filename = results_filename('task_mutation', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        mutation_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Save the task prices and allocation status of each task
        task_prices = {task: task.price for task in tasks}
        allocated_tasks = {
            task: task.running_server is not None
            for task in tasks
        }
        to_mutate_tasks = [
            task for task, allocated in allocated_tasks.items()
        ]  # TODO: filter to allocated tasks only in future testing
        reset_model(tasks, servers)

        # Loop, each time mutating a task, and compare the auction result to the unmutated result
        for model_mutation in range(min(model_mutations,
                                        len(to_mutate_tasks))):
            # Choose a random task and mutate it
            task: ElasticTask = to_mutate_tasks.pop(
                rnd.randint(0,
                            len(to_mutate_tasks) - 1))
            mutant_task = task.mutate(mutate_percent)

            # Replace the task with the mutant task in the task list
            list_item_replacement(tasks, task, mutant_task)
            assert mutant_task in tasks
            assert task not in tasks

            # Find the result with the mutated task
            mutant_result = optimal_decentralised_iterative_auction(
                tasks, servers, time_limit)
            mutation_results[f'mutation {model_mutation}'] = mutant_result.store(
                **{
                    'task price': task_prices[task],
                    'task allocated': allocated_tasks[task],
                    'mutant price': mutant_task.price,
                    'mutant task allocated': mutant_task.running_server
                    is not None,
                    'mutant task name': task.name,
                    'mutant task deadline': mutant_task.deadline,
                    'mutant task value': mutant_task.value,
                    'mutant task storage': mutant_task.required_storage,
                    'mutant task computation':
                    mutant_task.required_computation,
                    'mutant task results data':
                    mutant_task.required_results_data,
                })
            pretty_printer.pprint(
                mutation_results[f'mutation {model_mutation}'])

            # Replace the mutant task with the task in the task list
            list_item_replacement(tasks, mutant_task, task)
            assert mutant_task not in tasks
            assert task in tasks

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
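

# A hypothetical analysis helper (not part of the original module): loads the saved
# 'task_mutation' results and prints, for each mutation, the difference between the original
# task price and the mutant task price, using the 'task price' and 'mutant price' fields
# stored above (assuming Result.store() keeps the keyword arguments it is given).
def mutation_price_differences(filename: str):
    with open(filename) as file:
        model_results = json.load(file)

    for pos, mutation_results in enumerate(model_results):
        for name, result in mutation_results.items():
            if name.startswith('mutation'):
                difference = result['task price'] - result['mutant price']
                print(f'Model {pos}, {name}: price difference {difference}')

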
def value_only_mutation(model_dist: ModelDist,
                        repeats: int = 25,
                        time_limit: int = 2,
                        price_change: int = 3,
                        initial_price: int = 25,
                        model_mutations: int = 15,
                        value_mutations: Iterable[int] = (1, 2, 3, 4)):
    """
    Evaluates the value only mutation of tasks

    :param model_dist: Model distribution to generate tasks and servers
    :param repeats: The number of model repeats
    :param time_limit: DIA time limit
    :param price_change: Server price change
    :param initial_price: Server initial price
    :param model_mutations: The number of model mutation attempts
    :param value_mutations: The value reductions to test with
    """
    print('Evaluates the value-only mutation of tasks')
    pretty_printer, model_results = PrettyPrinter(), []

    filename = results_filename('value_mutation', model_dist)

    for repeat in range(repeats):
        print(f'\nRepeat: {repeat}')
        tasks, servers = model_dist.generate_oneshot()
        set_server_heuristics(servers,
                              price_change=price_change,
                              initial_price=initial_price)

        mutation_results = {
            'model': {
                'tasks': [task.save() for task in tasks],
                'servers': [server.save() for server in servers]
            }
        }
        pretty_printer.pprint(mutation_results)

        # Calculate the results without any mutation
        no_mutation_result = optimal_decentralised_iterative_auction(
            tasks, servers, time_limit=time_limit)
        no_mutation_result.pretty_print()
        mutation_results['no mutation'] = no_mutation_result.store()

        # Copy the task list so tasks can be selected for value mutation
        to_mutate_tasks = tasks[:]
        reset_model(tasks, servers)

        # Loop, each time mutating a task's value, and compare the auction result to the unmutated result
        for model_mutation in range(min(model_mutations,
                                        len(to_mutate_tasks))):
            # Choose a random task and mutate its value
            task: ElasticTask = to_mutate_tasks.pop(
                rnd.randint(0,
                            len(to_mutate_tasks) - 1))
            task_value = task.value

            task_mutation_results = {}
            for value in value_mutations:
                task.value = task_value - value

                # Find the result with the mutated task
                mutant_result = optimal_decentralised_iterative_auction(
                    tasks, servers, time_limit)
                task_mutation_results[f'value {value}'] = mutant_result.store(
                    **{
                        'price': task.price,
                        'allocated': task.running_server is not None,
                        'value': task.value
                    })
                pretty_printer.pprint(task_mutation_results[f'value {value}'])
                reset_model(tasks, servers)

            task.value = task_value
            mutation_results[f'task {task.name}'] = task_mutation_results

        # Append the results to the data list
        model_results.append(mutation_results)

        # Save all of the results to a file
        with open(filename, 'w') as file:
            json.dump(model_results, file)
    print('Finished running')
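

# A hypothetical analysis helper (not part of the original module): loads the saved
# 'value_mutation' results and reports, for each mutated task, the lowest value at which the
# task was still allocated, using the 'allocated' and 'value' fields stored above (assuming
# Result.store() keeps the keyword arguments it is given).
def value_mutation_allocation(filename: str):
    with open(filename) as file:
        model_results = json.load(file)

    for pos, mutation_results in enumerate(model_results):
        for name, task_results in mutation_results.items():
            if name.startswith('task'):
                allocated_values = [result['value'] for result in task_results.values()
                                    if result['allocated']]
                if allocated_values:
                    print(f'Model {pos}, {name}: still allocated down to value {min(allocated_values)}')
                else:
                    print(f'Model {pos}, {name}: never allocated after value mutation')

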
def mutation_grid_search(model_dist: ModelDist,
                         percent: float = 0.10,
                         time_limit: int = 3,
                         price_change: int = 3,
                         initial_price: int = 30):
    """
    Runs a grid-search version of the mutation testing above, where a single task is mutated in every possible way
        within a given percentage range, to check that the random testing is not missing anything

    :param model_dist: The model distribution to generate servers and tasks
    :param percent: The percentage range within which mutations can occur
    :param time_limit: The time limit for the optimal decentralised iterative auction
    :param price_change: The price change of the servers
    :param initial_price: The initial price for the servers
    """
    print(f'Completes a grid search of a task known to achieve better results')
    filename = results_filename('mutation_grid_search', model_dist)
    positive_percent, negative_percent = 1 + percent, 1 - percent

    # Generate the tasks and servers
    tasks, servers = model_dist.generate_oneshot()
    set_server_heuristics(servers,
                          price_change=price_change,
                          initial_price=initial_price)

    # The mutation results
    mutation_results = {
        'model': {
            'tasks': [task.save() for task in tasks],
            'servers': [server.save() for server in servers]
        }
    }

    no_mutation_dia = optimal_decentralised_iterative_auction(
        tasks, servers, time_limit=time_limit)
    no_mutation_dia.pretty_print()
    task = next(task for task in tasks if task.running_server is not None)
    mutation_results['no mutation'] = no_mutation_dia.store(
        **{
            'allocated': task.running_server is not None,
            'task price': task.price
        })

    # Estimate the number of permutations for the selected task (the first allocated task, effectively a random
    # choice given the tasks are already randomly generated)
    permutations = ((int(task.required_storage * positive_percent) + 1) - task.required_storage) * \
                   ((int(task.required_computation * positive_percent) + 1) - task.required_computation) * \
                   ((int(task.required_results_data * positive_percent) + 1) - task.required_results_data) * \
                   ((task.deadline + 1) - int(task.deadline * negative_percent))
    print(
        f'Number of permutations: {permutations}, original solve time: {no_mutation_dia.solve_time}, '
        f'estimated time: {round(permutations * no_mutation_dia.solve_time / 60, 1)} minutes'
    )
    reset_model(tasks, servers)
    mutation_pos = 0
    # Loop over all permutations of the task's resource requirements and deadline, up to the mutation percentage
    for required_storage in range(
            task.required_storage,
            int(task.required_storage * positive_percent) + 1):
        for required_computation in range(
                task.required_computation,
                int(task.required_computation * positive_percent) + 1):
            for required_results_data in range(
                    task.required_results_data,
                    int(task.required_results_data * positive_percent) + 1):
                for deadline in range(int(task.deadline * negative_percent),
                                      task.deadline + 1):
                    # Create the new mutated task and add it to the task list
                    mutant_task = ElasticTask(
                        f'mutated {task.name}',
                        required_storage=required_storage,
                        required_computation=required_computation,
                        required_results_data=required_results_data,
                        deadline=deadline,
                        value=task.value)
                    tasks.append(mutant_task)

                    # Calculate the task price with the mutated task
                    mutated_result = optimal_decentralised_iterative_auction(
                        tasks, servers, time_limit)
                    mutated_result.pretty_print()
                    mutation_results[
                        f'Mutation {mutation_pos}'] = mutated_result.store(
                            **{
                                'mutated task': task.name,
                                'task price': mutant_task.price,
                                'required storage': required_storage,
                                'required computation': required_computation,
                                'required results data': required_results_data,
                                'deadline': deadline,
                                'allocated': mutant_task.running_server
                                is not None
                            })
                    mutation_pos += 1

                    # Remove the mutant task from the task list and reset the model
                    tasks.remove(mutant_task)
                    reset_model(tasks, servers)

                    # Save all of the results to a file
                    with open(filename, 'w') as file:
                        json.dump(mutation_results, file)
    print('Finished running')
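

# A hypothetical analysis helper (not part of the original module): loads the saved
# 'mutation_grid_search' results and finds the allocated mutation with the lowest task price,
# using the 'allocated' and 'task price' fields stored above (assuming Result.store() keeps
# the keyword arguments it is given).
def cheapest_grid_mutation(filename: str):
    with open(filename) as file:
        mutation_results = json.load(file)

    allocated = {name: result for name, result in mutation_results.items()
                 if name.startswith('Mutation') and result['allocated']}
    if allocated:
        best = min(allocated, key=lambda name: allocated[name]['task price'])
        print(f'Cheapest allocated mutation: {best} with price {allocated[best]["task price"]}')
        return allocated[best]
    print('No mutation was allocated')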