Example #1
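
These snippets are shown without their import blocks. A minimal shared header, assuming only the standard library and TensorFlow (the project-specific module paths are hypothetical and therefore left commented out), might look like:

# Shared imports assumed by the snippets below; the os, typing and tensorflow
# usage is visible in the code, while the project module paths are only guesses
import os
from typing import List

import tensorflow as tf

# from env.environment import OnlineFlexibleResourceAllocationEnv   # hypothetical path
# from agents.rl_agents.agents.dqn import TaskPricingDqnAgent, ...  # hypothetical path
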
def test_agent_actions():
    print()
    pricing_agents = [
        TaskPricingDqnAgent(0, create_lstm_dqn_network(9, 5)),
        TaskPricingDdqnAgent(1, create_lstm_dqn_network(9, 5)),
        TaskPricingDuelingDqnAgent(2, create_lstm_dueling_dqn_network(9, 5)),
        TaskPricingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(9, 5)),
        TaskPricingDdpgAgent(4, create_lstm_actor_network(9),
                             create_lstm_critic_network(9)),
        TaskPricingTD3Agent(5, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9))
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(0, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDdqnAgent(1, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDuelingDqnAgent(
            2, create_lstm_dueling_dqn_network(16, 5)),
        ResourceWeightingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(16, 5)),
        ResourceWeightingDdpgAgent(4, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16)),
        ResourceWeightingTD3Agent(5, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16))
    ]

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')
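    # load_env restores a saved environment together with its current state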
    for agent in pricing_agents:
        actions = {
            server: agent.bid(state.auction_task, tasks, server,
                              state.time_step)
            for server, tasks in state.server_tasks.items()
        }
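        # Pricing actions map each server to a single bid price for the
        # auctioned task (a zero bid presumably means the server declines)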
    # noinspection PyUnboundLocalVariable
    print(
        f'Actions: {{{", ".join([f"{server.name}: {action}" for server, action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)

    for agent in weighting_agents:
        actions = {
            server: agent.weight(tasks, server, state.time_step)
            for server, tasks in state.server_tasks.items()
        }
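        # Weighting actions map each server to a {task: weight} dict, with one
        # weight per task currently running on that server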
    print(
        f'Actions: {{{", ".join([f"{server.name}: {list(task_action.values())}" for server, task_action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)

Example #2

def test_agent_evaluation():
    print()
    setup_tensorboard('training/results/tmp/', 'agent_eval')

    env = OnlineFlexibleResourceAllocationEnv('training/settings/basic.env')

    eval_envs = generate_eval_envs(env,
                                   5,
                                   'training/settings/tmp/',
                                   overwrite=True)
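    # generate_eval_envs saves the generated evaluation environments to file so
    # the same environments can be reloaded for repeatable evaluation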
    assert len(os.listdir('training/settings/tmp/')) == 5
    total_auctions, total_resource_allocation = 0, 0
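    # Each eval env contributes one auction per unallocated task (plus the task
    # currently up for auction, if any) and one resource allocation per time step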
    for eval_env in eval_envs:
        env, state = OnlineFlexibleResourceAllocationEnv.load_env(eval_env)
        total_auctions += len(env._unallocated_tasks) + (
            1 if state.auction_task is not None else 0)
        total_resource_allocation += env._total_time_steps + 1

    pricing_agents = [
        TaskPricingDqnAgent(0, create_bidirectional_dqn_network(9, 5)),
        TaskPricingDdpgAgent(1, create_lstm_actor_network(9),
                             create_lstm_critic_network(9))
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(2, create_bidirectional_dqn_network(16, 5)),
        ResourceWeightingDdpgAgent(3, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16)),
    ]

    results = eval_agent(eval_envs, 0, pricing_agents, weighting_agents)
    print(
        f'Results - Total prices: {results.total_prices}, Number of completed tasks: {results.num_completed_tasks}, '
        f'failed tasks: {results.num_failed_tasks}, winning prices: {results.winning_prices}, '
        f'Number of auctions: {results.num_auctions}, resource allocations: {results.num_resource_allocations}'
    )
    assert 0 < results.num_completed_tasks
    assert 0 < results.num_failed_tasks

    assert results.num_auctions == total_auctions
    assert results.num_resource_allocations == total_resource_allocation

Example #3

def test_resource_allocation_training():
    print()
    setup_tensorboard('/tmp/results/', 'resource_allocation_training')

    # List of agents
    agents: List[ResourceWeightingRLAgent] = [
        ResourceWeightingDqnAgent(0,
                                  create_lstm_dqn_network(16, 10),
                                  batch_size=4,
                                  save_folder='tmp'),
        ResourceWeightingDdqnAgent(1,
                                   create_lstm_dqn_network(16, 10),
                                   batch_size=4,
                                   save_folder='tmp'),
        ResourceWeightingDuelingDqnAgent(2,
                                         create_lstm_dueling_dqn_network(
                                             16, 10),
                                         batch_size=4,
                                         save_folder='tmp'),
        ResourceWeightingCategoricalDqnAgent(
            3,
            create_lstm_categorical_dqn_network(16, 10),
            batch_size=2,
            save_folder='tmp'),
        ResourceWeightingDdpgAgent(4,
                                   create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   batch_size=4,
                                   save_folder='tmp'),
        ResourceWeightingTD3Agent(5,
                                  create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16),
                                  batch_size=4,
                                  save_folder='tmp'),
    ]

    # Load the environment
    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'training/settings/resource_allocation.env')

    # Servers and tasks
    server = list(state.server_tasks.keys())[0]
    task_1, task_2, task_3, task_4 = list(state.server_tasks[server])

    # Resource weighting actions: one weight per task on the server
    actions = {server: {task_1: 1.0, task_2: 3.0, task_3: 0.0, task_4: 5.0}}

    # Environment step
    next_state, rewards, done, _ = env.step(actions)

    # Resource state
    resource_state = ResourceAllocationState(state.server_tasks[server],
                                             server, state.time_step)
    # Resource state for the same server after the environment step
    next_resource_state = ResourceAllocationState(
        next_state.server_tasks[server], server, next_state.time_step)

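    # Record a single (state, action, next state, reward) transition with each
    # agent, then run one training step on it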
    for agent in agents:
        agent.resource_allocation_obs(resource_state, actions[server],
                                      next_resource_state, rewards[server])

        agent.train()

    agent = ResourceWeightingSeq2SeqAgent(6,
                                          create_seq2seq_actor_network(),
                                          create_seq2seq_critic_network(),
                                          create_seq2seq_critic_network(),
                                          batch_size=2,
                                          save_folder='tmp')
    agent.resource_allocation_obs(resource_state, actions[server],
                                  next_resource_state, rewards[server])
    agent.resource_allocation_obs(resource_state, actions[server],
                                  next_resource_state, rewards[server])
    agent.train()

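    # Each replay buffer entry is a trajectory tuple; index 3 holds the reward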
    print(
        f'Rewards: {[trajectory[3] for trajectory in agents[0].replay_buffer]}'
    )

Example #4

def test_epsilon_policy():
    print()
    # Tests the epsilon policy by taking agent actions that should update each
    # agent's epsilon over time

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')

    # Number of epsilon steps for the agents
    epsilon_steps = 25

    # Agents that have a custom _get_action function
    pricing_agents = [
        TaskPricingDqnAgent(0,
                            create_lstm_dqn_network(9, 5),
                            epsilon_steps=epsilon_steps,
                            epsilon_update_freq=1,
                            epsilon_log_freq=1),
        TaskPricingCategoricalDqnAgent(1,
                                       create_lstm_categorical_dqn_network(
                                           9, 5),
                                       epsilon_steps=epsilon_steps,
                                       epsilon_update_freq=1,
                                       epsilon_log_freq=1),
        TaskPricingDdpgAgent(2,
                             create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             epsilon_steps=epsilon_steps,
                             epsilon_update_freq=1,
                             epsilon_log_freq=1)
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(0,
                                  create_lstm_dqn_network(16, 5),
                                  epsilon_steps=epsilon_steps,
                                  epsilon_update_freq=1,
                                  epsilon_log_freq=1),
        ResourceWeightingCategoricalDqnAgent(
            1,
            create_lstm_categorical_dqn_network(16, 5),
            epsilon_steps=epsilon_steps,
            epsilon_update_freq=1,
            epsilon_log_freq=1),
        ResourceWeightingDdpgAgent(2,
                                   create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   epsilon_steps=epsilon_steps,
                                   epsilon_update_freq=1,
                                   epsilon_log_freq=1)
    ]

    # Create a tf summary writer, then take actions that update the epsilon
    # values of both agent sets
    writer = tf.summary.create_file_writer('agent/tmp/testing_epsilon')
    num_steps = 10
    with writer.as_default():
        for _ in range(num_steps):
            for agent in pricing_agents:
                actions = {
                    server: agent.bid(state.auction_task,
                                      tasks,
                                      server,
                                      state.time_step,
                                      training=True)
                    for server, tasks in state.server_tasks.items()
                }

        state, rewards, done, _ = env.step(actions)

        for _ in range(num_steps):
            for agent in weighting_agents:
                actions = {
                    server: agent.weight(tasks,
                                         server,
                                         state.time_step,
                                         training=True)
                    for server, tasks in state.server_tasks.items()
                }

        state, rewards, done, _ = env.step(actions)

    # Check that the resulting total action counts are valid
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert agent.total_actions == num_steps * 3

    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert agent.total_actions == num_steps * 3

    # Check that each agent's epsilon has annealed to its final value:
    # 30 actions were taken per agent but only 25 epsilon steps are configured
    assert pricing_agents[0].epsilon == pricing_agents[0].final_epsilon
    assert pricing_agents[1].epsilon == pricing_agents[1].final_epsilon
    assert weighting_agents[0].epsilon == weighting_agents[0].final_epsilon
    assert weighting_agents[1].epsilon == weighting_agents[1].final_epsilon
    assert pricing_agents[2].epsilon_std == pricing_agents[2].final_epsilon_std
    assert weighting_agents[2].epsilon_std == weighting_agents[2].final_epsilon_std
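
The assertions above rely on epsilon annealing to its final value within epsilon_steps actions. The exact schedule is not shown in these snippets; a minimal linear annealing sketch, consistent with the initial_epsilon / final_epsilon / epsilon_steps attributes used above (a hypothetical helper, not from the repository), would be:

def linear_epsilon(total_actions, initial_epsilon, final_epsilon, epsilon_steps):
    # Anneal linearly from the initial to the final value over epsilon_steps
    # actions, then hold the final value
    fraction = min(total_actions / epsilon_steps, 1.0)
    return initial_epsilon + fraction * (final_epsilon - initial_epsilon)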

Example #5

def test_ddpg_actions():
    print()
    # Check that DDPG actions are valid
    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')

    repeat, max_repeat = 0, 10
    auction_actions = {}
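    # Re-create the agent until both its greedy and its exploratory bids include
    # at least one positive price; a freshly initialised actor may output zeros
    # for every server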
    while repeat <= max_repeat:
        pricing_agent = TaskPricingDdpgAgent(3,
                                             create_lstm_actor_network(9),
                                             create_lstm_critic_network(9),
                                             initial_epsilon=0.5)
        auction_actions = {
            server: pricing_agent.bid(state.auction_task, tasks, server,
                                      state.time_step)
            for server, tasks in state.server_tasks.items()
        }
        print(f'Greedy actions: {list(auction_actions.values())}')
        if any(0 < action for server, action in auction_actions.items()):

            auction_actions = {
                server: pricing_agent.bid(state.auction_task,
                                          tasks,
                                          server,
                                          state.time_step,
                                          training=True)
                for server, tasks in state.server_tasks.items()
            }
            print(
                f'Epsilon Greedy actions: {list(auction_actions.values())}\n')
            if any(0 < action for server, action in auction_actions.items()):
                break
        elif repeat == max_repeat:
            raise Exception('Greedy auction bids were never positive')
        else:
            repeat += 1

    state, rewards, done, _ = env.step(auction_actions)

    repeat, max_repeat = 0, 10
    while repeat <= max_repeat:
        weighting_agent = ResourceWeightingDdpgAgent(
            3,
            create_lstm_actor_network(16),
            create_lstm_critic_network(16),
            initial_epsilon=0.5)
        weighting_actions = {
            server: weighting_agent.weight(tasks, server, state.time_step)
            for server, tasks in state.server_tasks.items()
        }
        print(
            f'Greedy actions: {[list(actions.values()) for actions in weighting_actions.values()]}'
        )
        if any(0 < action
               for server, task_actions in weighting_actions.items()
               for task, action in task_actions.items()):
            weighting_actions = {
                server: weighting_agent.weight(tasks,
                                               server,
                                               state.time_step,
                                               training=True)
                for server, tasks in state.server_tasks.items()
            }
            print(
                f'Epsilon Greedy actions: {[list(actions.values()) for actions in weighting_actions.values()]}'
            )
            if any(0 < action
                   for server, task_actions in weighting_actions.items()
                   for task, action in task_actions.items()):
                break
        elif repeat == max_repeat:
            raise Exception('Greedy weighting actions were never positive')
        else:
            repeat += 1

Example #6

def test_build_agent():
    def assert_args(test_agent, args):
        """
        Asserts that the proposed arguments have been assigned to the agent

        Args:
            test_agent: The test agent
            args: The arguments used to construct the agent
        """
        for arg_name, arg_value in args.items():
            assert getattr(test_agent, arg_name) == arg_value, \
                f'Attr: {arg_name}, correct value: {arg_value}, actual value: {getattr(test_agent, arg_name)}'

    # Check inheritance arguments
    reinforcement_learning_arguments = {
        'batch_size': 16,
        'error_loss_fn': tf.compat.v1.losses.mean_squared_error,
        'initial_training_replay_size': 1000,
        'training_freq': 2,
        'replay_buffer_length': 20000,
        'save_frequency': 12500,
        'save_folder': 'test',
        'discount_factor': 0.9
    }
    dqn_arguments = {
        'target_update_tau': 1.0,
        'target_update_frequency': 2500,
        'optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon': 0.5,
        'final_epsilon': 0.2,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
    }
    ddpg_arguments = {
        'actor_optimiser': tf.keras.optimizers.Adadelta(),
        'critic_optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon_std': 0.8,
        'final_epsilon_std': 0.1,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
        'min_value': -15.0,
        'max_value': 15.0
    }
    pricing_arguments = {
        'failed_auction_reward': -100,
        'failed_multiplier': -100
    }
    weighting_arguments = {
        'other_task_discount': 0.2,
        'success_reward': 1,
        'failed_reward': -2
    }

    # DQN Agent arguments ----------------------------------------------------------------------
    dqn_pricing_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **pricing_arguments
    }
    dqn_weighting_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **weighting_arguments
    }
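
    # The argument dicts are combined with dict unpacking; mappings merge left
    # to right, so on a key collision the later dict would win, e.g.
    # {**{'batch_size': 16}, **{'batch_size': 4}} == {'batch_size': 4}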

    pricing_network = create_lstm_dqn_network(9, 10)
    categorical_pricing_network = create_lstm_categorical_dqn_network(9, 10)
    pricing_agents = [
        TaskPricingDqnAgent(0, pricing_network, **dqn_pricing_arguments),
        TaskPricingDdqnAgent(1, pricing_network, **dqn_pricing_arguments),
        TaskPricingDuelingDqnAgent(2, pricing_network,
                                   **dqn_pricing_arguments),
        TaskPricingCategoricalDqnAgent(3, categorical_pricing_network,
                                       **dqn_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_pricing_arguments)

    weighting_network = create_lstm_dqn_network(16, 10)
    categorical_weighting_network = create_lstm_categorical_dqn_network(16, 10)
    weighting_agents = [
        ResourceWeightingDqnAgent(0, weighting_network,
                                  **dqn_weighting_arguments),
        ResourceWeightingDdqnAgent(1, weighting_network,
                                   **dqn_weighting_arguments),
        ResourceWeightingDuelingDqnAgent(2, weighting_network,
                                         **dqn_weighting_arguments),
        ResourceWeightingCategoricalDqnAgent(3, categorical_weighting_network,
                                             **dqn_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_weighting_arguments)

    # PG Agent arguments ----------------------------------------------------------------------------------
    ddpg_pricing_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **pricing_arguments
    }
    ddpg_weighting_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **weighting_arguments
    }

    pricing_agents = [
        TaskPricingDdpgAgent(3, create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             **ddpg_pricing_arguments),
        TaskPricingTD3Agent(4, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9),
                            **ddpg_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_pricing_arguments)

    weighting_agents = [
        ResourceWeightingDdpgAgent(3, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   **ddpg_weighting_arguments),
        ResourceWeightingTD3Agent(4, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16),
                                  **ddpg_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_weighting_arguments)

Example #7

def test_run_training():
    # NOTE: this snippet's original function header was not preserved; the name
    # above and the save_folder / writer set-up below are reconstructed
    # placeholders rather than the repository's actual code
    save_folder = 'tmp'
    writer = tf.summary.create_file_writer('training/results/tmp/')

    env = OnlineFlexibleResourceAllocationEnv([
        './training/settings/basic.env',
        './training/settings/large_tasks_servers.env',
        './training/settings/limited_resources.env',
        './training/settings/mixture_tasks_servers.env'
    ])
    eval_envs = generate_eval_envs(env, 5,
                                   './training/settings/eval_envs/algo/')

    task_pricing_agents = [
        TaskPricingDdpgAgent(agent_num,
                             create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             save_folder=save_folder) for agent_num in range(3)
    ]
    resource_weighting_agents = [
        ResourceWeightingDdpgAgent(0,
                                   create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   save_folder=save_folder)
    ]

    with writer.as_default():
        run_training(env, eval_envs, 600, task_pricing_agents,
                     resource_weighting_agents, 10)

    for agent in task_pricing_agents:
        agent.save()
    for agent in resource_weighting_agents:
        agent.save()