Example #1
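Builds one task-pricing and one resource-weighting agent for each supported algorithm (DQN, Double DQN, Dueling DQN, Categorical DQN, DDPG and TD3), then checks that every agent can produce auction bids and resource weights for a loaded environment state.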
def test_agent_actions():
    print()
    pricing_agents = [
        TaskPricingDqnAgent(0, create_lstm_dqn_network(9, 5)),
        TaskPricingDdqnAgent(1, create_lstm_dqn_network(9, 5)),
        TaskPricingDuelingDqnAgent(2, create_lstm_dueling_dqn_network(9, 5)),
        TaskPricingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(9, 5)),
        TaskPricingDdpgAgent(4, create_lstm_actor_network(9),
                             create_lstm_critic_network(9)),
        TaskPricingTD3Agent(5, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9))
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(0, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDdqnAgent(1, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDuelingDqnAgent(
            2, create_lstm_dueling_dqn_network(16, 5)),
        ResourceWeightingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(16, 5)),
        ResourceWeightingDdpgAgent(4, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16)),
        ResourceWeightingTD3Agent(5, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16))
    ]

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')
    # Each agent generates bids; only the last agent's actions are stepped
    for agent in pricing_agents:
        actions = {
            server: agent.bid(state.auction_task, tasks, server,
                              state.time_step)
            for server, tasks in state.server_tasks.items()
        }
    # noinspection PyUnboundLocalVariable
    print(
        f'Actions: {{{", ".join([f"{server.name}: {action}" for server, action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)

    # Each agent generates resource weights; only the last agent's actions are stepped
    for agent in weighting_agents:
        actions = {
            server: agent.weight(tasks, server, state.time_step)
            for server, tasks in state.server_tasks.items()
        }
    print(
        f'Actions: {{{", ".join([f"{server.name}: {list(task_action.values())}" for server, task_action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)
Example #2
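Sets up TensorBoard logging, builds Categorical DQN task-pricing and resource-weighting agents, generates 20 evaluation environments, and runs training over a mixture of environment settings.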
    writer, datetime = setup_tensorboard('training/results/logs/', folder)

    save_folder = f'{folder}_{datetime}'

    env = OnlineFlexibleResourceAllocationEnv([
        './training/settings/basic.env',
        './training/settings/large_tasks_servers.env',
        './training/settings/limited_resources.env',
        './training/settings/mixture_tasks_servers.env'
    ])
    eval_envs = generate_eval_envs(env, 20,
                                   f'./training/settings/eval_envs/algo/')

    task_pricing_agents = [
        TaskPricingCategoricalDqnAgent(agent_num,
                                       create_lstm_categorical_dqn_network(
                                           9, 21),
                                       save_folder=save_folder)
        for agent_num in range(3)
    ]
    resource_weighting_agents = [
        ResourceWeightingCategoricalDqnAgent(
            0,
            create_lstm_categorical_dqn_network(16, 11),
            save_folder=save_folder)
    ]

    with writer.as_default():
        run_training(env, eval_envs, 600, task_pricing_agents,
                     resource_weighting_agents, 10)

    for agent in task_pricing_agents:
        # Assumption: the loop body is cut off in the source listing; saving
        # each trained agent is a plausible final step
        agent.save()
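Example #3
Builds one task-pricing agent per algorithm with a small batch size, steps a loaded auction environment, constructs the pricing states together with completed and failed versions of the auctioned task, records them through winning_auction_bid and failed_auction_bid, and runs a single training step for each agent.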
def test_task_price_training():
    print()
    setup_tensorboard('/tmp/results/', 'price_training')

    # List of agents
    agents: List[TaskPricingRLAgent] = [
        TaskPricingDqnAgent(0,
                            create_lstm_dqn_network(9, 10),
                            batch_size=4,
                            save_folder='tmp'),
        TaskPricingDdqnAgent(1,
                             create_lstm_dqn_network(9, 10),
                             batch_size=4,
                             save_folder='tmp'),
        TaskPricingDuelingDqnAgent(2,
                                   create_lstm_dueling_dqn_network(9, 10),
                                   batch_size=4,
                                   save_folder='tmp'),
        TaskPricingCategoricalDqnAgent(3,
                                       create_lstm_categorical_dqn_network(
                                           9, 10),
                                       batch_size=4,
                                       save_folder='tmp'),
        TaskPricingDdpgAgent(4,
                             create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             batch_size=4,
                             save_folder='tmp'),
        TaskPricingTD3Agent(5,
                            create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9),
                            batch_size=4,
                            save_folder='tmp')
    ]

    # Load the environment
    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'training/settings/auction.env')

    # Servers
    server_1, server_2 = list(state.server_tasks.keys())
    # Actions
    actions = {server_1: 1.0, server_2: 2.0}

    # Environment step
    next_state, reward, done, info = env.step(actions)

    # Server states
    server_1_state = TaskPricingState(state.auction_task,
                                      state.server_tasks[server_1], server_1,
                                      state.time_step)
    server_2_state = TaskPricingState(state.auction_task,
                                      state.server_tasks[server_2], server_2,
                                      state.time_step)

    # Next server states
    next_server_1_state = TaskPricingState(next_state.auction_task,
                                           next_state.server_tasks[server_1],
                                           server_1, next_state.time_step)
    next_server_2_state = TaskPricingState(next_state.auction_task,
                                           next_state.server_tasks[server_2],
                                           server_2, next_state.time_step)
    # Finished auction task
    finished_task = next(finished_task
                         for finished_task in next_state.server_tasks[server_1]
                         if finished_task == state.auction_task)
    finished_task = finished_task._replace(stage=TaskStage.COMPLETED)
    failed_task = finished_task._replace(stage=TaskStage.FAILED)

    # Loop over the agents, add the observations and try training
    for agent in agents:
        agent.winning_auction_bid(server_1_state, actions[server_1],
                                  finished_task, next_server_1_state)
        agent.winning_auction_bid(server_1_state, actions[server_1],
                                  failed_task, next_server_1_state)
        agent.failed_auction_bid(server_2_state, actions[server_2],
                                 next_server_2_state)
        agent.failed_auction_bid(server_2_state, 0, next_server_2_state)

        agent.train()

    print(
        f'Rewards: {[trajectory[3] for trajectory in agents[0].replay_buffer]}'
    )
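Example #4
Checks the exploration schedules: the DQN and Categorical DQN agents anneal epsilon, while the DDPG agents anneal the standard deviation of their action noise; with epsilon_update_freq=1, every schedule should reach its final value after num_steps * 3 actions.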
def test_epsilon_policy():
    print()
    # Tests the epsilon policy by generating agent actions that should update each agent's epsilon over time

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')

    # Number of epsilon steps for the agents
    epsilon_steps = 25

    # Agents that have a custom _get_action function
    pricing_agents = [
        TaskPricingDqnAgent(0,
                            create_lstm_dqn_network(9, 5),
                            epsilon_steps=epsilon_steps,
                            epsilon_update_freq=1,
                            epsilon_log_freq=1),
        TaskPricingCategoricalDqnAgent(1,
                                       create_lstm_categorical_dqn_network(
                                           9, 5),
                                       epsilon_steps=epsilon_steps,
                                       epsilon_update_freq=1,
                                       epsilon_log_freq=1),
        TaskPricingDdpgAgent(2,
                             create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             epsilon_steps=epsilon_steps,
                             epsilon_update_freq=1,
                             epsilon_log_freq=1)
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(0,
                                  create_lstm_dqn_network(16, 5),
                                  epsilon_steps=epsilon_steps,
                                  epsilon_update_freq=1,
                                  epsilon_log_freq=1),
        ResourceWeightingCategoricalDqnAgent(
            1,
            create_lstm_categorical_dqn_network(16, 5),
            epsilon_steps=epsilon_steps,
            epsilon_update_freq=1,
            epsilon_log_freq=1),
        ResourceWeightingDdpgAgent(2,
                                   create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   epsilon_steps=epsilon_steps,
                                   epsilon_update_freq=1,
                                   epsilon_log_freq=1)
    ]

    # Generate a tf writer and generate actions that will update the epsilon values for both agents
    writer = tf.summary.create_file_writer(f'agent/tmp/testing_epsilon')
    num_steps = 10
    with writer.as_default():
        for _ in range(num_steps):
            for agent in pricing_agents:
                actions = {
                    server: agent.bid(state.auction_task,
                                      tasks,
                                      server,
                                      state.time_step,
                                      training=True)
                    for server, tasks in state.server_tasks.items()
                }

        state, rewards, done, _ = env.step(actions)

        for _ in range(num_steps):
            for agent in weighting_agents:
                actions = {
                    server: agent.weight(tasks,
                                         server,
                                         state.time_step,
                                         training=True)
                    for server, tasks in state.server_tasks.items()
                }

        state, rewards, done, _ = env.step(actions)

    # Check that the resulting total action counts are valid
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert agent.total_actions == num_steps * 3

    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert agent.total_actions == num_steps * 3

    # Check that the agents' epsilon values have reached their final values
    assert pricing_agents[0].final_epsilon == pricing_agents[0].epsilon
    assert pricing_agents[1].final_epsilon == pricing_agents[1].epsilon
    assert weighting_agents[0].final_epsilon == weighting_agents[0].epsilon
    assert weighting_agents[1].final_epsilon == weighting_agents[1].epsilon
    assert pricing_agents[2].final_epsilon_std == pricing_agents[2].epsilon_std
    assert weighting_agents[2].final_epsilon_std == weighting_agents[2].epsilon_std
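
A minimal sketch of the linear annealing these assertions imply, assuming the agents interpolate from initial_epsilon to final_epsilon over epsilon_steps actions (the exact update rule is an assumption; the names mirror the constructor arguments):

def linear_epsilon(total_actions, initial_epsilon=1.0, final_epsilon=0.1,
                   epsilon_steps=25):
    # Anneal linearly towards final_epsilon, then hold it constant
    progress = min(total_actions / epsilon_steps, 1.0)
    return final_epsilon + (1.0 - progress) * (initial_epsilon - final_epsilon)

# After num_steps * 3 = 30 actions (more than epsilon_steps = 25),
# the schedule has finished annealing
assert linear_epsilon(30) == 0.1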
Example #5
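Exercises Categorical DQN (C51) action selection: it compares greedy actions with epsilon-greedy actions (training=True) and manually reproduces the greedy choice by converting the network's categorical output into expected Q-values.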
def test_c51_actions():
    print()
    # Test the C51 agent actions
    pricing_agent = TaskPricingCategoricalDqnAgent(
        3, create_lstm_categorical_dqn_network(9, 5), initial_epsilon=0.5)
    weighting_agent = ResourceWeightingCategoricalDqnAgent(
        3, create_lstm_categorical_dqn_network(16, 5), initial_epsilon=0.5)

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')
    auction_actions = {
        server: pricing_agent.bid(state.auction_task, tasks, server,
                                  state.time_step)
        for server, tasks in state.server_tasks.items()
    }
    print(f'Greedy actions: {list(auction_actions.values())}')
    assert any(0 < action for server, action in auction_actions.items())

    server, tasks = next(
        (server, tasks) for server, tasks in state.server_tasks.items())
    observation = tf.expand_dims(pricing_agent._network_obs(
        state.auction_task, tasks, server, state.time_step),
                                 axis=0)
    network_output = pricing_agent.model_network(observation)
    probabilities = tf.nn.softmax(network_output)
    probability_value = probabilities * pricing_agent.z_values
    q_values = tf.reduce_sum(probability_value, axis=2)
    argmax_q_values = tf.math.argmax(q_values, axis=1, output_type=tf.int32)
    print(
        f'Network output: {network_output}\nProbabilities: {probabilities}\nProbability value: {probability_value}\n'
        f'Q value: {q_values}\nArgmax Q value: {argmax_q_values}')

    auction_actions = {
        server: pricing_agent.bid(state.auction_task,
                                  tasks,
                                  server,
                                  state.time_step,
                                  training=True)
        for server, tasks in state.server_tasks.items()
    }
    print(f'Epsilon Greedy actions: {list(auction_actions.values())}\n')
    assert any(0 < action for server, action in auction_actions.items())

    state, rewards, done, _ = env.step(auction_actions)

    weighting_actions = {
        server: weighting_agent.weight(tasks, server, state.time_step)
        for server, tasks in state.server_tasks.items()
    }
    print(
        f'Greedy actions: {[list(actions.values()) for actions in weighting_actions.values()]}'
    )
    assert any(0 < action
               for server, task_actions in weighting_actions.items()
               for task, action in task_actions.items())

    weighting_actions = {
        server: weighting_agent.weight(tasks,
                                       server,
                                       state.time_step,
                                       training=True)
        for server, tasks in state.server_tasks.items()
    }
    print(
        f'Epsilon Greedy actions: {[list(actions.values()) for actions in weighting_actions.values()]}'
    )
    assert any(0 < action
               for server, task_actions in weighting_actions.items()
               for task, action in task_actions.items())
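
A minimal standalone sketch of the distributional-to-Q-value conversion used above, written in NumPy with stand-in shapes; the atom support in z_values and the random network output are assumptions, not the project's actual values:

import numpy as np

num_actions, num_atoms = 5, 51
z_values = np.linspace(-10.0, 10.0, num_atoms)       # assumed atom support
logits = np.random.randn(1, num_actions, num_atoms)  # stand-in network output

# Softmax over the atom axis turns the logits into one distribution per action
probabilities = np.exp(logits) / np.exp(logits).sum(axis=2, keepdims=True)
# An action's Q-value is the expected value of its return distribution
q_values = (probabilities * z_values).sum(axis=2)    # shape: (1, num_actions)
greedy_action = q_values.argmax(axis=1)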
Example #6
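Constructs every task-pricing and resource-weighting agent variant with a full set of keyword arguments and asserts that each argument is stored unchanged on the resulting agent.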
def test_build_agent():
    def assert_args(test_agent, args):
        """
        Asserts that the proposed arguments have been assigned to the agent

        Args:
            test_agent: The test agent
            args: The arguments used to construct the agent
        """
        for arg_name, arg_value in args.items():
            assert getattr(test_agent, arg_name) == arg_value, \
                f'Attr: {arg_name}, correct value: {arg_value}, actual value: {getattr(test_agent, arg_name)}'

    # Check inheritance arguments
    reinforcement_learning_arguments = {
        'batch_size': 16,
        'error_loss_fn': tf.compat.v1.losses.mean_squared_error,
        'initial_training_replay_size': 1000,
        'training_freq': 2,
        'replay_buffer_length': 20000,
        'save_frequency': 12500,
        'save_folder': 'test',
        'discount_factor': 0.9
    }
    dqn_arguments = {
        'target_update_tau': 1.0,
        'target_update_frequency': 2500,
        'optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon': 0.5,
        'final_epsilon': 0.2,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
    }
    ddpg_arguments = {
        'actor_optimiser': tf.keras.optimizers.Adadelta(),
        'critic_optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon_std': 0.8,
        'final_epsilon_std': 0.1,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
        'min_value': -15.0,
        'max_value': 15
    }
    pricing_arguments = {
        'failed_auction_reward': -100,
        'failed_multiplier': -100
    }
    weighting_arguments = {
        'other_task_discount': 0.2,
        'success_reward': 1,
        'failed_reward': -2
    }

    # DQN Agent arguments ----------------------------------------------------------------------
    dqn_pricing_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **pricing_arguments
    }
    dqn_weighting_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **weighting_arguments
    }

    pricing_network = create_lstm_dqn_network(9, 10)
    categorical_pricing_network = create_lstm_categorical_dqn_network(9, 10)
    pricing_agents = [
        TaskPricingDqnAgent(0, pricing_network, **dqn_pricing_arguments),
        TaskPricingDdqnAgent(1, pricing_network, **dqn_pricing_arguments),
        TaskPricingDuelingDqnAgent(2, pricing_network,
                                   **dqn_pricing_arguments),
        TaskPricingCategoricalDqnAgent(3, categorical_pricing_network,
                                       **dqn_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_pricing_arguments)

    weighting_network = create_lstm_dqn_network(16, 10)
    categorical_weighting_network = create_lstm_categorical_dqn_network(16, 10)
    weighting_agents = [
        ResourceWeightingDqnAgent(0, weighting_network,
                                  **dqn_weighting_arguments),
        ResourceWeightingDdqnAgent(1, weighting_network,
                                   **dqn_weighting_arguments),
        ResourceWeightingDuelingDqnAgent(2, weighting_network,
                                         **dqn_weighting_arguments),
        ResourceWeightingCategoricalDqnAgent(3, categorical_weighting_network,
                                             **dqn_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_weighting_arguments)

    # PG Agent arguments ----------------------------------------------------------------------------------
    ddpg_pricing_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **pricing_arguments
    }
    ddpg_weighting_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **weighting_arguments
    }

    pricing_agents = [
        TaskPricingDdpgAgent(3, create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             **ddpg_pricing_arguments),
        TaskPricingTD3Agent(4, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9),
                            **ddpg_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_pricing_arguments)

    weighting_agents = [
        ResourceWeightingDdpgAgent(3, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   **ddpg_weighting_arguments),
        ResourceWeightingTD3Agent(4, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16),
                                  **ddpg_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_weighting_arguments)