Example #1
def test_saving_agent():
    print()
    # Different agents
    dqn_agent = TaskPricingDqnAgent(0,
                                    create_lstm_dqn_network(9, 10),
                                    save_folder='tmp')
    ddpg_agent = TaskPricingDdpgAgent(1,
                                      create_lstm_actor_network(9),
                                      create_lstm_critic_network(9),
                                      save_folder='tmp')
    td3_agent = TaskPricingTD3Agent(2,
                                    create_lstm_actor_network(9),
                                    create_lstm_critic_network(9),
                                    create_lstm_critic_network(9),
                                    save_folder='tmp')

    # Save the agent
    dqn_agent.save('agent/checkpoints/')
    ddpg_agent.save('agent/checkpoints/')
    td3_agent.save('agent/checkpoints/')

    # Check that loading works: the checkpoint path combines the save path,
    # the agent's save_folder and the agent's name
    loaded_model = create_lstm_dqn_network(9, 10)
    loaded_model.load_weights(
        'agent/checkpoints/tmp/Task_pricing_Dqn_agent_0/update_0')

    assert all(
        tf.reduce_all(weights == load_weights) for weights, load_weights in
        zip(dqn_agent.model_network.variables, loaded_model.variables))
Example #2
def test_agent_actions():
    print()
    pricing_agents = [
        TaskPricingDqnAgent(0, create_lstm_dqn_network(9, 5)),
        TaskPricingDdqnAgent(1, create_lstm_dqn_network(9, 5)),
        TaskPricingDuelingDqnAgent(2, create_lstm_dueling_dqn_network(9, 5)),
        TaskPricingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(9, 5)),
        TaskPricingDdpgAgent(4, create_lstm_actor_network(9),
                             create_lstm_critic_network(9)),
        TaskPricingTD3Agent(5, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9))
    ]
    weighting_agents = [
        ResourceWeightingDqnAgent(0, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDdqnAgent(1, create_lstm_dqn_network(16, 5)),
        ResourceWeightingDuelingDqnAgent(
            2, create_lstm_dueling_dqn_network(16, 5)),
        ResourceWeightingCategoricalDqnAgent(
            3, create_lstm_categorical_dqn_network(16, 5)),
        ResourceWeightingDdpgAgent(4, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16)),
        ResourceWeightingTD3Agent(5, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16))
    ]

    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'agent/settings/actions.env')
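    # Each pricing agent bids on the auctioned task for every server; only the
    # final agent's actions survive the loop and are passed to env.step() below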
    for agent in pricing_agents:
        actions = {
            server: agent.bid(state.auction_task, tasks, server,
                              state.time_step)
            for server, tasks in state.server_tasks.items()
        }
    # noinspection PyUnboundLocalVariable
    print(
        f'Actions: {{{", ".join([f"{server.name}: {action}" for server, action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)

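    # Each weighting agent computes resource weights for the tasks on every
    # server; again only the last agent's actions are used for the step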
    for agent in weighting_agents:
        actions = {
            server: agent.weight(tasks, server, state.time_step)
            for server, tasks in state.server_tasks.items()
        }
    print(
        f'Actions: {{{", ".join([f"{server.name}: {list(task_action.values())}" for server, task_action in actions.items()])}}}'
    )

    state, rewards, done, _ = env.step(actions)
Example #3
if __name__ == "__main__":
    folder = 'td3_auction_dqn_weighting_agents'
    writer, datetime = setup_tensorboard('training/results/logs/', folder)

    save_folder = f'{folder}_{datetime}'

    env = OnlineFlexibleResourceAllocationEnv([
        './training/settings/basic.env',
        './training/settings/large_tasks_servers.env',
        './training/settings/limited_resources.env',
        './training/settings/mixture_tasks_servers.env'
    ])
    eval_envs = generate_eval_envs(env, 5, './training/settings/eval_envs/algo/')

    task_pricing_agents = [
        TaskPricingTD3Agent(agent_num, create_lstm_actor_network(9), create_lstm_critic_network(9),
                            create_lstm_critic_network(9), save_folder=save_folder)
        for agent_num in range(3)
    ]

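    # Reuse the weighting network from a previously trained double DQN checkpoint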
    network = tf.keras.models.load_model('training/algorithm/checkpoint/Resource_weighting_Double_Dqn_agent_0')
    resource_weighting_agents = [
        ResourceWeightingDqnAgent(0, network, save_folder=save_folder)
    ]

    with writer.as_default():
        run_training(env, eval_envs, 600, task_pricing_agents, resource_weighting_agents, 10)

    for agent in task_pricing_agents:
        agent.save()
    for agent in resource_weighting_agents:
        agent.save()
Example #4
def test_task_price_training():
    print()
    setup_tensorboard('/tmp/results/', 'price_training')

    # List of agents
    agents: List[TaskPricingRLAgent] = [
        TaskPricingDqnAgent(0,
                            create_lstm_dqn_network(9, 10),
                            batch_size=4,
                            save_folder='tmp'),
        TaskPricingDdqnAgent(1,
                             create_lstm_dqn_network(9, 10),
                             batch_size=4,
                             save_folder='tmp'),
        TaskPricingDuelingDqnAgent(2,
                                   create_lstm_dueling_dqn_network(9, 10),
                                   batch_size=4,
                                   save_folder='tmp'),
        TaskPricingCategoricalDqnAgent(3,
                                       create_lstm_categorical_dqn_network(
                                           9, 10),
                                       batch_size=4,
                                       save_folder='tmp'),
        TaskPricingDdpgAgent(4,
                             create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             batch_size=4,
                             save_folder='tmp'),
        TaskPricingTD3Agent(5,
                            create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9),
                            batch_size=4,
                            save_folder='tmp')
    ]

    # Load the environment
    env, state = OnlineFlexibleResourceAllocationEnv.load_env(
        'training/settings/auction.env')

    # Servers
    server_1, server_2 = list(state.server_tasks.keys())
    # Actions
    actions = {server_1: 1.0, server_2: 2.0}

    # Environment step
    next_state, reward, done, info = env.step(actions)

    # Server states
    server_1_state = TaskPricingState(state.auction_task,
                                      state.server_tasks[server_1], server_1,
                                      state.time_step)
    server_2_state = TaskPricingState(state.auction_task,
                                      state.server_tasks[server_2], server_2,
                                      state.time_step)

    # Next server states
    next_server_1_state = TaskPricingState(next_state.auction_task,
                                           next_state.server_tasks[server_1],
                                           server_1, next_state.time_step)
    next_server_2_state = TaskPricingState(next_state.auction_task,
                                           next_state.server_tasks[server_2],
                                           server_2, next_state.time_step)
    # Find the auctioned task on server 1 and build completed / failed copies
    # of it (via namedtuple _replace)
    finished_task = next(finished_task
                         for finished_task in next_state.server_tasks[server_1]
                         if finished_task == state.auction_task)
    finished_task = finished_task._replace(stage=TaskStage.COMPLETED)
    failed_task = finished_task._replace(stage=TaskStage.FAILED)

    # Loop over the agents, add the observations and try training
    for agent in agents:
        agent.winning_auction_bid(server_1_state, actions[server_1],
                                  finished_task, next_server_1_state)
        agent.winning_auction_bid(server_1_state, actions[server_1],
                                  failed_task, next_server_1_state)
        agent.failed_auction_bid(server_2_state, actions[server_2],
                                 next_server_2_state)
        agent.failed_auction_bid(server_2_state, 0, next_server_2_state)

        agent.train()

    print(
        f'Rewards: {[trajectory[3] for trajectory in agents[0].replay_buffer]}'
    )
Example #5
    env = OnlineFlexibleResourceAllocationEnv([
        './training/settings/basic.env',
        './training/settings/large_tasks_servers.env',
        './training/settings/limited_resources.env',
        './training/settings/mixture_tasks_servers.env'
    ])
    eval_envs = generate_eval_envs(env, 20,
                                   './training/settings/eval_envs/algo/')

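    # All three pricing agents share a single pair of central (twin) critics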
    central_critic = create_lstm_critic_network(9)
    central_twin_critic = create_lstm_critic_network(9)
    task_pricing_agents = [
        TaskPricingTD3Agent(agent_num,
                            create_lstm_actor_network(9),
                            central_critic,
                            central_twin_critic,
                            save_folder=save_folder) for agent_num in range(3)
    ]
    resource_weighting_agents = [
        ResourceWeightingTD3Agent(0,
                                  create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16),
                                  save_folder=save_folder)
    ]

    with writer.as_default():
        run_training(env, eval_envs, 600, task_pricing_agents,
                     resource_weighting_agents, 10)
Example #6
def test_build_agent():
    def assert_args(test_agent, args):
        """
        Asserts that the provided arguments have been assigned to the agent

        Args:
            test_agent: The test agent
            args: The arguments used to construct the agent
        """
        for arg_name, arg_value in args.items():
            assert getattr(test_agent, arg_name) == arg_value, \
                f'Attr: {arg_name}, correct value: {arg_value}, actual value: {getattr(test_agent, arg_name)}'

    # Check inheritance arguments
    reinforcement_learning_arguments = {
        'batch_size': 16,
        'error_loss_fn': tf.compat.v1.losses.mean_squared_error,
        'initial_training_replay_size': 1000,
        'training_freq': 2,
        'replay_buffer_length': 20000,
        'save_frequency': 12500,
        'save_folder': 'test',
        'discount_factor': 0.9
    }
    dqn_arguments = {
        'target_update_tau': 1.0,
        'target_update_frequency': 2500,
        'optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon': 0.5,
        'final_epsilon': 0.2,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
    }
    ddpg_arguments = {
        'actor_optimiser': tf.keras.optimizers.Adadelta(),
        'critic_optimiser': tf.keras.optimizers.Adadelta(),
        'initial_epsilon_std': 0.8,
        'final_epsilon_std': 0.1,
        'epsilon_update_freq': 25,
        'epsilon_log_freq': 10,
        'min_value': -15.0,
        'max_value': 15.0
    }
    pricing_arguments = {
        'failed_auction_reward': -100,
        'failed_multiplier': -100
    }
    weighting_arguments = {
        'other_task_discount': 0.2,
        'success_reward': 1,
        'failed_reward': -2
    }

    # DQN Agent arguments ----------------------------------------------------------------------
    dqn_pricing_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **pricing_arguments
    }
    dqn_weighting_arguments = {
        **reinforcement_learning_arguments,
        **dqn_arguments,
        **weighting_arguments
    }

    pricing_network = create_lstm_dqn_network(9, 10)
    categorical_pricing_network = create_lstm_categorical_dqn_network(9, 10)
    pricing_agents = [
        TaskPricingDqnAgent(0, pricing_network, **dqn_pricing_arguments),
        TaskPricingDdqnAgent(1, pricing_network, **dqn_pricing_arguments),
        TaskPricingDuelingDqnAgent(2, pricing_network,
                                   **dqn_pricing_arguments),
        TaskPricingCategoricalDqnAgent(3, categorical_pricing_network,
                                       **dqn_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_pricing_arguments)

    weighting_network = create_lstm_dqn_network(16, 10)
    categorical_weighting_network = create_lstm_categorical_dqn_network(16, 10)
    weighting_agents = [
        ResourceWeightingDqnAgent(0, weighting_network,
                                  **dqn_weighting_arguments),
        ResourceWeightingDdqnAgent(1, weighting_network,
                                   **dqn_weighting_arguments),
        ResourceWeightingDuelingDqnAgent(2, weighting_network,
                                         **dqn_weighting_arguments),
        ResourceWeightingCategoricalDqnAgent(3, categorical_weighting_network,
                                             **dqn_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, dqn_weighting_arguments)

    # PG Agent arguments ----------------------------------------------------------------------------------
    ddpg_pricing_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **pricing_arguments
    }
    ddpg_weighting_arguments = {
        **reinforcement_learning_arguments,
        **ddpg_arguments,
        **weighting_arguments
    }

    pricing_agents = [
        TaskPricingDdpgAgent(3, create_lstm_actor_network(9),
                             create_lstm_critic_network(9),
                             **ddpg_pricing_arguments),
        TaskPricingTD3Agent(4, create_lstm_actor_network(9),
                            create_lstm_critic_network(9),
                            create_lstm_critic_network(9),
                            **ddpg_pricing_arguments)
    ]
    for agent in pricing_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_pricing_arguments)

    weighting_agents = [
        ResourceWeightingDdpgAgent(3, create_lstm_actor_network(16),
                                   create_lstm_critic_network(16),
                                   **ddpg_weighting_arguments),
        ResourceWeightingTD3Agent(4, create_lstm_actor_network(16),
                                  create_lstm_critic_network(16),
                                  create_lstm_critic_network(16),
                                  **ddpg_weighting_arguments)
    ]
    for agent in weighting_agents:
        print(f'Agent: {agent.name}')
        assert_args(agent, ddpg_weighting_arguments)