Example #1
def test_can_collect_one_hot_encoded_opponent_action_multi_env(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['use_agent_modelling'] = True
    expert_iteration_config_dict['request_observed_action'] = True
    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt-opponent_modelling-test')
    assert ex_it.requires_opponents_prediction

    random_agent = build_Random_Agent(Connect4Task, {}, agent_name='Random')

    _ = Connect4Task.run_episodes(
        agent_vector=[ex_it, random_agent],
        training=True,  # Required for the ExIt agent to call handle_experience
        num_envs=2, num_episodes=2)
    # Check that the keys exist; their content is validated below
    assert 'opponent_policy' in ex_it.algorithm.memory.keys
    assert 'opponent_s' in ex_it.algorithm.memory.keys
    assert len(ex_it.algorithm.memory.opponent_policy) == len(ex_it.algorithm.memory.s)
    assert len(ex_it.algorithm.memory.opponent_policy) == len(ex_it.algorithm.memory.opponent_s)

    for opponent_action in ex_it.algorithm.memory.opponent_policy:
        # Skip placeholder entries where no opponent action was observed
        if torch.any(torch.isnan(opponent_action)):
            continue
        # One-hot check: a single element is 1, all other elements are 0
        values, counts = opponent_action.unique(return_counts=True)
        assert torch.equal(torch.Tensor([0, 1]), values.float())
        assert torch.equal(torch.Tensor([Connect4Task.action_dim - 1, 1]), counts.float())
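For reference, the layout these assertions expect can be produced with a few lines of torch; the helpers below are hypothetical, not part of the library:

import torch

def encode_observed_action(action: int, action_dim: int) -> torch.Tensor:
    # One-hot encode an observed opponent action: a single 1, zeros elsewhere
    encoding = torch.zeros(action_dim)
    encoding[action] = 1.
    return encoding

def missing_observation(action_dim: int) -> torch.Tensor:
    # Placeholder for timesteps where the opponent's action was not observed,
    # matching the NaN entries the loop above skips
    return torch.full((action_dim,), float('nan'))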
Example #2
def test_train_apprentice_using_dagger_against_random_connect4(Connect4Task, expert_iteration_config_dict, mcts_config_dict):
    # Train worthy params
    expert_iteration_config_dict['use_apprentice_in_expert'] = False
    expert_iteration_config_dict['games_per_iteration'] = 10

    expert_iteration_config_dict['mcts_budget'] = 500
    expert_iteration_config_dict['mcts_rollout_budget'] = 100
    expert_iteration_config_dict['initial_memory_size'] = 10000
    expert_iteration_config_dict['memory_size_increase_frequency'] = 5
    expert_iteration_config_dict['end_memory_size'] = 30000
    expert_iteration_config_dict['use_dirichlet'] = False

    expert_iteration_config_dict['learning_rate'] = 1.0e-2
    expert_iteration_config_dict['batch_size'] = 256
    expert_iteration_config_dict['num_epochs_per_iteration'] = 4
    expert_iteration_config_dict['residual_connections'] = [(1, 2), (2, 3), (3, 4)]

    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt-test')
    summary_writer = SummaryWriter('expert_iteration_test')
    ex_it.algorithm.summary_writer = summary_writer

    random_agent = build_Random_Agent(Connect4Task, mcts_config_dict, agent_name='Random')

    parallel_learn_against_fix_opponent(ex_it,
            fixed_opponent=random_agent,
            agent_position=0,
            task=Connect4Task,
            training_episodes=5000,
            test_episodes=100,
            benchmarking_episodes=20,
            benchmark_every_n_episodes=500,
            reward_tolerance=0.2,
            maximum_average_reward=1.0,
            evaluation_method='last',
            show_progress=True,
            summary_writer=summary_writer)
Example #3
def test_can_control_age_of_replay_buffer(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['initial_max_generations_in_memory'] = 1
    expert_iteration_config_dict['increase_memory_every_n_generations'] = 3
    expert_iteration_config_dict['memory_increase_step'] = 10
    expert_iteration_config_dict['final_max_generations_in_memory'] = 10

    exit_alg = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict,
                                           agent_name='ExIt-control-dataset-test').algorithm
    # The policy target and task arguments are irrelevant for this test
    memory_1 = create_memory(1, fixed_policy_target=None, task=Connect4Task)
    memory_2 = create_memory(2, fixed_policy_target=None, task=Connect4Task)
    memory_3 = create_memory(3, fixed_policy_target=None, task=Connect4Task)

    assert len(exit_alg.memory) == 0
    assert exit_alg.current_max_generations_in_memory == 1
    # Add first Storage on top of empty memory
    exit_alg.add_episode_trajectory(memory_1)
    exit_alg.update_storage(exit_alg.memory)
    assert len(exit_alg.memory) == len(memory_1)
    exit_alg.generation += 1
    # Add second, first should be gone
    exit_alg.add_episode_trajectory(memory_2)
    exit_alg.update_storage(exit_alg.memory)
    assert len(exit_alg.memory) == len(memory_2)
    exit_alg.generation += 1
    # Add third, second and third should remain
    exit_alg.add_episode_trajectory(memory_3)
    exit_alg.update_storage(exit_alg.memory)
    assert len(exit_alg.memory) == len(memory_2) + len(memory_3)
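The test drives a replay buffer whose capacity is measured in generations rather than datapoints. A loose sketch of the idea follows; it is illustrative only, and the library's exact growth schedule and attribute names may differ:

class GenerationalMemory:
    """Illustrative sketch: a replay buffer bounded by generation age."""

    def __init__(self, initial_max_generations, increase_every_n_generations,
                 increase_step, final_max_generations):
        self.experiences = []  # list of (generation, experience) pairs
        self.generation = 0
        self.current_max_generations = initial_max_generations
        self.increase_every_n_generations = increase_every_n_generations
        self.increase_step = increase_step
        self.final_max_generations = final_max_generations

    def add(self, experience):
        self.experiences.append((self.generation, experience))

    def update(self):
        # Periodically widen the age window, capped at the final size
        if self.generation > 0 and self.generation % self.increase_every_n_generations == 0:
            self.current_max_generations = min(
                self.current_max_generations + self.increase_step,
                self.final_max_generations)
        # Drop experiences from generations older than the window allows
        oldest_allowed = self.generation - self.current_max_generations + 1
        self.experiences = [(g, e) for g, e in self.experiences if g >= oldest_allowed]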
Example #4
def test_can_query_learnt_opponent_models_at_train_time(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['use_apprentice_in_expert'] = True
    expert_iteration_config_dict['use_learnt_opponent_models_in_mcts'] = True

    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict,
                                        agent_name='ExIt-opponent_modelling-test')
    assert ex_it.use_apprentice_in_expert
    assert ex_it.use_learnt_opponent_models_in_mcts
Example #5
def test_can_defeat_random_play_in_connect4_both_positions_single_env(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['mcts_budget'] = 100
    expert_iteration_config_dict['mcts_rollout_budget'] = 20
    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='MCTS1-test')

    random_agent = build_Random_Agent(Connect4Task, {}, agent_name='Random')

    trajectory = Connect4Task.run_episode([ex_it, random_agent], training=False)
    assert trajectory.winner == 0  # First player (index 0) has a much higher budget

    trajectory = Connect4Task.run_episode([random_agent, ex_it], training=False)
    assert trajectory.winner == 1  # Second player (index 1) has a much higher budget
Example #6
def _test_learn_against_fixed_distribution(task, config, prediction_process_fn: Callable, epochs: int=10):
    # Arbitrary stochastic target policy
    target_policy = torch.Tensor([0.17, 0.03, 0.05, 0.4, 0.05, 0.2, 0.1])

    # Create agent and dataset
    memory = create_memory(size=100, fixed_policy_target=target_policy, task=task)
    ex_it = build_ExpertIteration_Agent(task, config, agent_name='ExIt-opponent_modelling-test')
    ex_it.algorithm.add_episode_trajectory(memory)

    # Train
    ex_it.algorithm.train(ex_it.apprentice)

    # Test
    _test_model(ex_it, target_policy, task, prediction_process_fn)
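`_test_model` is defined elsewhere in the test module; a check of this kind typically averages the apprentice's predicted distributions over many states and compares the mean to the target. A minimal sketch, where the function name and tolerance are assumptions:

import torch

def assert_learns_distribution(predictions: torch.Tensor,
                               target_policy: torch.Tensor,
                               atol: float = 5e-2):
    # Hypothetical check: `predictions` holds one predicted distribution per
    # state (shape [num_states, action_dim]); their mean should approach the
    # fixed target policy the agent was trained on.
    mean_prediction = predictions.mean(dim=0)
    assert torch.allclose(mean_prediction, target_policy, atol=atol)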
Example #7
def test_can_defeat_random_play_in_connect4_both_positions_multi_env(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['mcts_budget'] = 100
    expert_iteration_config_dict['mcts_rollout_budget'] = 20
    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='MCTS1-test')

    random_agent = build_Random_Agent(Connect4Task, {}, agent_name='Random')

    trajectories = Connect4Task.run_episodes(
            [ex_it, random_agent], training=False, num_envs=4, num_episodes=4)

    assert all(map(lambda t: t.winner == 0, trajectories))  # First player (index 0) has a much higher budget

    trajectories = Connect4Task.run_episodes(
            [random_agent, ex_it], training=False, num_envs=4, num_episodes=4)
    assert all(map(lambda t: t.winner == 1, trajectories))  # Second player (index 1) has a much higher budget
Example #8
def test_can_use_data_augmentation_to_double_experiences(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['state_preprocessing_fn'] = 'turn_into_single_element_batch'
    expert_iteration_config_dict['data_augmentation_fn'] = {
        'name': 'generate_horizontal_symmetry', 'flip_obs_on_dim': 1
    }
    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt1-test')
    random_agent = build_Random_Agent(Connect4Task, {}, agent_name='Random')

    trajectories = Connect4Task.run_episodes(agent_vector=[ex_it, random_agent],
                              num_envs=2, num_episodes=1, training=True)
    # With horizontal-symmetry augmentation enabled, every experience should
    # be stored twice: once as observed and once mirrored. A minimal sanity
    # check using the `memory.s` field exercised in the other tests:
    assert len(ex_it.algorithm.memory.s) % 2 == 0
    # TODO: check that exactly one 'done' flag is set in storage
    # (i.e. the agent only registered a single finished episode)
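The augmentation named in the config exploits Connect 4's left-right symmetry: flipping the board across its vertical axis and mirroring the policy vector yields a second, equally valid datapoint. A minimal sketch, assuming observations are tensors with board columns on dim 1 and the policy is a per-column distribution:

import torch

def generate_horizontal_symmetry_sketch(obs: torch.Tensor,
                                        policy: torch.Tensor,
                                        flip_obs_on_dim: int = 1):
    # Illustrative only: mirror the observation across the given dimension
    # and reverse the per-column policy to match the flipped board
    flipped_obs = torch.flip(obs, dims=[flip_obs_on_dim])
    flipped_policy = torch.flip(policy, dims=[-1])
    return flipped_obs, flipped_policy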
Example #9
def test_can_collect_opponent_action_distributions_multi_env(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['use_agent_modelling'] = True
    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt-opponent_modelling-test')
    assert ex_it.requires_opponents_prediction

    random_agent = build_Random_Agent(Connect4Task, {}, agent_name='Random')

    _ = Connect4Task.run_episodes(
        agent_vector=[ex_it, random_agent],
        training=True,  # Required for the ExIt agent to call handle_experience
        num_envs=2, num_episodes=2)
    # We only check for the existence of the keys, not their content
    assert 'opponent_policy' in ex_it.algorithm.memory.keys
    assert 'opponent_s' in ex_it.algorithm.memory.keys
    assert len(ex_it.algorithm.memory.opponent_policy) == len(ex_it.algorithm.memory.s)
    assert len(ex_it.algorithm.memory.opponent_policy) == len(ex_it.algorithm.memory.opponent_s)
Example #10
def test_train_vanilla_exit_against_random_connect4(Connect4Task, expert_iteration_config_dict, mcts_config_dict):
    # Train worthy params
    expert_iteration_config_dict['use_apprentice_in_expert'] = True
    expert_iteration_config_dict['games_per_iteration'] = 100

    expert_iteration_config_dict['mcts_budget'] = 200
    expert_iteration_config_dict['mcts_rollout_budget'] = 10000
    expert_iteration_config_dict['initial_memory_size'] = 6000
    expert_iteration_config_dict['memory_size_increase_frequency'] = 2
    expert_iteration_config_dict['end_memory_size'] = 30000
    expert_iteration_config_dict['dirichlet_alpha'] = 1

    expert_iteration_config_dict['batch_size'] = 256
    expert_iteration_config_dict['num_epochs_per_iteration'] = 4
    # expert_iteration_config_dict['residual_connections'] = [(2, 3), (3, 4)]

    ex_it = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name=f"ExIt-test:{expert_iteration_config_dict['mcts_budget']}")
    summary_writer = SummaryWriter('expert_iteration_test')
    ex_it.algorithm.summary_writer = summary_writer

    mcts_config_dict['budget'] = 1
    mcts_agent = build_MCTS_Agent(Connect4Task, mcts_config_dict, agent_name=f"MCTS:{mcts_config_dict['budget']}")

    parallel_learn_against_fix_opponent(ex_it,
            fixed_opponent=mcts_agent,
            agent_position=0,
            task=Connect4Task,
            training_episodes=5000,
            test_episodes=100,
            benchmarking_episodes=20,
            benchmark_every_n_episodes=500,
            reward_tolerance=0.2,
            maximum_average_reward=1.0,
            evaluation_method='last',
            show_progress=True,
            num_envs=torch.multiprocessing.cpu_count(),
            summary_writer=summary_writer)
Example #11
def partial_match_build_function(agent_name, task, config):
    if agent_name.startswith('tabularqlearning'): return build_TabularQ_Agent(task, config, agent_name)
    if agent_name.startswith('deepqlearning'): return build_DQN_Agent(task, config, agent_name)
    if agent_name.startswith('ppo'): return build_PPO_Agent(task, config, agent_name)
    if agent_name.startswith('expert_iteration'): return build_ExpertIteration_Agent(task, config, agent_name)
    raise ValueError(f'Unknown agent name: {agent_name}')
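A usage sketch; the agent names and `ppo_config_dict` below are illustrative:

# Any agent name that begins with a known prefix resolves to the matching builder
ppo_agent = partial_match_build_function('ppo-test', Connect4Task, ppo_config_dict)
exit_agent = partial_match_build_function('expert_iteration-test', Connect4Task,
                                          expert_iteration_config_dict)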
Example #12
def test_can_use_apprentice_in_expert_in_expansion_and_rollout_phase(Connect4Task, expert_iteration_config_dict):
    expert_iteration_config_dict['use_apprentice_in_expert'] = True
    expert_iteration_config_dict['rollout_budget'] = 0
    exIt_agent_1 = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt1-test')
    exIt_agent_2 = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt2-test')
    Connect4Task.run_episode([exIt_agent_1, exIt_agent_2], training=False)
Example #13
def test_expert_iteration_can_take_actions_discrete_observation_discrete_action(Connect4Task, expert_iteration_config_dict):
    exIt_agent_1 = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt1-test')
    exIt_agent_2 = build_ExpertIteration_Agent(Connect4Task, expert_iteration_config_dict, agent_name='ExIt2-test')
    Connect4Task.run_episode([exIt_agent_1, exIt_agent_2], training=False)