Example No. 1
def test_cnn(model_class):
    # Fake grayscale with frameskip
    # Atari after preprocessing: 84x84x1; here we use a lower resolution
    # to check that the network handles it automatically
    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=1,
                       discrete=model_class not in {SAC, TD3})
    if model_class in {A2C, PPO}:
        kwargs = dict(n_steps=100)
    else:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features
        kwargs = dict(buffer_size=250,
                      policy_kwargs=dict(features_extractor_kwargs=dict(
                          features_dim=32)))
    model = model_class('CnnPolicy', env, **kwargs).learn(250)

    obs = env.reset()

    action, _ = model.predict(obs, deterministic=True)

    model.save(SAVE_PATH)
    del model

    model = model_class.load(SAVE_PATH)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(SAVE_PATH)
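The decorator driving model_class is not included in these snippets; a plausible pytest parametrization (an assumption, not the original code) would look like:

import pytest
from stable_baselines3 import A2C, PPO, SAC, TD3

# Hypothetical parametrization for the test above; the original decorator
# is not shown in the snippet.
@pytest.mark.parametrize("model_class", [A2C, PPO, SAC, TD3])
def test_cnn(model_class):
    ...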
def test_channel_first_env(tmp_path):
    # test_cnn uses an environment with an HxWxC setup that gets transposed, but we
    # also want to work with CxHxW envs directly, without the transposing wrapper.
    SAVE_NAME = "cnn_model.zip"

    # Create environment with transposed images (CxHxW).
    # If the underlying CNN processes the data in the wrong format,
    # it will raise an error about negative dimension sizes while creating the convolutions
    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=1,
                       discrete=True,
                       channel_first=True)

    model = A2C("CnnPolicy", env, n_steps=100).learn(250)

    assert not is_vecenv_wrapped(model.get_env(), VecTransposeImage)

    obs = env.reset()

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = A2C.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))
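FakeImageEnv comes from stable_baselines3.common.envs; the channel_first flag presumably just flips the observation shape. A minimal sketch of that behavior (simplified and assumed, not the actual source):

import numpy as np
from gym import spaces

def make_image_space(screen_height=40, screen_width=40, n_channels=1, channel_first=False):
    # Assumed behavior: channel_first selects CxHxW, otherwise HxWxC
    if channel_first:
        shape = (n_channels, screen_height, screen_width)
    else:
        shape = (screen_height, screen_width, n_channels)
    return spaces.Box(low=0, high=255, shape=shape, dtype=np.uint8)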
Example No. 3
def test_cnn(tmp_path, model_class):
    SAVE_NAME = "cnn_model.zip"
    # Fake grayscale with frameskip
    # Atari after preprocessing: 84x84x1; here we use a lower resolution
    # to check that the network handles it automatically
    env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {SAC, TD3})
    if model_class in {A2C, PPO}:
        kwargs = dict(n_steps=64)
    else:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features
        kwargs = dict(buffer_size=250, policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)))
    model = model_class("CnnPolicy", env, **kwargs).learn(250)

    # FakeImageEnv is channel-last by default and should be wrapped
    assert is_vecenv_wrapped(model.get_env(), VecTransposeImage)

    obs = env.reset()

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = model_class.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))
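The is_vecenv_wrapped assertion above checks whether the auto-applied VecTransposeImage wrapper is present. Conceptually the helper walks the VecEnv wrapper chain; a simplified sketch of that logic (the real helper lives in stable_baselines3.common.vec_env):

from stable_baselines3.common.vec_env import VecEnvWrapper

def is_vecenv_wrapped_sketch(vec_env, wrapper_class):
    # Walk the wrapper chain, returning True if wrapper_class is found
    env = vec_env
    while isinstance(env, VecEnvWrapper):
        if isinstance(env, wrapper_class):
            return True
        env = env.venv
    return False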
Example No. 4
def test_cnn(tmp_path, model_class):
    SAVE_NAME = "cnn_model.zip"
    # Fake grayscale with frameskip
    # Atari after preprocessing: 84x84x1; here we use a lower resolution
    # to check that the network handles it automatically
    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=1,
                       discrete=model_class not in {TQC})
    kwargs = {}
    if model_class in {TQC, QRDQN}:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features and the number of quantiles
        kwargs = dict(
            buffer_size=250,
            policy_kwargs=dict(
                n_quantiles=25,
                features_extractor_kwargs=dict(features_dim=32)),
        )
    model = model_class("CnnPolicy", env, **kwargs).learn(250)

    obs = env.reset()

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = model_class.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))
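TQC and QRDQN are not part of core stable-baselines3; assuming the usual packaging, this variant of the test would import them from sb3-contrib (DQNClipped and DQNReg appear to come from a project-specific module whose path is not shown):

# Assumed imports for the TQC/QRDQN variant of the test
from sb3_contrib import QRDQN, TQC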
def test_cnn(tmp_path, model_class):
    SAVE_NAME = "cnn_model.zip"
    # Fake grayscale with frameskip
    # Atari after preprocessing: 84x84x1; here we use a lower resolution
    # to check that the network handles it automatically
    env = FakeImageEnv(
        screen_height=40,
        screen_width=40,
        n_channels=1,
        discrete=model_class not in {TQC},
    )
    kwargs = {}
    if model_class in {TQC, QRDQN}:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features and the number of quantiles
        kwargs = dict(
            buffer_size=250,
            policy_kwargs=dict(
                n_quantiles=25,
                features_extractor_kwargs=dict(features_dim=32),
            ),
        )
    else:
        kwargs = dict(
            buffer_size=250,
            policy_kwargs=dict(features_extractor_kwargs=dict(
                features_dim=32)),
            seed=1,
        )

    model = model_class("CnnPolicy", env, **kwargs).learn(250)

    obs = env.reset()

    # FakeImageEnv is channel-last by default and should be wrapped
    assert is_vecenv_wrapped(model.get_env(), VecTransposeImage)

    # Test stochastic predict with channel-last input
    if model_class in {QRDQN, DQNClipped, DQNReg}:
        model.exploration_rate = 0.9

    for _ in range(10):
        model.predict(obs, deterministic=False)

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = model_class.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))
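Setting model.exploration_rate = 0.9 above makes the stochastic predictions mostly random, because DQN-style predict(deterministic=False) is epsilon-greedy. A minimal sketch of that selection rule (an assumed simplification, not the library source):

import numpy as np

def epsilon_greedy(greedy_action, action_space, exploration_rate, rng=None):
    # With probability exploration_rate, sample a random action instead
    # of the greedy one (sketch of DQN-style stochastic prediction)
    rng = rng or np.random.default_rng()
    if rng.random() < exploration_rate:
        return action_space.sample()
    return greedy_action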
def test_non_default_spaces(new_obs_space):
    env = FakeImageEnv()
    env.observation_space = new_obs_space
    # Patch methods to avoid errors
    env.reset = new_obs_space.sample

    def patched_step(_action):
        return new_obs_space.sample(), 0.0, False, {}

    env.step = patched_step
    with pytest.warns(UserWarning):
        check_env(env)
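The patched step above uses the legacy Gym API, which returns a 4-tuple. If this test were ported to Gymnasium, the patch would presumably return the 5-tuple step signature instead; a hedged sketch, reusing new_obs_space from the enclosing test:

def patched_step_gymnasium(_action):
    # Gymnasium step API: (obs, reward, terminated, truncated, info)
    return new_obs_space.sample(), 0.0, False, False, {}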
Example No. 7
def test_high_dimension_action_space():
    """
    Test for continuous action space
    with more than one action.
    """
    env = FakeImageEnv()
    # Patch the action space
    env.action_space = spaces.Box(low=-1, high=1, shape=(20,), dtype=np.float32)

    # Patch to avoid error
    def patched_step(_action):
        return env.observation_space.sample(), 0.0, False, {}
    env.step = patched_step
    check_env(env)
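For completeness, the imports these environment-checker tests rely on would look roughly like the following (assumed; the snippets omit their import block):

import numpy as np
import pytest
from gym import spaces
from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.envs import FakeImageEnv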
Example No. 8
def test_save_load_env_cnn(tmp_path, model_class):
    """
    Test loading with an env that requires a ``CnnPolicy``.
    This tests the wrapping and the observation space check.
    We test one on-policy and one off-policy
    algorithm, as the rest share the loading code.
    """
    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=2,
                       discrete=False)
    kwargs = dict(policy_kwargs=dict(net_arch=[32]))
    if model_class == TD3:
        kwargs.update(dict(buffer_size=100, learning_starts=50))

    model = model_class("CnnPolicy", env, **kwargs).learn(100)
    model.save(tmp_path / "test_save")
    # Test loading with env and continuing training
    model = model_class.load(str(tmp_path / "test_save.zip"),
                             env=env).learn(100)
    # clear file from os
    os.remove(tmp_path / "test_save.zip")
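Note the asymmetry between the save and load paths above: stable-baselines3 appends the ".zip" suffix when none is given, so saving to "test_save" produces "test_save.zip" on disk, which is the file that is then loaded and removed.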
Example No. 9
def test_save_load_policy(tmp_path, model_class, policy_str):
    """
    Test saving and loading policy only.

    :param model_class: (BaseAlgorithm) An RL model
    :param policy_str: (str) Name of the policy.
    """
    kwargs = {}
    if policy_str == "MlpPolicy":
        env = select_env(model_class)
    else:
        if model_class in [SAC, TD3, DQN]:
            # Avoid memory error when using replay buffer
            # Reduce the size of the features
            kwargs = dict(buffer_size=250)
        env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == DQN)

    env = DummyVecEnv([lambda: env])

    # create model
    model = model_class(policy_str, env, policy_kwargs=dict(net_arch=[16]), verbose=1, **kwargs)
    model.learn(total_timesteps=500)

    env.reset()
    observations = np.concatenate([env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0)

    policy = model.policy
    policy_class = policy.__class__
    actor, actor_class = None, None
    if model_class in [SAC, TD3]:
        actor = policy.actor
        actor_class = actor.__class__

    # Get dictionary of current parameters
    params = deepcopy(policy.state_dict())

    # Modify all parameters to be random values
    random_params = {param_name: th.rand_like(param) for param_name, param in params.items()}

    # Update model parameters with the new random values
    policy.load_state_dict(random_params)

    new_params = policy.state_dict()
    # Check that all params are different now
    for k in params:
        assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected."

    params = new_params

    # get selected actions
    selected_actions, _ = policy.predict(observations, deterministic=True)
    # Should also work with the actor only
    if actor is not None:
        selected_actions_actor, _ = actor.predict(observations, deterministic=True)

    # Save and load policy
    policy.save(tmp_path / "policy.pkl")
    # Save and load actor
    if actor is not None:
        actor.save(tmp_path / "actor.pkl")

    del policy, actor

    policy = policy_class.load(tmp_path / "policy.pkl")
    if actor_class is not None:
        actor = actor_class.load(tmp_path / "actor.pkl")

    # check if params are still the same after load
    new_params = policy.state_dict()

    # Check that all params are the same as before save load procedure now
    for key in params:
        assert th.allclose(params[key], new_params[key]), "Policy parameters not the same after save and load."

    # check if model still selects the same actions
    new_selected_actions, _ = policy.predict(observations, deterministic=True)
    assert np.allclose(selected_actions, new_selected_actions, 1e-4)

    if actor_class is not None:
        new_selected_actions_actor, _ = actor.predict(observations, deterministic=True)
        assert np.allclose(selected_actions_actor, new_selected_actions_actor, 1e-4)
        assert np.allclose(selected_actions_actor, new_selected_actions, 1e-4)

    # clear file from os
    os.remove(tmp_path / "policy.pkl")
    if actor_class is not None:
        os.remove(tmp_path / "actor.pkl")
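policy.save and policy_class.load serialize the policy alone, not the whole algorithm. A rough sketch of that mechanism, assuming the usual torch-based layout (simplified, not the exact library source):

import torch as th

def save_policy_sketch(policy, path):
    # Persist the weights plus the constructor arguments needed to rebuild
    th.save({"state_dict": policy.state_dict(),
             "data": policy._get_constructor_parameters()}, path)

def load_policy_sketch(policy_class, path):
    saved = th.load(path)
    policy = policy_class(**saved["data"])
    policy.load_state_dict(saved["state_dict"])
    return policy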
Example No. 10
def test_save_load_q_net(tmp_path, model_class, policy_str):
    """
    Test saving and loading q-network/quantile net only.

    :param model_class: (BaseAlgorithm) An RL model
    :param policy_str: (str) Name of the policy.
    """
    kwargs = dict(policy_kwargs=dict(net_arch=[16]))
    if policy_str == "MlpPolicy":
        env = select_env(model_class)
    else:
        if model_class in [DQN]:
            # Avoid memory error when using replay buffer
            # Reduce the size of the features
            kwargs = dict(
                buffer_size=250,
                learning_starts=100,
                policy_kwargs=dict(features_extractor_kwargs=dict(
                    features_dim=32)),
            )
        env = FakeImageEnv(screen_height=40,
                           screen_width=40,
                           n_channels=2,
                           discrete=model_class == DQN)

    env = DummyVecEnv([lambda: env])

    # create model
    model = model_class(policy_str, env, verbose=1, **kwargs)
    model.learn(total_timesteps=300)

    env.reset()
    observations = np.concatenate(
        [env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0)

    q_net = model.q_net
    q_net_class = q_net.__class__

    # Get dictionary of current parameters
    params = deepcopy(q_net.state_dict())

    # Modify all parameters to be random values
    random_params = {param_name: th.rand_like(param)
                     for param_name, param in params.items()}

    # Update model parameters with the new random values
    q_net.load_state_dict(random_params)

    new_params = q_net.state_dict()
    # Check that all params are different now
    for k in params:
        assert not th.allclose(
            params[k], new_params[k]), "Parameters did not change as expected."

    params = new_params

    # get selected actions
    selected_actions, _ = q_net.predict(observations, deterministic=True)

    # Save and load q_net
    q_net.save(tmp_path / "q_net.pkl")

    del q_net

    q_net = q_net_class.load(tmp_path / "q_net.pkl")

    # check if params are still the same after load
    new_params = q_net.state_dict()

    # Check that all params are the same as before save load procedure now
    for key in params:
        assert th.allclose(
            params[key], new_params[key]
        ), "Q-network parameters not the same after save and load."

    # check if model still selects the same actions
    new_selected_actions, _ = q_net.predict(observations, deterministic=True)
    assert np.allclose(selected_actions, new_selected_actions, 1e-4)

    # clear file from os
    os.remove(tmp_path / "q_net.pkl")
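select_env is a helper local to the original test module; a hypothetical definition consistent with how it is used (a discrete action space only for DQN-like models) might be:

from stable_baselines3 import DQN
from stable_baselines3.common.envs import IdentityEnv, IdentityEnvBox

def select_env(model_class):
    # Hypothetical sketch: DQN needs a discrete action space, while the
    # continuous-control algorithms need a Box action space
    if model_class == DQN:
        return IdentityEnv(10)
    return IdentityEnvBox(-1, 1)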
Example No. 11
def test_features_extractor_target_net(model_class, share_features_extractor):
    if model_class == DQN and share_features_extractor:
        pytest.skip()

    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=1,
                       discrete=model_class not in {SAC, TD3})
    # Avoid memory error when using replay buffer
    # Reduce the size of the features
    kwargs = dict(
        buffer_size=250,
        learning_starts=100,
        policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)))
    if model_class != DQN:
        kwargs["policy_kwargs"][
            "share_features_extractor"] = share_features_extractor

    # No delay for TD3 (policy_delay affects when the actor and Polyak updates take place)
    if model_class == TD3:
        kwargs["policy_delay"] = 1

    model = model_class("CnnPolicy", env, seed=0, **kwargs)

    patch_dqn_names_(model)

    if share_features_extractor:
        # Check that the objects are the same and not just copied
        assert id(model.policy.actor.features_extractor) == id(
            model.policy.critic.features_extractor)
        if model_class == TD3:
            assert id(model.policy.actor_target.features_extractor) == id(
                model.policy.critic_target.features_extractor)
        # Actor and critic feature extractors should be the same
        td3_features_extractor_check = check_td3_feature_extractor_match
    else:
        # Actor and critic feature extractors should differ
        td3_features_extractor_check = check_td3_feature_extractor_differ
        # Check that the objects differ
        if model_class != DQN:
            assert id(model.policy.actor.features_extractor) != id(
                model.policy.critic.features_extractor)

        if model_class == TD3:
            assert id(model.policy.actor_target.features_extractor) != id(
                model.policy.critic_target.features_extractor)

    # Critic and target should be equal at the beginning of training
    params_should_match(model.critic.parameters(),
                        model.critic_target.parameters())

    # TD3 has also a target actor net
    if model_class == TD3:
        params_should_match(model.actor.parameters(),
                            model.actor_target.parameters())

    model.learn(200)

    # Critic and target should differ
    params_should_differ(model.critic.parameters(),
                         model.critic_target.parameters())

    if model_class == TD3:
        params_should_differ(model.actor.parameters(),
                             model.actor_target.parameters())
        td3_features_extractor_check(model)

    # Re-initialize and collect some random data (without doing gradient steps,
    # since 10 < learning_starts = 100)
    model = model_class("CnnPolicy", env, seed=0, **kwargs).learn(10)

    patch_dqn_names_(model)

    original_param = deepcopy(list(model.critic.parameters()))
    original_target_param = deepcopy(list(model.critic_target.parameters()))
    if model_class == TD3:
        original_actor_target_param = deepcopy(
            list(model.actor_target.parameters()))

    # Deactivate copy to target
    model.tau = 0.0
    model.train(gradient_steps=1)

    # Target should be the same
    params_should_match(original_target_param,
                        model.critic_target.parameters())

    if model_class == TD3:
        params_should_match(original_actor_target_param,
                            model.actor_target.parameters())
        td3_features_extractor_check(model)

    # not the same for critic net (updated by gradient descent)
    params_should_differ(original_param, model.critic.parameters())

    # Update the reference as it should not change in the next step
    original_param = deepcopy(list(model.critic.parameters()))

    if model_class == TD3:
        original_actor_param = deepcopy(list(model.actor.parameters()))

    # Deactivate learning rate
    model.lr_schedule = lambda _: 0.0
    # Re-activate polyak update
    model.tau = 0.01
    # Special case for DQN: the target net is updated in `collect_rollouts()`,
    # not in the `train()` method
    if model_class == DQN:
        model.target_update_interval = 1
        model._on_step()

    model.train(gradient_steps=1)

    # Target should have changed now (due to polyak update)
    params_should_differ(original_target_param,
                         model.critic_target.parameters())

    # Critic should be the same
    params_should_match(original_param, model.critic.parameters())

    if model_class == TD3:
        params_should_differ(original_actor_target_param,
                             model.actor_target.parameters())

        params_should_match(original_actor_param, model.actor.parameters())

        td3_features_extractor_check(model)
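params_should_match, params_should_differ, and patch_dqn_names_ are module-level helpers not shown in these snippets; plausible sketches consistent with how they are used above (assumptions, not the originals):

import torch as th
from stable_baselines3 import DQN

def params_should_match(params, other_params):
    # Elementwise comparison over two parameter iterators
    for param, other_param in zip(params, other_params):
        assert th.allclose(param, other_param)

def params_should_differ(params, other_params):
    for param, other_param in zip(params, other_params):
        assert not th.allclose(param, other_param)

def patch_dqn_names_(model):
    # DQN exposes q-networks rather than actor/critic attributes; alias
    # them so the shared assertions treat all algorithms uniformly (sketch)
    if isinstance(model, DQN):
        model.critic = model.policy.q_net
        model.critic_target = model.policy.q_net_target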
def test_feature_extractor_target_net(model_class, share_features_extractor):
    if model_class in {QRDQN, DQNReg, DQNClipped} and share_features_extractor:
        pytest.skip()

    env = FakeImageEnv(screen_height=40,
                       screen_width=40,
                       n_channels=1,
                       discrete=model_class not in {TQC})

    if model_class in {TQC, QRDQN}:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features and the number of quantiles
        kwargs = dict(
            buffer_size=250,
            learning_starts=100,
            policy_kwargs=dict(
                n_quantiles=25,
                features_extractor_kwargs=dict(features_dim=32)),
        )
    else:
        kwargs = dict(buffer_size=250,
                      learning_starts=100,
                      policy_kwargs=dict(features_extractor_kwargs=dict(
                          features_dim=32)))
    if model_class not in {QRDQN, DQNClipped, DQNReg}:
        kwargs["policy_kwargs"][
            "share_features_extractor"] = share_features_extractor

    model = model_class("CnnPolicy", env, seed=0, **kwargs)

    patch_qrdqn_names_(model)

    if share_features_extractor:
        # Check that the objects are the same and not just copied
        assert id(model.policy.actor.features_extractor) == id(
            model.policy.critic.features_extractor)
    else:
        # Check that the objects differ
        if model_class not in {QRDQN, DQNClipped, DQNReg}:
            assert id(model.policy.actor.features_extractor) != id(
                model.policy.critic.features_extractor)

    # Critic and target should be equal at the beginning of training
    params_should_match(model.critic.parameters(),
                        model.critic_target.parameters())

    model.learn(200)

    # Critic and target should differ
    params_should_differ(model.critic.parameters(),
                         model.critic_target.parameters())

    # Re-initialize and collect some random data (without doing gradient steps)
    model = model_class("CnnPolicy", env, seed=0, **kwargs).learn(10)

    patch_qrdqn_names_(model)

    original_param = deepcopy(list(model.critic.parameters()))
    original_target_param = deepcopy(list(model.critic_target.parameters()))

    # Deactivate copy to target
    model.tau = 0.0
    model.train(gradient_steps=1)

    # Target should be the same
    params_should_match(original_target_param,
                        model.critic_target.parameters())

    # not the same for critic net (updated by gradient descent)
    params_should_differ(original_param, model.critic.parameters())

    # Update the reference as it should not change in the next step
    original_param = deepcopy(list(model.critic.parameters()))

    # Deactivate learning rate
    model.lr_schedule = lambda _: 0.0
    # Re-activate polyak update
    model.tau = 0.01
    # Special case for QRDQN: the target net is updated in `collect_rollouts()`,
    # not in the `train()` method
    if model_class in {QRDQN, DQNClipped, DQNReg}:
        model.target_update_interval = 1
        model._on_step()

    model.train(gradient_steps=1)

    # Target should have changed now (due to polyak update)
    params_should_differ(original_target_param,
                         model.critic_target.parameters())

    # Critic should be the same
    params_should_match(original_param, model.critic.parameters())
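patch_qrdqn_names_ presumably mirrors patch_dqn_names_ for the contrib models; a hypothetical sketch consistent with its usage (QRDQN exposes quantile networks, while the DQN variants expose plain q-networks):

def patch_qrdqn_names_(model):
    # Hypothetical: alias the value networks to critic/critic_target so
    # the shared assertions above apply to every model class
    if not hasattr(model, "critic"):
        if hasattr(model.policy, "quantile_net"):
            model.critic = model.policy.quantile_net
            model.critic_target = model.policy.quantile_net_target
        else:
            model.critic = model.policy.q_net
            model.critic_target = model.policy.q_net_target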