def test_channel_first_env(tmp_path):
    # test_cnn uses an environment with an HxWxC observation that gets transposed, but we
    # also want to work with CxHxW envs directly, without the transposing wrapper.
    SAVE_NAME = "cnn_model.zip"
    # Create environment with transposed images (CxHxW).
    # If the underlying CNN processes the data in the wrong format,
    # it will raise an error about negative dimension sizes while creating the convolutions
    env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=True, channel_first=True)

    model = A2C("CnnPolicy", env, n_steps=100).learn(250)

    assert not is_vecenv_wrapped(model.get_env(), VecTransposeImage)

    obs = env.reset()

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = A2C.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))

def test_non_default_spaces(new_obs_space):
    env = FakeImageEnv()
    env.observation_space = new_obs_space
    # Patch methods to avoid errors
    env.reset = new_obs_space.sample

    def patched_step(_action):
        return new_obs_space.sample(), 0.0, False, {}

    env.step = patched_step
    with pytest.warns(UserWarning):
        check_env(env)

def test_non_default_action_spaces(new_action_space):
    env = FakeImageEnv(discrete=False)

    # Default action space, should pass the check
    with pytest.warns(None) as record:
        check_env(env)

    # No warnings for the default custom env
    assert len(record) == 0

    # Change the action space
    env.action_space = new_action_space

    with pytest.warns(UserWarning):
        check_env(env)

def test_vec_transpose_skip(tmp_path, model_class):
    # Fake grayscale with frameskip
    env = FakeImageEnv(
        screen_height=41, screen_width=40, n_channels=10, discrete=model_class not in {SAC, TD3}, channel_first=True
    )
    env = DummyVecEnv([lambda: env])
    # Stack 5 frames so the observation is now (50, 41, 40) but the env is still channel first
    env = VecFrameStack(env, 5, channels_order="first")
    obs_shape_before = env.reset().shape
    # The observation shape should be different, as the heuristic thinks the env is channel last
    assert not np.allclose(obs_shape_before, VecTransposeImage(env).reset().shape)
    env = VecTransposeImage(env, skip=True)
    # The observation shape should be the same as we skip the VecTransposeImage
    assert np.allclose(obs_shape_before, env.reset().shape)

    kwargs = dict(
        n_steps=64,
        policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)),
        seed=1,
    )
    model = model_class("CnnPolicy", env, **kwargs).learn(250)

    obs = env.reset()
    action, _ = model.predict(obs, deterministic=True)

def test_cnn(tmp_path, model_class):
    SAVE_NAME = "cnn_model.zip"
    # Fake grayscale with frameskip
    # Atari after preprocessing: 84x84x1, here we are using a lower resolution
    # to check that the network handles it automatically
    env = FakeImageEnv(
        screen_height=40,
        screen_width=40,
        n_channels=1,
        discrete=model_class not in {TQC},
    )
    kwargs = {}
    if model_class in {TQC, QRDQN}:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features and the number of quantiles
        kwargs = dict(
            buffer_size=250,
            policy_kwargs=dict(
                n_quantiles=25,
                features_extractor_kwargs=dict(features_dim=32),
            ),
        )
    model = model_class("CnnPolicy", env, **kwargs).learn(250)

    obs = env.reset()

    # FakeImageEnv is channel last by default and should be wrapped
    assert is_vecenv_wrapped(model.get_env(), VecTransposeImage)

    # Test stochastic predict with channel last input
    if model_class == QRDQN:
        model.exploration_rate = 0.9

    for _ in range(10):
        model.predict(obs, deterministic=False)

    action, _ = model.predict(obs, deterministic=True)

    model.save(tmp_path / SAVE_NAME)
    del model

    model = model_class.load(tmp_path / SAVE_NAME)

    # Check that the prediction is the same
    assert np.allclose(action, model.predict(obs, deterministic=True)[0])

    os.remove(str(tmp_path / SAVE_NAME))

def test_high_dimension_action_space():
    """
    Test for continuous action space with more than one action.
    """
    env = FakeImageEnv()
    # Patch the action space
    env.action_space = spaces.Box(low=-1, high=1, shape=(20,), dtype=np.float32)

    # Patch to avoid error
    def patched_step(_action):
        return env.observation_space.sample(), 0.0, False, {}

    env.step = patched_step
    check_env(env)

def test_save_load_env_cnn(tmp_path, model_class):
    """
    Test loading with an env that requires a ``CnnPolicy``.
    This is to test wrapping and the observation space check.
    We test one on-policy and one off-policy algorithm,
    as the rest share the loading code.
    """
    env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=False)
    kwargs = dict(policy_kwargs=dict(net_arch=[32]))
    if model_class == TD3:
        kwargs.update(dict(buffer_size=100, learning_starts=50, train_freq=4))

    model = model_class("CnnPolicy", env, **kwargs).learn(100)
    model.save(tmp_path / "test_save")
    # Test loading with env and continuing training
    model = model_class.load(str(tmp_path / "test_save.zip"), env=env, **kwargs).learn(100)
    # Clean up the saved file
    os.remove(tmp_path / "test_save.zip")

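# ``select_env`` is used by the two save/load tests below but is defined elsewhere
# in the original test file. The sketch below only illustrates the assumed behaviour
# (pick an env whose action space matches the algorithm); the concrete env choices
# are illustrative assumptions, not the original implementation, and ``gym`` is
# assumed to be imported at the top of the file like the other dependencies.
def select_env(model_class):
    # DQN needs a discrete action space, the other algorithms a continuous one
    if model_class == DQN:
        return gym.make("CartPole-v1")
    return gym.make("Pendulum-v1")
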
def test_save_load_q_net(tmp_path, model_class, policy_str):
    """
    Test saving and loading the q-network/quantile net only.

    :param model_class: (BaseAlgorithm) A RL model
    :param policy_str: (str) Name of the policy.
    """
    kwargs = dict(policy_kwargs=dict(net_arch=[16]))
    if policy_str == "MlpPolicy":
        env = select_env(model_class)
    else:
        if model_class in [DQN]:
            # Avoid memory error when using replay buffer
            # Reduce the size of the features
            kwargs = dict(
                buffer_size=250,
                learning_starts=100,
                policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)),
            )
        env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == DQN)

    env = DummyVecEnv([lambda: env])

    # Create model
    model = model_class(policy_str, env, verbose=1, **kwargs)
    model.learn(total_timesteps=300)

    env.reset()
    observations = np.concatenate(
        [env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0
    )

    q_net = model.q_net
    q_net_class = q_net.__class__

    # Get dictionary of current parameters
    params = deepcopy(q_net.state_dict())

    # Modify all parameters to be random values
    random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items())

    # Update model parameters with the new random values
    q_net.load_state_dict(random_params)

    new_params = q_net.state_dict()
    # Check that all params are different now
    for k in params:
        assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected."

    params = new_params

    # Get selected actions
    selected_actions, _ = q_net.predict(observations, deterministic=True)

    # Save and load q_net
    q_net.save(tmp_path / "q_net.pkl")

    del q_net

    q_net = q_net_class.load(tmp_path / "q_net.pkl")

    # Check if params are still the same after loading
    new_params = q_net.state_dict()

    # Check that all params are the same as before the save/load
    for key in params:
        assert th.allclose(params[key], new_params[key]), "Q-network parameters not the same after save and load."

    # Check that the model still selects the same actions
    new_selected_actions, _ = q_net.predict(observations, deterministic=True)
    assert np.allclose(selected_actions, new_selected_actions, 1e-4)

    # Clean up the saved file
    os.remove(tmp_path / "q_net.pkl")

def test_save_load_policy(tmp_path, model_class, policy_str, use_sde):
    """
    Test saving and loading the policy only.

    :param model_class: (BaseAlgorithm) A RL model
    :param policy_str: (str) Name of the policy.
    """
    kwargs = dict(policy_kwargs=dict(net_arch=[16]))

    # gSDE is only applicable to A2C, PPO and SAC
    if use_sde and model_class not in [A2C, PPO, SAC]:
        pytest.skip()

    if policy_str == "MlpPolicy":
        env = select_env(model_class)
    else:
        if model_class in [SAC, TD3, DQN, DDPG]:
            # Avoid memory error when using replay buffer
            # Reduce the size of the features
            kwargs = dict(
                buffer_size=250,
                learning_starts=100,
                policy_kwargs=dict(features_extractor_kwargs=dict(features_dim=32)),
            )
        env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=2, discrete=model_class == DQN)

    if use_sde:
        kwargs["use_sde"] = True

    env = DummyVecEnv([lambda: env])

    # Create model
    model = model_class(policy_str, env, verbose=1, **kwargs)
    model.learn(total_timesteps=300)

    env.reset()
    observations = np.concatenate(
        [env.step([env.action_space.sample()])[0] for _ in range(10)], axis=0
    )

    policy = model.policy
    policy_class = policy.__class__
    actor, actor_class = None, None
    if model_class in [SAC, TD3]:
        actor = policy.actor
        actor_class = actor.__class__

    # Get dictionary of current parameters
    params = deepcopy(policy.state_dict())

    # Modify all parameters to be random values
    random_params = dict((param_name, th.rand_like(param)) for param_name, param in params.items())

    # Update model parameters with the new random values
    policy.load_state_dict(random_params)

    new_params = policy.state_dict()
    # Check that all params are different now
    for k in params:
        assert not th.allclose(params[k], new_params[k]), "Parameters did not change as expected."

    params = new_params

    # Get selected actions
    selected_actions, _ = policy.predict(observations, deterministic=True)
    # Should also work with the actor only
    if actor is not None:
        selected_actions_actor, _ = actor.predict(observations, deterministic=True)

    # Save and load policy
    policy.save(tmp_path / "policy.pkl")
    # Save and load actor
    if actor is not None:
        actor.save(tmp_path / "actor.pkl")

    del policy, actor

    policy = policy_class.load(tmp_path / "policy.pkl")
    if actor_class is not None:
        actor = actor_class.load(tmp_path / "actor.pkl")

    # Check if params are still the same after loading
    new_params = policy.state_dict()

    # Check that all params are the same as before the save/load
    for key in params:
        assert th.allclose(params[key], new_params[key]), "Policy parameters not the same after save and load."

    # Check that the policy still selects the same actions
    new_selected_actions, _ = policy.predict(observations, deterministic=True)
    assert np.allclose(selected_actions, new_selected_actions, 1e-4)

    if actor_class is not None:
        new_selected_actions_actor, _ = actor.predict(observations, deterministic=True)
        assert np.allclose(selected_actions_actor, new_selected_actions_actor, 1e-4)
        assert np.allclose(selected_actions_actor, new_selected_actions, 1e-4)

    # Clean up the saved files
    os.remove(tmp_path / "policy.pkl")
    if actor_class is not None:
        os.remove(tmp_path / "actor.pkl")

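# The target-network test below relies on three small helpers that are defined
# elsewhere in the original test file. The sketches below only illustrate the
# assumed behaviour (aliasing the QRDQN quantile nets to ``critic``/``critic_target``
# and comparing parameter iterables element-wise); the attribute names and the
# use of plain ``zip`` are assumptions, not necessarily the original implementation.
def patch_qrdqn_names_(model):
    # QRDQN has no critic/critic_target attributes, alias its quantile nets
    # (attribute names assumed from the QRDQN implementation)
    if isinstance(model, QRDQN):
        model.critic = model.quantile_net
        model.critic_target = model.quantile_net_target


def params_should_match(params, other_params):
    for param, other_param in zip(params, other_params):
        assert th.allclose(param, other_param)


def params_should_differ(params, other_params):
    for param, other_param in zip(params, other_params):
        assert not th.allclose(param, other_param)
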
def test_feature_extractor_target_net(model_class, share_features_extractor):
    if model_class == QRDQN and share_features_extractor:
        pytest.skip()

    env = FakeImageEnv(screen_height=40, screen_width=40, n_channels=1, discrete=model_class not in {TQC})

    if model_class in {TQC, QRDQN}:
        # Avoid memory error when using replay buffer
        # Reduce the size of the features and the number of quantiles
        kwargs = dict(
            buffer_size=250,
            learning_starts=100,
            policy_kwargs=dict(n_quantiles=25, features_extractor_kwargs=dict(features_dim=32)),
        )
    if model_class != QRDQN:
        kwargs["policy_kwargs"]["share_features_extractor"] = share_features_extractor

    model = model_class("CnnPolicy", env, seed=0, **kwargs)
    patch_qrdqn_names_(model)

    if share_features_extractor:
        # Check that the objects are the same and not just copied
        assert id(model.policy.actor.features_extractor) == id(model.policy.critic.features_extractor)
    else:
        # Check that the objects differ
        if model_class != QRDQN:
            assert id(model.policy.actor.features_extractor) != id(model.policy.critic.features_extractor)

    # Critic and target should be equal at the beginning of training
    params_should_match(model.critic.parameters(), model.critic_target.parameters())

    model.learn(200)

    # Critic and target should differ
    params_should_differ(model.critic.parameters(), model.critic_target.parameters())

    # Re-initialize and collect some random data (without doing gradient steps)
    model = model_class("CnnPolicy", env, seed=0, **kwargs).learn(10)
    patch_qrdqn_names_(model)

    original_param = deepcopy(list(model.critic.parameters()))
    original_target_param = deepcopy(list(model.critic_target.parameters()))

    # Deactivate copy to target
    model.tau = 0.0
    model.train(gradient_steps=1)

    # Target should be the same
    params_should_match(original_target_param, model.critic_target.parameters())

    # Not the same for the critic net (updated by gradient descent)
    params_should_differ(original_param, model.critic.parameters())

    # Update the reference as it should not change in the next step
    original_param = deepcopy(list(model.critic.parameters()))

    # Deactivate learning rate
    model.lr_schedule = lambda _: 0.0
    # Re-activate polyak update
    model.tau = 0.01
    # Special case for QRDQN: the target net is updated in `collect_rollouts()`,
    # not in the `train()` method
    if model_class == QRDQN:
        model.target_update_interval = 1
        model._on_step()

    model.train(gradient_steps=1)

    # Target should have changed now (due to polyak update)
    params_should_differ(original_target_param, model.critic_target.parameters())

    # Critic should be the same
    params_should_match(original_param, model.critic.parameters())