def test_dqn_compilation(self):
    """Test whether a DQNTrainer can be built on all frameworks."""
    num_iterations = 1
    config = dqn.dqn.DQNConfig().rollouts(num_rollout_workers=2)

    for _ in framework_iterator(config, with_eager_tracing=True):
        # Double-dueling DQN.
        print("Double-dueling")
        plain_config = deepcopy(config)
        trainer = dqn.DQNTrainer(config=plain_config, env="CartPole-v0")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()

        # Rainbow.
        print("Rainbow")
        rainbow_config = deepcopy(config).training(
            num_atoms=10, noisy=True, double_q=True, dueling=True, n_step=5
        )
        trainer = dqn.DQNTrainer(config=rainbow_config, env="CartPole-v0")
        for i in range(num_iterations):
            results = trainer.train()
            check_train_results(results)
            print(results)

        check_compute_single_action(trainer)
        trainer.stop()
def test_evaluation_option_always_attach_eval_metrics(self):
    config = dqn.DEFAULT_CONFIG.copy()
    config.update({
        "env": "CartPole-v0",
        "evaluation_interval": 2,
        "evaluation_duration": 2,
        "evaluation_duration_unit": "episodes",
        "evaluation_config": {
            "gamma": 0.98,
        },
        "always_attach_evaluation_results": True,
        # Use a custom callback that asserts that we are running the
        # configured exact number of episodes per evaluation.
        "callbacks": AssertEvalCallback,
    })
    for _ in framework_iterator(config, frameworks=("tf", "torch")):
        trainer = dqn.DQNTrainer(config=config)
        # Should always see the latest available eval results.
        r0 = trainer.train()
        r1 = trainer.train()
        r2 = trainer.train()
        r3 = trainer.train()
        trainer.stop()

        # With `always_attach_evaluation_results=True`, every result dict
        # should carry the latest available evaluation metrics, even for
        # iterations in which no evaluation was run.
        self.assertTrue("evaluation" in r0)
        self.assertTrue("evaluation" in r1)
        self.assertTrue("evaluation" in r2)
        self.assertTrue("evaluation" in r3)
def test_evaluation_option(self):
    config = dqn.DEFAULT_CONFIG.copy()
    config.update({
        "env": "CartPole-v0",
        "evaluation_interval": 2,
        "evaluation_duration": 2,
        "evaluation_config": {
            "gamma": 0.98,
        },
        # Use a custom callback that asserts that we are running the
        # configured exact number of episodes per evaluation.
        "callbacks": AssertEvalCallback,
    })
    for _ in framework_iterator(config, frameworks=("tf", "torch")):
        trainer = dqn.DQNTrainer(config=config)
        # Given evaluation_interval=2, r0 and r2 should not contain
        # evaluation metrics, while r1 and r3 should.
        r0 = trainer.train()
        print(r0)
        r1 = trainer.train()
        print(r1)
        r2 = trainer.train()
        print(r2)
        r3 = trainer.train()
        print(r3)
        trainer.stop()

        self.assertFalse("evaluation" in r0)
        self.assertTrue("evaluation" in r1)
        self.assertFalse("evaluation" in r2)
        self.assertTrue("evaluation" in r3)
        self.assertTrue("episode_reward_mean" in r1["evaluation"])
        self.assertNotEqual(r1["evaluation"], r3["evaluation"])
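# The two evaluation tests above use the legacy dict-style config. For
# comparison, a hedged sketch of roughly the same evaluation setup expressed
# through the config-object API (as used by `test_dqn_compilation` above).
# The helper name is illustrative only, and the `.evaluation()` /
# `.callbacks()` builder methods and their parameter names are assumed to
# mirror the dict keys; they may differ between Ray versions.
def build_eval_trainer_sketch():
    eval_config = (
        dqn.dqn.DQNConfig()
        .environment(env="CartPole-v0")
        .evaluation(
            evaluation_interval=2,
            evaluation_duration=2,
            evaluation_config={"gamma": 0.98},
        )
        .callbacks(AssertEvalCallback)
    )
    # `build()` instantiates the DQNTrainer from the config object.
    return eval_config.build()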
def test_on_sub_environment_created_with_remote_envs(self):
    base_config = {
        "env": "CartPole-v1",
        # Make each sub-environment a ray actor.
        "remote_worker_envs": True,
        # Create 4 sub-environments (ray remote actors) per remote
        # worker.
        "num_envs_per_worker": 4,
        # Create 2 remote workers.
        "num_workers": 2,
    }
    for callbacks in (
        OnSubEnvironmentCreatedCallback,
        MultiCallbacks([OnSubEnvironmentCreatedCallback]),
    ):
        config = dict(base_config, callbacks=callbacks)

        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            trainer = dqn.DQNTrainer(config=config)
            # Fake the counter on the local worker (it doesn't have an env)
            # and set it to -1 so the `foreach_worker()` call below won't
            # fail.
            trainer.workers.local_worker().sum_sub_env_vector_indices = -1

            # Get the sub-env vector index sums from the 2 remote workers.
            sum_sub_env_vector_indices = trainer.workers.foreach_worker(
                lambda w: w.sum_sub_env_vector_indices
            )
            # The local worker has no environments -> expect the -1 special
            # value returned by the lambda above.
            self.assertTrue(sum_sub_env_vector_indices[0] == -1)
            # Both remote workers (indices 1 and 2) have a vector index sum
            # of 6 (0 + 1 + 2 + 3).
            self.assertTrue(sum_sub_env_vector_indices[1] == 6)
            self.assertTrue(sum_sub_env_vector_indices[2] == 6)
            trainer.stop()
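# `OnSubEnvironmentCreatedCallback` is referenced above but not defined in this
# snippet. A minimal sketch of what such a callback could look like, assuming
# RLlib's `DefaultCallbacks.on_sub_environment_created()` hook; the class name
# and the exact bookkeeping are illustrative assumptions, not the actual test
# callback.
from ray.rllib.agents.callbacks import DefaultCallbacks


class OnSubEnvironmentCreatedCallbackSketch(DefaultCallbacks):
    def on_sub_environment_created(
        self, *, worker, sub_environment, env_context, **kwargs
    ):
        # Sum up the vector indices of all sub-environments created on this
        # worker (0 + 1 + 2 + 3 = 6 for 4 sub-envs), matching what the test
        # above reads back via `foreach_worker()`.
        if not hasattr(worker, "sum_sub_env_vector_indices"):
            worker.sum_sub_env_vector_indices = 0
        worker.sum_sub_env_vector_indices += env_context.vector_index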
def test_leaky_policy(self):
    """Tests whether our diagnostics tools can detect leaks in a policy."""
    config = dqn.DEFAULT_CONFIG.copy()
    # Make sure we have an env to test on the local worker.
    # Otherwise, `check_memory_leaks` will complain.
    config["create_env_on_driver"] = True
    config["env"] = "CartPole-v0"
    config["multiagent"]["policies"] = {
        "default_policy": PolicySpec(policy_class=MemoryLeakingPolicy),
    }
    trainer = dqn.DQNTrainer(config=config)
    results = check_memory_leaks(trainer, to_check={"policy"}, repeats=300)
    assert results["policy"]
    trainer.stop()
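# `MemoryLeakingPolicy` is referenced above but not defined in this snippet.
# A minimal sketch of a policy that leaks on purpose, assuming RLlib's example
# `RandomPolicy` as a base class; the class name and leak mechanism here are
# illustrative assumptions, not the actual test policy.
from ray.rllib.examples.policy.random_policy import RandomPolicy


class MemoryLeakingPolicySketch(RandomPolicy):
    """Acts randomly, but grows a never-freed list on every action call."""

    _leak = []

    def compute_actions(self, *args, **kwargs):
        # Each call appends a new object that is never released, which
        # `check_memory_leaks(..., to_check={"policy"})` should flag.
        self._leak.append([1.5] * 100)
        return super().compute_actions(*args, **kwargs)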
def train_rllib_policy(config):
    """Trains a DQNTrainer on MsPacman-v0 for n iterations.

    Saves the trained Trainer to disk and returns the checkpoint path.

    Returns:
        str: The checkpoint path from which the trained DQNTrainer can be
            restored.
    """
    # Create the trainer from the given config.
    trainer = dqn.DQNTrainer(config=config)

    # Train for n iterations, then save the trainer state.
    for _ in range(args.train_iters):
        print(trainer.train())
    return trainer.save()
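# A hedged usage sketch for the helper above. The config keys are common RLlib
# trainer options chosen for illustration; `args.train_iters` is assumed to be
# provided by an argparse parser defined elsewhere in the script.
if __name__ == "__main__":
    config = {
        "env": "MsPacman-v0",
        "framework": "torch",
        "num_workers": 1,
        "num_gpus": 0,
    }
    checkpoint_path = train_rllib_policy(config)
    print(f"Trained DQNTrainer checkpoint saved to: {checkpoint_path}")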
def test_traj_view_normal_case(self):
    """Tests whether Model and Policy return the correct ViewRequirements."""
    config = dqn.DEFAULT_CONFIG.copy()
    config["num_envs_per_worker"] = 10
    config["rollout_fragment_length"] = 4

    for _ in framework_iterator(config):
        trainer = dqn.DQNTrainer(
            config,
            env="ray.rllib.examples.env.debug_counter_env.DebugCounterEnv",
        )
        policy = trainer.get_policy()
        view_req_model = policy.model.view_requirements
        view_req_policy = policy.view_requirements
        assert len(view_req_model) == 1, view_req_model
        assert len(view_req_policy) == 10, view_req_policy
        for key in [
            SampleBatch.OBS,
            SampleBatch.ACTIONS,
            SampleBatch.REWARDS,
            SampleBatch.DONES,
            SampleBatch.NEXT_OBS,
            SampleBatch.EPS_ID,
            SampleBatch.AGENT_INDEX,
            "weights",
        ]:
            assert key in view_req_policy
            # None of the view cols has a special underlying data_col,
            # except next-obs.
            if key != SampleBatch.NEXT_OBS:
                assert view_req_policy[key].data_col is None
            else:
                assert view_req_policy[key].data_col == SampleBatch.OBS
                assert view_req_policy[key].shift == 1

        rollout_worker = trainer.workers.local_worker()
        sample_batch = rollout_worker.sample()
        expected_count = (
            config["num_envs_per_worker"] * config["rollout_fragment_length"]
        )
        assert sample_batch.count == expected_count
        for v in sample_batch.values():
            assert len(v) == expected_count
        trainer.stop()
def __init__(self, config, checkpoint_path):
    # Create the Trainer.
    self.trainer = dqn.DQNTrainer(config=config)
    # Load an already trained state for the trainer.
    self.trainer.restore(checkpoint_path)
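# A hedged sketch of how the restored trainer above might be used for
# inference; the method name and its greedy-action choice are assumptions for
# illustration, not part of the original snippet.
def compute_action_sketch(self, observation):
    # Query the restored DQNTrainer for a single (deterministic) action.
    return self.trainer.compute_single_action(observation, explore=False)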
def test_dqn_exploration_and_soft_q_config(self):
    """Tests whether a DQN Agent outputs exploration/softmaxed actions."""
    config = (
        dqn.dqn.DQNConfig()
        .rollouts(num_rollout_workers=0)
        .environment(env_config={"is_slippery": False, "map_name": "4x4"})
    )
    obs = np.array(0)

    # Test against all frameworks.
    for _ in framework_iterator(config):
        # Default EpsilonGreedy setup.
        trainer = dqn.DQNTrainer(config=config, env="FrozenLake-v1")
        # Setting explore=False should always return the same action.
        a_ = trainer.compute_single_action(obs, explore=False)
        for _ in range(50):
            a = trainer.compute_single_action(obs, explore=False)
            check(a, a_)
        # explore=None (default: explore) should return different actions.
        actions = []
        for _ in range(50):
            actions.append(trainer.compute_single_action(obs))
        check(np.std(actions), 0.0, false=True)
        trainer.stop()

        # Low softmax temperature. Behaves like argmax
        # (but no epsilon exploration).
        config.exploration(
            exploration_config={"type": "SoftQ", "temperature": 0.000001}
        )
        trainer = dqn.DQNTrainer(config=config, env="FrozenLake-v1")
        # Due to the low temp, always expect the same action.
        actions = [trainer.compute_single_action(obs)]
        for _ in range(50):
            actions.append(trainer.compute_single_action(obs))
        check(np.std(actions), 0.0, decimals=3)
        trainer.stop()

        # Higher softmax temperature.
        config.exploration_config["temperature"] = 1.0
        trainer = dqn.DQNTrainer(config=config, env="FrozenLake-v1")

        # Even with the higher temperature, if we set explore=False, we
        # should expect the same actions always.
        a_ = trainer.compute_single_action(obs, explore=False)
        for _ in range(50):
            a = trainer.compute_single_action(obs, explore=False)
            check(a, a_)

        # Due to the higher temp, expect different actions avg'ing
        # around 1.5.
        actions = []
        for _ in range(300):
            actions.append(trainer.compute_single_action(obs))
        check(np.std(actions), 0.0, false=True)
        trainer.stop()

        # With Random exploration.
        config.exploration(exploration_config={"type": "Random"}, explore=True)
        trainer = dqn.DQNTrainer(config=config, env="FrozenLake-v1")
        actions = []
        for _ in range(300):
            actions.append(trainer.compute_single_action(obs))
        check(np.std(actions), 0.0, false=True)
        trainer.stop()