class TestEpsilonGreedyPolicy:

    def setup_method(self):
        self.env = DummyDiscreteEnv()
        self.policy = SimplePolicy(env_spec=self.env)
        self.epsilon_greedy_policy = EpsilonGreedyPolicy(env_spec=self.env,
                                                         policy=self.policy,
                                                         total_timesteps=100,
                                                         max_epsilon=1.0,
                                                         min_epsilon=0.02,
                                                         decay_ratio=0.1)
        self.env.reset()

    def test_epsilon_greedy_policy(self):
        obs, _, _, _ = self.env.step(1)

        action, _ = self.epsilon_greedy_policy.get_action(obs)
        assert self.env.action_space.contains(action)

        # epsilon decays by 1 step, new epsilon = 1 - 0.098 = 0.902
        random_rate = np.random.random(
            100000) < self.epsilon_greedy_policy._epsilon
        assert np.isclose([0.902], [sum(random_rate) / 100000], atol=0.01)

        actions, _ = self.epsilon_greedy_policy.get_actions([obs] * 5)

        # epsilon decays by 6 steps in total, new epsilon = 1 - 6 * 0.098 = 0.412
        random_rate = np.random.random(
            100000) < self.epsilon_greedy_policy._epsilon
        assert np.isclose([0.412], [sum(random_rate) / 100000], atol=0.01)

        for action in actions:
            assert self.env.action_space.contains(action)

    def test_epsilon_greedy_policy_is_pickleable(self):
        obs, _, _, _ = self.env.step(1)
        for _ in range(5):
            self.epsilon_greedy_policy.get_action(obs)

        h_data = pickle.dumps(self.epsilon_greedy_policy)
        policy = pickle.loads(h_data)
        assert policy._epsilon == self.epsilon_greedy_policy._epsilon
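# Hedged sanity-check sketch, not part of the test suite: it assumes
# EpsilonGreedyPolicy anneals epsilon linearly from max_epsilon to min_epsilon
# over decay_ratio * total_timesteps samples, one decrement per sampled
# action, which is what the 0.902 and 0.412 values in the comments above imply.
def expected_epsilon(steps_taken,
                     total_timesteps=100,
                     max_epsilon=1.0,
                     min_epsilon=0.02,
                     decay_ratio=0.1):
    """Linear decay over the first decay_ratio * total_timesteps samples."""
    decay_period = decay_ratio * total_timesteps        # 10 steps here
    decrement = (max_epsilon - min_epsilon) / decay_period  # 0.098 per step
    return max(min_epsilon, max_epsilon - decrement * steps_taken)


assert abs(expected_epsilon(1) - 0.902) < 1e-9  # after get_action(obs)
assert abs(expected_epsilon(6) - 0.412) < 1e-9  # after 5 more samples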
def test_dqn_cartpole_pickle(self):
    """Test that DQN pickles and restores correctly on CartPole."""
    with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
        n_epochs = 10
        steps_per_epoch = 10
        sampler_batch_size = 500
        num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
        env = MetaRLEnv(gym.make('CartPole-v0'))
        replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
        qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
        policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
        epsilon_greedy_policy = EpsilonGreedyPolicy(
            env_spec=env.spec,
            policy=policy,
            total_timesteps=num_timesteps,
            max_epsilon=1.0,
            min_epsilon=0.02,
            decay_ratio=0.1)
        algo = DQN(env_spec=env.spec,
                   policy=policy,
                   qf=qf,
                   exploration_policy=epsilon_greedy_policy,
                   replay_buffer=replay_buffer,
                   qf_lr=1e-4,
                   discount=1.0,
                   min_buffer_size=int(1e3),
                   double_q=False,
                   n_train_steps=500,
                   grad_norm_clipping=5.0,
                   steps_per_epoch=steps_per_epoch,
                   target_network_update_freq=1,
                   buffer_batch_size=32)
        runner.setup(algo, env)
        with tf.compat.v1.variable_scope(
                'DiscreteMLPQFunction/MLPModel/mlp/hidden_0', reuse=True):
            bias = tf.compat.v1.get_variable('bias')
            # assign all ones to the bias so the round trip is observable
            old_bias = tf.ones_like(bias).eval()
            bias.load(old_bias)
        h = pickle.dumps(algo)

        with tf.compat.v1.Session(graph=tf.Graph()):
            pickle.loads(h)
            with tf.compat.v1.variable_scope(
                    'DiscreteMLPQFunction/MLPModel/mlp/hidden_0',
                    reuse=True):
                new_bias = tf.compat.v1.get_variable('bias')
                new_bias = new_bias.eval()
                assert np.array_equal(old_bias, new_bias)

        env.close()
def dqn_cartpole(ctxt=None, seed=1):
    """Train DQN on the CartPole-v0 environment.

    Args:
        ctxt (metarl.experiment.ExperimentContext): The experiment
            configuration used by LocalRunner to create the snapshotter.
        seed (int): Used to seed the random number generator to produce
            determinism.

    """
    set_seed(seed)
    with LocalTFRunner(ctxt) as runner:
        n_epochs = 10
        steps_per_epoch = 10
        sampler_batch_size = 500
        num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
        env = MetaRLEnv(gym.make('CartPole-v0'))
        replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
        qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
        policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
        exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
                                                 policy=policy,
                                                 total_timesteps=num_timesteps,
                                                 max_epsilon=1.0,
                                                 min_epsilon=0.02,
                                                 decay_ratio=0.1)
        algo = DQN(env_spec=env.spec,
                   policy=policy,
                   qf=qf,
                   exploration_policy=exploration_policy,
                   replay_buffer=replay_buffer,
                   steps_per_epoch=steps_per_epoch,
                   qf_lr=1e-4,
                   discount=1.0,
                   min_buffer_size=int(1e3),
                   double_q=True,
                   n_train_steps=500,
                   target_network_update_freq=1,
                   buffer_batch_size=32)

        runner.setup(algo, env)
        runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
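# Illustrative sketch, not metarl's implementation: the difference between the
# vanilla DQN target (double_q=False in the tests) and the double-DQN target
# used by this launcher (double_q=True), shown with plain numpy Q-tables.
import numpy as np

q_online = np.array([[1.0, 5.0, 2.0]])   # online network Q(s', .)
q_target = np.array([[4.0, 0.5, 3.0]])   # target network Q(s', .)
reward, discount = 1.0, 1.0

# Vanilla DQN: select and evaluate the next action with the target network.
vanilla_target = reward + discount * q_target.max(axis=1)

# Double DQN: select the action with the online network, evaluate it with the
# target network, which reduces overestimation bias.
best_action = q_online.argmax(axis=1)
double_target = reward + discount * q_target[np.arange(1), best_action]

print(vanilla_target)  # [5.]  (target-net max: 1.0 + 4.0)
print(double_target)   # [1.5] (online argmax is a=1, evaluated as 1.0 + 0.5)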
def test_dqn_cartpole_grad_clip(self):
    """Test DQN with gradient clipping on the CartPole environment."""
    with LocalTFRunner(snapshot_config, sess=self.sess) as runner:
        n_epochs = 10
        steps_per_epoch = 10
        sampler_batch_size = 500
        num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
        env = MetaRLEnv(gym.make('CartPole-v0'))
        replay_buffer = PathBuffer(capacity_in_transitions=int(1e4))
        qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))
        policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
        epsilon_greedy_policy = EpsilonGreedyPolicy(
            env_spec=env.spec,
            policy=policy,
            total_timesteps=num_timesteps,
            max_epsilon=1.0,
            min_epsilon=0.02,
            decay_ratio=0.1)
        algo = DQN(env_spec=env.spec,
                   policy=policy,
                   qf=qf,
                   exploration_policy=epsilon_greedy_policy,
                   replay_buffer=replay_buffer,
                   qf_lr=1e-4,
                   discount=1.0,
                   min_buffer_size=int(1e3),
                   double_q=False,
                   n_train_steps=500,
                   grad_norm_clipping=5.0,
                   steps_per_epoch=steps_per_epoch,
                   target_network_update_freq=1,
                   buffer_batch_size=32)
        runner.setup(algo, env)
        last_avg_ret = runner.train(n_epochs=n_epochs,
                                    batch_size=sampler_batch_size)
        assert last_avg_ret > 15

        env.close()
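# Hedged sketch of what grad_norm_clipping=5.0 typically means in TF-based
# DQN implementations: each gradient tensor is rescaled so its L2 norm does
# not exceed the threshold (the tf.clip_by_norm behaviour). Shown here with
# plain numpy; it is not copied from metarl's actual optimizer code.
import numpy as np


def clip_by_norm(grad, clip_norm):
    """Rescale ``grad`` so that its L2 norm is at most ``clip_norm``."""
    norm = np.linalg.norm(grad)
    if norm <= clip_norm:
        return grad
    return grad * (clip_norm / norm)


print(clip_by_norm(np.array([6.0, 8.0]), 5.0))  # [3. 4.], norm rescaled to 5
print(clip_by_norm(np.array([1.0, 2.0]), 5.0))  # unchanged, norm already < 5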
def dqn_pong(ctxt=None, seed=1, buffer_size=int(5e4), max_path_length=None):
    """Train DQN on PongNoFrameskip-v4 environment.

    Args:
        ctxt (metarl.experiment.ExperimentContext): The experiment
            configuration used by LocalRunner to create the snapshotter.
        seed (int): Used to seed the random number generator to produce
            determinism.
        buffer_size (int): Number of timesteps to store in replay buffer.
        max_path_length (int): Maximum length of a path after which a path is
            considered complete. This is used during testing to minimize the
            memory required to store a single path.

    """
    set_seed(seed)
    with LocalTFRunner(ctxt) as runner:
        n_epochs = 100
        steps_per_epoch = 20
        sampler_batch_size = 500
        num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size

        env = gym.make('PongNoFrameskip-v4')
        env = Noop(env, noop_max=30)
        env = MaxAndSkip(env, skip=4)
        env = EpisodicLife(env)
        if 'FIRE' in env.unwrapped.get_action_meanings():
            env = FireReset(env)
        env = Grayscale(env)
        env = Resize(env, 84, 84)
        env = ClipReward(env)
        env = StackFrames(env, 4)

        env = MetaRLEnv(env, is_image=True)

        replay_buffer = PathBuffer(capacity_in_transitions=buffer_size)

        qf = DiscreteCNNQFunction(env_spec=env.spec,
                                  filters=(
                                            (32, (8, 8)),
                                            (64, (4, 4)),
                                            (64, (3, 3)),
                                  ),
                                  strides=(4, 2, 1),
                                  dueling=False)  # yapf: disable

        policy = DiscreteQfDerivedPolicy(env_spec=env.spec, qf=qf)
        exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,
                                                 policy=policy,
                                                 total_timesteps=num_timesteps,
                                                 max_epsilon=1.0,
                                                 min_epsilon=0.02,
                                                 decay_ratio=0.1)

        algo = DQN(env_spec=env.spec,
                   policy=policy,
                   qf=qf,
                   exploration_policy=exploration_policy,
                   replay_buffer=replay_buffer,
                   qf_lr=1e-4,
                   discount=0.99,
                   min_buffer_size=int(1e4),
                   max_path_length=max_path_length,
                   double_q=False,
                   n_train_steps=500,
                   steps_per_epoch=steps_per_epoch,
                   target_network_update_freq=2,
                   buffer_batch_size=32)

        runner.setup(algo, env)
        runner.train(n_epochs=n_epochs, batch_size=sampler_batch_size)
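# Back-of-the-envelope check of the schedule implied by the hyperparameters
# above; a sketch that assumes epsilon decays linearly over
# decay_ratio * total_timesteps samples, as the epsilon-greedy unit test
# earlier suggests.
n_epochs, steps_per_epoch, sampler_batch_size = 100, 20, 500
num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size
steps_per_full_epoch = steps_per_epoch * sampler_batch_size

print(num_timesteps)         # 1000000 environment steps in total
print(steps_per_full_epoch)  # 10000 steps collected per epoch
print(0.1 * num_timesteps)   # epsilon reaches min_epsilon after ~100000 steps
print(int(1e4) // steps_per_full_epoch)  # min_buffer_size filled by epoch 1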