def test_fit_batch_online_atari_with_dqn():
    import d4rl_atari

    make_env = lambda: ChannelFirst(DummyAtari())
    env = AsyncBatchEnv([make_env for _ in range(2)])
    eval_env = ChannelFirst(DummyAtari())
    algo = DQN(n_frames=4)
    buffer = BatchReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_batch_online(
        env,
        buffer,
        explorer,
        n_epochs=1,
        n_steps_per_epoch=500,
        n_updates_per_epoch=1,
        eval_env=eval_env,
        logdir="test_data",
    )
    assert algo.impl.observation_shape == (4, 84, 84)
def train(params):
    # `env`, `eval_env`, `log_dir` and `exp_name` are assumed to be defined
    # at module level in the original script

    # setup algorithm
    dqn = DQN(batch_size=params.get("batch_size"),
              learning_rate=params.get("learning_rate"),
              target_update_interval=params.get("target_update_interval"),
              q_func_factory=QRQFunctionFactory(
                  n_quantiles=params.get("n_quantiles")),
              n_steps=params.get("train_freq"),
              gamma=params.get("gamma"),
              n_critics=1,
              target_reduction_type="min",
              use_gpu=True)

    # setup replay buffer
    buffer = ReplayBuffer(maxlen=params.get("buffer_size"), env=env)

    # setup explorer
    explorer = LinearDecayEpsilonGreedy(
        start_epsilon=1.0,
        end_epsilon=params.get("exploration_final_eps"),
        duration=100000)

    # start training
    dqn.fit_online(
        env,
        buffer,
        n_steps=params.get("train_steps"),
        explorer=explorer,  # not needed with probabilistic policy algorithms
        tensorboard_dir=log_dir,
        eval_env=eval_env)

    dqn.save_model(exp_name)
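
# A hypothetical invocation of `train` for context: the keys below mirror the
# `params.get(...)` lookups above, but every value is an illustrative
# placeholder, not a setting taken from the source.
params = {
    "batch_size": 32,
    "learning_rate": 6.25e-5,
    "target_update_interval": 8000,
    "n_quantiles": 200,
    "train_freq": 1,
    "gamma": 0.99,
    "buffer_size": 100000,
    "exploration_final_eps": 0.02,
    "train_steps": 100000,
}
train(params)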
def test_fit_batch_online_atari_with_dqn():
    import d4rl_atari

    make_env = lambda: gym.make("breakout-mixed-v0", stack=False)
    env = AsyncBatchEnv([make_env for _ in range(2)])
    eval_env = gym.make("breakout-mixed-v0", stack=False)
    algo = DQN(n_frames=4)
    buffer = BatchReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_batch_online(
        env,
        buffer,
        explorer,
        n_epochs=1,
        n_steps_per_epoch=500,
        n_updates_per_epoch=1,
        eval_env=eval_env,
        logdir="test_data",
        tensorboard=False,
    )
    assert algo.impl.observation_shape == (4, 84, 84)
def test_collect_atari_with_dqn():
    import d4rl_atari

    env = ChannelFirst(DummyAtari())
    algo = DQN(n_frames=4)
    explorer = LinearDecayEpsilonGreedy()
    buffer = algo.collect(env, explorer=explorer, n_steps=100)
    assert algo.impl.observation_shape == (4, 84, 84)
    assert 90 < buffer.size() < 100
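
# The buffer returned by `collect` can be turned into an offline dataset, as
# the longer training script at the end of this section does with
# `to_mdp_dataset()`; a minimal follow-up sketch assuming the same API
# ("collected.h5" is a hypothetical filename):
dataset = buffer.to_mdp_dataset()
dataset.dump("collected.h5")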
def test_train_with_dqn():
    env = gym.make('CartPole-v0')
    eval_env = gym.make('CartPole-v0')
    algo = DQN(n_epochs=1)
    buffer = ReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    train(env,
          algo,
          buffer,
          explorer,
          eval_env=eval_env,
          logdir='test_data',
          tensorboard=False)
def test_fit_online_cartpole_with_dqn():
    env = gym.make('CartPole-v0')
    eval_env = gym.make('CartPole-v0')
    algo = DQN()
    buffer = ReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_online(env,
                    buffer,
                    explorer,
                    n_epochs=1,
                    eval_env=eval_env,
                    logdir='test_data',
                    tensorboard=False)
def test_fit_online_cartpole_with_dqn(): env = gym.make("CartPole-v0") eval_env = gym.make("CartPole-v0") algo = DQN() buffer = ReplayBuffer(1000, env) explorer = LinearDecayEpsilonGreedy() algo.fit_online( env, buffer, explorer, n_steps=100, eval_env=eval_env, logdir="test_data", )
def test_fit_batch_online_cartpole_with_dqn():
    make_env = lambda: gym.make("CartPole-v0")
    env = AsyncBatchEnv([make_env for _ in range(5)])
    eval_env = gym.make("CartPole-v0")
    algo = DQN()
    buffer = BatchReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_batch_online(
        env,
        buffer,
        explorer,
        n_epochs=1,
        n_steps_per_epoch=500,
        n_updates_per_epoch=1,
        eval_env=eval_env,
        logdir="test_data",
    )
def test_train_atari_with_dqn():
    import d4rl_atari

    env = gym.make('breakout-mixed-v0', stack=False)
    eval_env = gym.make('breakout-mixed-v0', stack=False)
    algo = DQN(n_frames=4)
    buffer = ReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    train(env,
          algo,
          buffer,
          explorer,
          n_steps=100,
          eval_env=eval_env,
          logdir='test_data',
          tensorboard=False)
    assert algo.impl.observation_shape == (4, 84, 84)
def test_fit_online_atari_with_dqn():
    import d4rl_atari

    env = ChannelFirst(DummyAtari())
    eval_env = ChannelFirst(DummyAtari())
    algo = DQN(n_frames=4)
    buffer = ReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_online(
        env,
        buffer,
        explorer,
        n_steps=100,
        eval_env=eval_env,
        logdir="test_data",
    )
    assert algo.impl.observation_shape == (4, 84, 84)
def test_fit_online_atari_with_dqn():
    import d4rl_atari

    env = ChannelFirst(gym.make("breakout-mixed-v0"))
    eval_env = ChannelFirst(gym.make("breakout-mixed-v0"))
    algo = DQN(n_frames=4)
    buffer = ReplayBuffer(1000, env)
    explorer = LinearDecayEpsilonGreedy()
    algo.fit_online(
        env,
        buffer,
        explorer,
        n_steps=100,
        eval_env=eval_env,
        logdir="test_data",
        tensorboard=False,
    )
    assert algo.impl.observation_shape == (4, 84, 84)
def test_linear_decay_epsilon_greedy(action_size, observation_shape,
                                     start_epsilon, end_epsilon, duration):
    explorer = LinearDecayEpsilonGreedy(start_epsilon, end_epsilon, duration)

    # check epsilon at the start, end, and one step into the decay
    assert explorer.compute_epsilon(0) == start_epsilon
    assert explorer.compute_epsilon(duration) == end_epsilon
    base = start_epsilon - end_epsilon
    ref_epsilon = end_epsilon + base * (1.0 - 1.0 / duration)
    assert explorer.compute_epsilon(1) == ref_epsilon

    ref_x = np.random.random((1, ) + observation_shape)
    ref_y = np.random.randint(action_size, size=(1, ))

    class DummyAlgo:
        def predict(self, x):
            assert np.all(x == ref_x)
            return ref_y

        @property
        def impl(self):
            return self

        @property
        def action_size(self):
            return action_size

    algo = DummyAlgo()

    # check sampling: a fresh random action should differ from the
    # explorer's choice at least once in 10 draws
    for i in range(10):
        action = np.random.randint(action_size)
        if action != explorer.sample(algo, ref_x, 0):
            break
        elif i == 9:
            assert False
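
# For reference, the linear schedule these assertions pin down can be written
# directly; this standalone sketch (not from the source) assumes only the
# interpolation implied by the checks above.
def linear_epsilon(step, start_epsilon=1.0, end_epsilon=0.1, duration=10000):
    # interpolate from start_epsilon to end_epsilon over `duration` steps,
    # then hold end_epsilon
    if step >= duration:
        return end_epsilon
    return start_epsilon + (end_epsilon - start_epsilon) * step / duration

assert linear_epsilon(0) == 1.0
assert linear_epsilon(10000) == 0.1
assert linear_epsilon(1) == 1.0 + (0.1 - 1.0) / 10000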
# NOTE: the source snippet begins mid-call; only the tail of the
# evaluation-environment construction survives:
#     terminate_on_life_loss=False)

# setup algorithm
dqn = DoubleDQN(batch_size=32,
                learning_rate=2.5e-4,
                optim_factory=AdamFactory(eps=1e-2 / 32),
                target_update_interval=10000,
                q_func_factory='mean',
                scaler='pixel',
                n_frames=4,
                use_gpu=True)

# replay buffer for experience replay
buffer = ReplayBuffer(maxlen=1000000, env=env)

# epsilon-greedy explorer
explorer = LinearDecayEpsilonGreedy(start_epsilon=1.0,
                                    end_epsilon=0.1,
                                    duration=1000000)

# start training
dqn.fit_online(env,
               buffer,
               explorer,
               eval_env=eval_env,
               eval_epsilon=0.01,
               n_steps=50000000,
               n_steps_per_epoch=100000,
               update_interval=4,
               update_start_step=50000)
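
# As a follow-up, the trained policy can be persisted the same way the other
# scripts in this section do; 'double_dqn.pt' is a hypothetical filename.
dqn.save_model('double_dqn.pt')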
def train(params):
    # `pretrain`, `env`, `eval_env`, `log_dir` and `exp_name` are assumed to
    # be defined in the enclosing scope of the original script

    if pretrain:
        # setup algorithm
        dqn = DQN(batch_size=params.get("batch_size"),
                  learning_rate=params.get("learning_rate"),
                  target_update_interval=params.get("target_update_interval"),
                  q_func_factory=QRQFunctionFactory(
                      n_quantiles=params.get("n_quantiles")),
                  n_steps=params.get("train_freq"),
                  gamma=params.get("gamma"),
                  n_critics=1,
                  target_reduction_type="min",
                  use_gpu=True)

        # setup replay buffer
        buffer = ReplayBuffer(maxlen=params.get("buffer_size"), env=env)

        # setup explorer
        explorer = LinearDecayEpsilonGreedy(
            start_epsilon=1.0,
            end_epsilon=params.get("exploration_final_eps"),
            duration=100000)

        # start training
        dqn.fit_online(
            env,
            buffer,
            n_steps=params.get("train_steps"),
            explorer=explorer,  # not needed with probabilistic policy algorithms
            tensorboard_dir=log_dir,
            eval_env=eval_env)

        print("Saving Model")
        dqn.save_model(exp_name)

        print("convert buffer to dataset")
        dataset = buffer.to_mdp_dataset()

        # save MDPDataset
        dataset.dump('{0}.h5'.format(exp_name))

    print("Loading Dataset for Offline Training")
    dataset = d3rlpy.dataset.MDPDataset.load('{0}.h5'.format(exp_name))
    train_episodes, test_episodes = train_test_split(dataset, test_size=0.2)

    # the dataset can then be used to train a d3rlpy model
    cql = DiscreteCQL(learning_rate=6.25e-05,
                      encoder_factory='default',
                      q_func_factory='mean',
                      batch_size=32,
                      n_frames=1,
                      n_steps=1,
                      gamma=0.99,
                      n_critics=1,
                      bootstrap=False,
                      share_encoder=False,
                      target_reduction_type='min',
                      target_update_interval=8000,
                      use_gpu=True,
                      scaler=None,
                      augmentation=None,
                      generator=None,
                      impl=None)

    cql_exp = params.get("model_name") + "_offline_" + params.get("environment")
    cql_log = '../../../logs/' + cql_exp

    cql.fit(dataset.episodes,
            eval_episodes=test_episodes,
            n_epochs=1000,
            scorers={
                'environment': evaluate_on_environment(env, epsilon=0.05),
                'td_error': td_error_scorer,
                'discounted_advantage': discounted_sum_of_advantage_scorer,
                'value_scale': average_value_estimation_scorer,
            },
            tensorboard_dir=cql_log)

    cql.save_model(cql_exp)
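
# To use the saved offline policy afterwards, it can be rebuilt and loaded; a
# minimal sketch (not from the source) assuming the same `env` and `cql_exp`
# as above and d3rlpy's build_with_env/load_model/predict API.
policy = DiscreteCQL()
policy.build_with_env(env)   # initialize networks to match the env shapes
policy.load_model(cql_exp)   # path written by cql.save_model above

# greedy rollout for one episode
observation = env.reset()
done = False
while not done:
    action = policy.predict([observation])[0]
    observation, reward, done, _ = env.step(action)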