import gym
import numpy as np

from d3rlpy.online.buffers import ReplayBuffer


def test_replay_buffer(n_episodes, batch_size, maxlen, create_mask, mask_size):
    env = gym.make("CartPole-v0")

    buffer = ReplayBuffer(maxlen, env, create_mask=create_mask, mask_size=mask_size)

    # fill the buffer with random-policy transitions
    total_step = 0
    for episode in range(n_episodes):
        observation, reward, terminal = env.reset(), 0.0, False
        while not terminal:
            action = env.action_space.sample()
            buffer.append(observation.astype("f4"), action, reward, terminal)
            observation, reward, terminal, _ = env.step(action)
            total_step += 1
        # append the terminal transition to close out the episode
        buffer.append(observation.astype("f4"), action, reward, terminal)
        total_step += 1

    assert len(buffer) == maxlen

    # check static dataset conversion
    dataset = buffer.to_mdp_dataset()
    transitions = []
    for episode in dataset:
        transitions += episode.transitions
    assert len(transitions) >= len(buffer)

    # sampled batches carry both current and next observations/actions/rewards
    observation_shape = env.observation_space.shape
    batch = buffer.sample(batch_size)
    assert len(batch) == batch_size
    assert batch.observations.shape == (batch_size,) + observation_shape
    assert batch.actions.shape == (batch_size,)
    assert batch.rewards.shape == (batch_size, 1)
    assert batch.next_observations.shape == (batch_size,) + observation_shape
    assert batch.next_actions.shape == (batch_size,)
    assert batch.next_rewards.shape == (batch_size, 1)
    assert batch.terminals.shape == (batch_size, 1)
    assert isinstance(batch.observations, np.ndarray)
    assert isinstance(batch.next_observations, np.ndarray)

    # bootstrap masks are only generated when requested
    if create_mask:
        assert batch.masks.shape == (mask_size, batch_size, 1)
    else:
        assert batch.masks is None
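
# A minimal invocation sketch. The parameter values below are illustrative
# assumptions, not taken from the original test suite: 100 random CartPole
# episodes produce far more than 200 steps, so the buffer overflows its
# maxlen and the length assertion holds.
if __name__ == "__main__":
    test_replay_buffer(n_episodes=100, batch_size=32, maxlen=200,
                       create_mask=False, mask_size=1)
    test_replay_buffer(n_episodes=100, batch_size=32, maxlen=200,
                       create_mask=True, mask_size=5)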
import d3rlpy
from d3rlpy.algos import DQN, DiscreteCQL
from d3rlpy.models.q_functions import QRQFunctionFactory
from d3rlpy.online.buffers import ReplayBuffer
from d3rlpy.online.explorers import LinearDecayEpsilonGreedy
from d3rlpy.metrics.scorer import (
    average_value_estimation_scorer,
    discounted_sum_of_advantage_scorer,
    evaluate_on_environment,
    td_error_scorer,
)
from sklearn.model_selection import train_test_split


def train(params):
    # env, eval_env, log_dir, exp_name and pretrain are assumed to be
    # defined at module level
    if pretrain:
        # setup algorithm
        dqn = DQN(
            batch_size=params.get("batch_size"),
            learning_rate=params.get("learning_rate"),
            target_update_interval=params.get("target_update_interval"),
            q_func_factory=QRQFunctionFactory(n_quantiles=params.get("n_quantiles")),
            n_steps=params.get("train_freq"),
            gamma=params.get("gamma"),
            n_critics=1,
            target_reduction_type="min",
            use_gpu=True,
        )

        # setup replay buffer
        buffer = ReplayBuffer(maxlen=params.get("buffer_size"), env=env)

        # setup explorer
        explorer = LinearDecayEpsilonGreedy(
            start_epsilon=1.0,
            end_epsilon=params.get("exploration_final_eps"),
            duration=100000,
        )

        # start online training
        dqn.fit_online(
            env,
            buffer,
            n_steps=params.get("train_steps"),
            explorer=explorer,  # not needed with probabilistic policy algorithms
            tensorboard_dir=log_dir,
            eval_env=eval_env,
        )

        print("Saving Model")
        dqn.save_model(exp_name)

        print("convert buffer to dataset")
        dataset = buffer.to_mdp_dataset()

        # save MDPDataset
        dataset.dump("{0}.h5".format(exp_name))

    print("Loading Dataset for Offline Training")
    dataset = d3rlpy.dataset.MDPDataset.load("{0}.h5".format(exp_name))
    train_episodes, test_episodes = train_test_split(dataset, test_size=0.2)

    # the dataset can then be used to train a d3rlpy model
    cql = DiscreteCQL(
        learning_rate=6.25e-05,
        encoder_factory="default",
        q_func_factory="mean",
        batch_size=32,
        n_frames=1,
        n_steps=1,
        gamma=0.99,
        n_critics=1,
        bootstrap=False,
        share_encoder=False,
        target_reduction_type="min",
        target_update_interval=8000,
        use_gpu=True,
        scaler=None,
        augmentation=None,
        generator=None,
        impl=None,
    )

    cql_exp = params.get("model_name") + "_offline_" + params.get("environment")
    cql_log = "../../../logs/" + cql_exp

    # train offline on the training split, scoring against held-out episodes
    cql.fit(
        train_episodes,
        eval_episodes=test_episodes,
        n_epochs=1000,
        scorers={
            "environment": evaluate_on_environment(env, epsilon=0.05),
            "td_error": td_error_scorer,
            "discounted_advantage": discounted_sum_of_advantage_scorer,
            "value_scale": average_value_estimation_scorer,
        },
        tensorboard_dir=cql_log,
    )

    cql.save_model(cql_exp)
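
# Hypothetical driver for train(): the module-level names it relies on plus a
# params dict whose keys mirror the params.get() calls above. Every value here
# is an illustrative assumption, not a setting from the original experiment.
import gym

env = gym.make("CartPole-v0")
eval_env = gym.make("CartPole-v0")
log_dir = "../../../logs/dqn_online_CartPole-v0"  # hypothetical path
exp_name = "DQN_CartPole-v0"                      # hypothetical name
pretrain = True  # run the online DQN phase before offline CQL

params = {
    "batch_size": 32,
    "learning_rate": 6.25e-5,
    "target_update_interval": 8000,
    "n_quantiles": 200,
    "train_freq": 1,
    "gamma": 0.99,
    "buffer_size": 100000,
    "exploration_final_eps": 0.02,
    "train_steps": 100000,
    "model_name": "DQN",
    "environment": "CartPole-v0",
}

train(params)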