def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=100000,
          exploration_fraction=0.1,
          exploration_final_eps=0.1,
          train_freq=1,
          batch_size=64,
          print_freq=1,
          eval_freq=2500,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          csv_path="results.csv",
          method_type="baseline",
          **network_kwargs):
    """Train a deepr model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepr.models for details).
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for the Adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of the entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of the random-action probability
    train_freq: int
        update the model every `train_freq` steps
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print training progress; set to None to disable printing
    eval_freq: int
        number of env steps per evaluation window used for reward logging and CSV output
    checkpoint_freq: int
        how often to save the model, so that the best version is restored at the end of training.
        Set to None if you do not wish to restore the best version at the end of training.
    learning_starts: int
        how many steps of transitions to collect before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used
    prioritized_replay_alpha: float
        alpha parameter for the prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for the prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta is annealed from its initial value to 1.0.
        If None, defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon added to the TD errors when updating priorities
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from (default: None)
    csv_path: str
        path of the CSV file to which evaluation results (STEPS, REWARD) are written
    method_type: str
        which action-mask variant to run; "shaping" enables the look-ahead shaping branch
    **network_kwargs
        additional keyword arguments to pass to the network builder

    Returns
    -------
    act: ActWrapper
        Wrapper over the act function. Adds the ability to save and load it.
        See the header of baselines/deepr/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) #q_func = build_q_func(network, **network_kwargs) q_func = build_q_func(mlp(num_layers=4, num_hidden=64), **network_kwargs) #q_func = build_q_func(mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu), **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule( schedule_timesteps=int(exploration_fraction * total_timesteps), #initial_p=1.0, initial_p=exploration_final_eps, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() eval_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) csvfile = open(csv_path, 'w', newline='') fieldnames = ['STEPS', 'REWARD'] writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for t in range(total_timesteps + 1): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: #update_eps = exploration.value(t) update_eps = exploration_final_eps update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
- exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action_mask = get_mask(env, method_type) a = act(np.array(obs)[None], unused_actions_neginf_mask=action_mask, update_eps=update_eps, **kwargs)[0] env_action = a reset = False new_obs, rew, done, _ = env.step(env_action) eval_rewards[-1] += rew action_mask_p = get_mask(env, method_type) # Shaping if method_type == 'shaping': ## look-ahead shaping ap = act(np.array(new_obs)[None], unused_actions_neginf_mask=action_mask_p, stochastic=False)[0] f = action_mask_p[ap] - action_mask[a] rew = rew + f # Store transition in the replay buffer. #replay_buffer.add(obs, a, rew, new_obs, float(done), action_mask_p) if method_type != 'shaping': replay_buffer.add(obs, a, rew, new_obs, float(done), np.zeros(env.action_space.n)) else: replay_buffer.add(obs, a, rew, new_obs, float(done), action_mask_p) obs = new_obs if t % eval_freq == 0: eval_rewards.append(0.0) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones, masks_tp1 = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights, masks_tp1) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_eval_reward = round(np.mean(eval_rewards[-1 - print_freq:-1]), 1) num_evals = len(eval_rewards) if t > 0 and t % eval_freq == 0 and print_freq is not None and t % ( print_freq * eval_freq) == 0: #if done and print_freq is not None and len(eval_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("evals", num_evals) logger.record_tabular("average reward in this eval", mean_eval_reward / (eval_freq)) logger.record_tabular("total reward in this eval", mean_eval_reward) logger.dump_tabular() writer.writerow({ "STEPS": t, "REWARD": mean_eval_reward / (eval_freq) }) csvfile.flush() if (checkpoint_freq is not None and t > learning_starts and num_evals > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_eval_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_eval_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_eval_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
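# Hedged usage sketch (not part of the original code): a minimal illustration of how
# this masked/shaped variant of `learn` might be called. The environment id, timestep
# budget, and file names below are assumptions; any discrete-action gym.Env that works
# with this repo's `get_mask` helper should be usable in the same way.
def _example_masked_learn():  # hypothetical helper, never invoked by this module
    import gym
    env = gym.make("CartPole-v1")  # placeholder environment
    act = learn(env,
                network="mlp",          # note: the body above currently hard-codes an MLP q_func
                total_timesteps=50000,
                eval_freq=2500,
                method_type="shaping",  # "shaping" enables the look-ahead shaping branch; "baseline" disables it
                csv_path="results.csv")
    return act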
def learn(env,
          network,
          seed=None,
          lr=5e-5,
          total_timesteps=100000,
          buffer_size=500000,
          exploration_fraction=0.1,
          exploration_final_eps=0.01,
          train_freq=1,
          batch_size=32,
          print_freq=10,
          checkpoint_freq=100000,
          checkpoint_path=None,
          learning_starts=0,
          gamma=0.99,
          target_network_update_freq=10000,
          prioritized_replay=True,
          prioritized_replay_alpha=0.4,
          prioritized_replay_beta0=0.6,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-3,
          param_noise=False,
          callback=None,
          load_path=None,
          load_idx=None,
          demo_path=None,
          n_step=10,
          demo_prioritized_replay_eps=1.0,
          pre_train_timesteps=750000,
          epsilon_schedule="constant",
          **network_kwargs):
    # Create all the functions necessary to train the model
    set_global_seeds(seed)
    q_func = build_q_func(network, **network_kwargs)
    with tf.device('/GPU:0'):
        model = DQfD(q_func=q_func,
                     observation_shape=env.observation_space.shape,
                     num_actions=env.action_space.n,
                     lr=lr,
                     grad_norm_clipping=10,
                     gamma=gamma,
                     param_noise=param_noise)

    # Load model from checkpoint
    if load_path is not None:
        load_path = osp.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=model)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        if load_idx is None:
            ckpt.restore(manager.latest_checkpoint)
            print("Restoring from {}".format(manager.latest_checkpoint))
        else:
            ckpt.restore(manager.checkpoints[load_idx])
            print("Restoring from {}".format(manager.checkpoints[load_idx]))

    # Setup demo trajectory
    assert demo_path is not None
    with open(demo_path, "rb") as f:
        trajectories = pickle.load(f)

    # Create the replay buffer
    replay_buffer = PrioritizedReplayBuffer(buffer_size, prioritized_replay_alpha)
    if prioritized_replay_beta_iters is None:
        prioritized_replay_beta_iters = total_timesteps
    beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                   initial_p=prioritized_replay_beta0,
                                   final_p=1.0)

    # Fill the buffer with demonstration transitions (optionally as n-step samples)
    temp_buffer = deque(maxlen=n_step)
    is_demo = True
    for epi in trajectories:
        for obs, action, rew, new_obs, done in epi:
            obs, new_obs = np.expand_dims(np.array(obs), axis=0), np.expand_dims(np.array(new_obs), axis=0)
            if n_step:
                temp_buffer.append((obs, action, rew, new_obs, done, is_demo))
                if len(temp_buffer) == n_step:
                    n_step_sample = get_n_step_sample(temp_buffer, gamma)
                    replay_buffer.demo_len += 1
                    replay_buffer.add(*n_step_sample)
            else:
                replay_buffer.demo_len += 1
                replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), float(is_demo))
    logger.log("trajectory length:", replay_buffer.demo_len)

    # Create the schedule for exploration
    if epsilon_schedule == "constant":
        exploration = ConstantSchedule(exploration_final_eps)
    else:  # not used
        exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                     initial_p=1.0,
                                     final_p=exploration_final_eps)

    model.update_target()

    # ============================================== pre-training ======================================================
    start = time()
    num_episodes = 0
    temp_buffer = deque(maxlen=n_step)
    for t in tqdm(range(pre_train_timesteps)):
        # sample and train
        experience = replay_buffer.sample(batch_size, beta=prioritized_replay_beta0)
        batch_idxes = experience[-1]
        if experience[6] is None:  # for n_step = 0
            obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple(map(tf.constant, experience[:6]))
            obses_tpn, rewards_n, dones_n = None, None, None
            weights = tf.constant(experience[-2])
        else:
            obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple(
                map(tf.constant, experience[:-1]))
        td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = model.train(
            obses_t, actions, rewards, obses_tp1, dones, is_demos, weights, obses_tpn, rewards_n, dones_n)

        # Update priorities
        new_priorities = np.abs(td_errors) + np.abs(n_td_errors) + demo_prioritized_replay_eps
        replay_buffer.update_priorities(batch_idxes, new_priorities)

        # Update target network periodically
        if t > 0 and t % target_network_update_freq == 0:
            model.update_target()

        # Logging
        elapsed_time = timedelta(seconds=time() - start)
        if print_freq is not None and t % 10000 == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", 0)
            logger.record_tabular("max 100 episode reward", 0)
            logger.record_tabular("min 100 episode reward", 0)
            logger.record_tabular("demo sample rate", 1)
            logger.record_tabular("epsilon", 0)
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", True)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()

    # ============================================== exploring =========================================================
    sample_counts = 0
    demo_used_counts = 0
    episode_rewards = deque(maxlen=100)
    this_episode_reward = 0.
    best_score = 0.
    saved_mean_reward = None
    is_demo = False
    obs = env.reset()
    # Always mimic the vectorized env
    obs = np.expand_dims(np.array(obs), axis=0)
    reset = True
    for t in tqdm(range(total_timesteps)):
        if callback is not None:
            if callback(locals(), globals()):
                break
        kwargs = {}
        if not param_noise:
            update_eps = tf.constant(exploration.value(t))
            update_param_noise_threshold = 0.
        else:  # not used
            update_eps = tf.constant(0.)
            update_param_noise_threshold = -np.log(
                1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
            kwargs['reset'] = reset
            kwargs['update_param_noise_threshold'] = update_param_noise_threshold
            kwargs['update_param_noise_scale'] = True
        action, epsilon, _, _ = model.step(tf.constant(obs), update_eps=update_eps, **kwargs)
        action = action[0].numpy()
        reset = False
        new_obs, rew, done, _ = env.step(action)

        # Store transition in the replay buffer.
        new_obs = np.expand_dims(np.array(new_obs), axis=0)
        if n_step:
            temp_buffer.append((obs, action, rew, new_obs, done, is_demo))
            if len(temp_buffer) == n_step:
                n_step_sample = get_n_step_sample(temp_buffer, gamma)
                replay_buffer.add(*n_step_sample)
        else:
            replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), 0.)
        obs = new_obs

        # invert log scaled score for logging
        this_episode_reward += np.sign(rew) * (np.exp(np.sign(rew) * rew) - 1.)
        if done:
            num_episodes += 1
            obs = env.reset()
            obs = np.expand_dims(np.array(obs), axis=0)
            episode_rewards.append(this_episode_reward)
            reset = True
            if this_episode_reward > best_score:
                best_score = this_episode_reward
                ckpt = tf.train.Checkpoint(model=model)
                manager = tf.train.CheckpointManager(ckpt, './best_model', max_to_keep=1)
                manager.save(t)
                logger.log("saved best model")
            this_episode_reward = 0.0

        if t % train_freq == 0:
            experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
            batch_idxes = experience[-1]
            if experience[6] is None:  # for n_step = 0
                obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple(map(tf.constant, experience[:6]))
                obses_tpn, rewards_n, dones_n = None, None, None
                weights = tf.constant(experience[-2])
            else:
                obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple(
                    map(tf.constant, experience[:-1]))
            td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = model.train(
                obses_t, actions, rewards, obses_tp1, dones, is_demos, weights, obses_tpn, rewards_n, dones_n)
            new_priorities = (np.abs(td_errors) + np.abs(n_td_errors)
                              + demo_prioritized_replay_eps * is_demos
                              + prioritized_replay_eps * (1. - is_demos))
            replay_buffer.update_priorities(batch_idxes, new_priorities)
            # for logging
            sample_counts += batch_size
            demo_used_counts += np.sum(is_demos)

        if t % target_network_update_freq == 0:
            # Update target network periodically.
            model.update_target()

        if t % checkpoint_freq == 0:
            save_path = checkpoint_path
            ckpt = tf.train.Checkpoint(model=model)
            manager = tf.train.CheckpointManager(ckpt, save_path, max_to_keep=10)
            manager.save(t)
            logger.log("saved checkpoint")

        elapsed_time = timedelta(seconds=time() - start)
        if done and num_episodes > 0 and num_episodes % print_freq == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", np.mean(episode_rewards))
            logger.record_tabular("max 100 episode reward", np.max(episode_rewards))
            logger.record_tabular("min 100 episode reward", np.min(episode_rewards))
            logger.record_tabular("demo sample rate", demo_used_counts / sample_counts)
            logger.record_tabular("epsilon", epsilon.numpy())
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", False)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()

    return model
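# Hedged usage sketch (not part of the original code): how this DQfD-style `learn`
# might be invoked. The environment id, demo file name, and step counts are assumptions;
# the one hard requirement visible above is a pickled list of demonstration episodes,
# each an iterable of (obs, action, reward, next_obs, done) tuples, passed via `demo_path`.
def _example_dqfd_learn():  # hypothetical helper, never invoked by this module
    import gym
    env = gym.make("PongNoFrameskip-v4")  # placeholder Atari-style environment
    model = learn(env,
                  network="conv_only",
                  demo_path="expert_demos.pkl",   # pickle: [[(s, a, r, s', done), ...], ...]
                  n_step=10,                      # n-step returns; 0 stores plain 1-step transitions
                  pre_train_timesteps=10000,      # gradient steps on demo data before any env interaction
                  total_timesteps=100000,
                  checkpoint_path="./checkpoints")
    return model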
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          train_mode=True,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names
        of registered models in baselines.common.models (mlp, cnn, conv_only). If a function,
        should take an observation tensor and return a latent variable tensor, which will be
        mapped to the Q function heads (see build_q_func in baselines.deepq.models for details).
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for the Adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of the entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of the random-action probability
    train_freq: int
        update the model every `train_freq` steps
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print training progress; set to None to disable printing
    checkpoint_freq: int
        how often to save the model, so that the best version is restored at the end of training.
        Set to None if you do not wish to restore the best version at the end of training.
    learning_starts: int
        how many steps of transitions to collect before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used
    prioritized_replay_alpha: float
        alpha parameter for the prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for the prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta is annealed from its initial value to 1.0.
        If None, defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon added to the TD errors when updating priorities
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from (default: None)
    train_mode: bool
        passed to env.reset() for the Unity ML-Agents environment (True resets in training mode)
    **network_kwargs
        additional keyword arguments to pass to the network builder

    Returns
    -------
    act: ActWrapper
        Wrapper over the act function. Adds the ability to save and load it.
        See the header of baselines/deepq/categorical.py for details on the act function.
""" # Examine environment parameters print(str(env)) # Set the default brain to work with default_brain = env.brain_names[0] brain = env.brains[default_brain] num_actions=brain.vector_action_space_size[0] # Create all the functions necessary to train the model sess = get_session() #set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph #observation_space = env.observation_space env_info = env.reset(train_mode=train_mode)[default_brain] state = get_obs_state_lidar(env_info) observation_space=state.copy() #def make_obs_ph(name,Num_action): # tf.placeholder(shape=(None,) + state.shape, dtype=state.dtype, name='st') # return tf.placeholder(tf.float32, shape = [None, Num_action],name=name) def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug =build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. 
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
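# Hedged usage sketch (not part of the original code): calling the Unity ML-Agents
# variant of `learn`. The import path and build path are assumptions that depend on the
# (legacy) ML-Agents version in use; the environment must expose `brain_names`, `brains`,
# and reset/step results consumable by `get_obs_state_lidar`.
def _example_unity_learn():  # hypothetical helper, never invoked by this module
    from mlagents.envs import UnityEnvironment  # assumed legacy ML-Agents Python API
    env = UnityEnvironment(file_name="path/to/unity_build")  # placeholder build path
    act = learn(env,
                network="mlp",
                total_timesteps=200000,
                train_mode=True)  # reset the Unity env in training mode
    env.close()
    return act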