def __init__(self, network, obs_dim, num_actions, gamma=0.9, lam=0.95, reuse=None):
    self.num_actions = num_actions
    self.gamma = gamma
    self.lam = lam
    self.t = 0
    self.obss = []
    self.actions = []
    self.rewards = []
    self.values = []
    self.next_values = []
    act, train, update_old, backup_current = build_graph.build_train(
        network=network,
        obs_dim=obs_dim,
        num_actions=num_actions,
        gamma=gamma,
        reuse=reuse)
    self._act = act
    self._train = train
    self._update_old = update_old
    self._backup_current = backup_current
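# Illustrative helper (not part of the original class): the gamma/lam pair and
# the rewards/values/next_values buffers above are exactly the inputs of
# generalized advantage estimation (GAE). A minimal numpy sketch, assuming a
# parallel `dones` array is also recorded per step:
import numpy as np

def compute_gae(rewards, values, next_values, dones, gamma=0.9, lam=0.95):
    """Return GAE advantages for a single trajectory (1-D array inputs)."""
    advantages = np.zeros(len(rewards), dtype=np.float32)
    last_adv = 0.0
    for t in reversed(range(len(rewards))):
        nonterminal = 1.0 - dones[t]
        # one-step TD error
        delta = rewards[t] + gamma * next_values[t] * nonterminal - values[t]
        # exponentially weighted sum of future TD errors
        last_adv = delta + gamma * lam * nonterminal * last_adv
        advantages[t] = last_adv
    return advantages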
def __init__(self, actor, critic, obs_dim, num_actions, replay_buffer,
             batch_size=4, sequence_length=8, episode_update=True, gamma=0.9):
    self.batch_size = batch_size
    self.sequence_length = sequence_length
    self.episode_update = episode_update
    self.num_actions = num_actions
    self.gamma = gamma
    self.obs_dim = obs_dim
    self.last_obs = None
    self.t = 0
    self.replay_buffer = replay_buffer
    self.actor_lstm_state = np.zeros((2, 1, 64), dtype=np.float32)
    self._act,\
    self._train_actor,\
    self._train_critic,\
    self._update_actor_target,\
    self._update_critic_target = build_graph.build_train(
        actor=actor,
        critic=critic,
        obs_dim=obs_dim,
        num_actions=num_actions,
        batch_size=batch_size,
        gamma=gamma
    )
def __init__(self, actor, critic, obs_dim, num_actions, replay_buffer,
             batch_size=16, gamma=0.9):
    self.batch_size = batch_size
    self.num_actions = num_actions
    self.gamma = gamma
    self.last_obs = None
    self.t = 0
    self.exploration = 3
    self.replay_buffer = replay_buffer
    self._act,\
    self._train_actor,\
    self._train_critic,\
    self._update_actor_target,\
    self._update_critic_target = build_graph.build_train(
        actor=actor,
        critic=critic,
        obs_dim=obs_dim,
        num_actions=num_actions,
        gamma=gamma
    )
def __init__(self, config, env_creator):
    self.config = config
    self.local_timestep = 0
    self.episode_rewards = [0.0]
    self.episode_lengths = [0.0]
    if "cartpole" in self.config["env_config"]:
        self.env = env_creator(self.config["env_config"])
    else:
        self.env = wrap_deepmind(
            env_creator(self.config["env_config"]),
            clip_rewards=False,
            frame_stack=True,
            scale=True)
    self.obs = self.env.reset()
    self.sess = U.make_session()
    self.sess.__enter__()

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    observation_space_shape = self.env.observation_space.shape

    def make_obs_ph(name):
        return BatchInput(observation_space_shape, name=name)

    if "cartpole" in self.config["env_config"]:
        q_func = models.mlp([64])
    else:
        q_func = models.cnn_to_mlp(
            convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
            hiddens=[256],
            dueling=True,
        )

    act, self.train, self.update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=self.env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=self.config["lr"]),
        gamma=self.config["gamma"],
        grad_norm_clipping=10,
        param_noise=False
    )
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': self.env.action_space.n,
    }
    self.act = ActWrapper(act, act_params)

    # Create the schedule for exploration starting from 1.
    self.exploration = LinearSchedule(
        schedule_timesteps=int(self.config["exploration_fraction"] *
                               self.config["schedule_max_timesteps"]),
        initial_p=1.0,
        final_p=self.config["exploration_final_eps"])

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    self.update_target()
def __init__(self, network, actions, optimizer, nenvs, gamma=0.9, lstm_unit=256,
             time_horizon=128, policy_factor=1.0, value_factor=0.5,
             entropy_factor=0.01, epsilon=0.2, lam=0.95, state_shape=[84, 84, 1],
             phi=lambda s: s, continuous=False, reuse=None, name='ppo'):
    self.actions = actions
    self.gamma = gamma
    self.lam = lam
    self.name = name
    self.nenvs = nenvs
    self.time_horizon = time_horizon
    self.state_shape = state_shape
    self.phi = phi
    self.continuous = continuous
    self._act,\
    self._train,\
    self._update_old,\
    self._backup_current = build_graph.build_train(
        network=network,
        num_actions=len(actions),  # was an undefined `num_actions`
        optimizer=optimizer,
        nenvs=nenvs,
        lstm_unit=lstm_unit,
        state_shape=state_shape,
        value_factor=value_factor,
        policy_factor=policy_factor,
        entropy_factor=entropy_factor,
        epsilon=epsilon,
        gamma=gamma,
        reuse=reuse,  # `reuse` was referenced but missing from the signature
        scope=name
    )
    self.initial_state = np.zeros((nenvs, lstm_unit), np.float32)
    self.rnn_state0 = self.initial_state
    self.rnn_state1 = self.initial_state
    self.last_obs = None
    self.last_action = None
    self.last_value = None
    self.rollouts = [Rollout() for _ in range(nenvs)]
    self.t = 0
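# For reference: the `epsilon` argument above is PPO's clipping parameter. A
# minimal, self-contained sketch (TF1-style, matching this codebase) of the
# clipped surrogate objective that build_graph.build_train presumably
# constructs internally; this is an illustration, not the repo's exact graph:
import tensorflow as tf

def clipped_surrogate_loss(log_prob, old_log_prob, advantage, epsilon=0.2):
    """PPO clipped policy loss (Schulman et al., 2017)."""
    ratio = tf.exp(log_prob - old_log_prob)
    surr1 = ratio * advantage
    surr2 = tf.clip_by_value(ratio, 1.0 - epsilon, 1.0 + epsilon) * advantage
    # pessimistic bound: take the minimum of clipped and unclipped surrogates
    return -tf.reduce_mean(tf.minimum(surr1, surr2))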
def __init__(self, network, dnds, actions, state_shape, replay_buffer,
             exploration, constants, phi=lambda s: s,
             run_options=None, run_metadata=None):
    self.actions = actions
    self.num_actions = len(actions)
    self.replay_buffer = replay_buffer
    self.exploration = exploration
    self.constants = constants
    self.dnds = dnds
    self.phi = phi
    self.cache = Cache(constants.N_STEP, constants.GAMMA)
    self.last_obs = None
    self.t = 0
    self.t_in_episode = 0

    # TODO: remove
    self.run_options = run_options
    self.run_metadata = run_metadata

    if constants.OPTIMIZER == 'adam':
        optimizer = tf.train.AdamOptimizer(constants.LR)
    else:
        optimizer = tf.train.RMSPropOptimizer(learning_rate=constants.LR,
                                              momentum=constants.MOMENTUM,
                                              epsilon=constants.EPSILON)

    self._act,\
    self._write,\
    self._train = build_graph.build_train(
        encode=network,
        num_actions=self.num_actions,
        state_shape=state_shape,
        optimizer=optimizer,
        dnds=self.dnds,
        key_size=constants.DND_KEY_SIZE,
        grad_clipping=constants.GRAD_CLIPPING,
        run_options=self.run_options,
        run_metadata=self.run_metadata
    )
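# For context: each DND above is the differentiable neural dictionary of
# Neural Episodic Control (Pritzel et al., 2017). An illustrative numpy sketch
# of the lookup it performs — a kernel-weighted average over the p nearest
# stored keys; the repo's TF implementation will differ in detail:
import numpy as np

def dnd_lookup(query_key, keys, values, p=50, delta=1e-3):
    """Estimate a Q-value from the p nearest (key, value) pairs."""
    dists = np.sum((keys - query_key) ** 2, axis=1)
    nearest = np.argsort(dists)[:p]
    weights = 1.0 / (dists[nearest] + delta)  # inverse-distance kernel
    weights /= weights.sum()
    return np.dot(weights, values[nearest])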
def __init__(self, model, actions, optimizer, gamma=0.99, lstm_unit=256,
             time_horizon=5, policy_factor=1.0, value_factor=0.5,
             entropy_factor=0.01, grad_clip=40.0, state_shape=[84, 84, 1],
             phi=lambda s: s, name='global'):
    self.actions = actions
    self.gamma = gamma
    self.name = name
    self.time_horizon = time_horizon
    self.state_shape = state_shape
    self.phi = phi
    self._act,\
    self._train,\
    self._update_local = build_graph.build_train(
        model=model,
        num_actions=len(actions),
        optimizer=optimizer,
        lstm_unit=lstm_unit,
        state_shape=state_shape,
        grad_clip=grad_clip,
        policy_factor=policy_factor,
        value_factor=value_factor,
        entropy_factor=entropy_factor,
        scope=name
    )
    self.initial_state = np.zeros((1, lstm_unit), np.float32)
    self.rnn_state0 = self.initial_state
    self.rnn_state1 = self.initial_state
    self.last_obs = None
    self.last_action = None
    self.last_value = None
    self.rollout = Rollout()
    self.t = 0
def __init__(self, model, dnds, num_actions, name='global', lr=2.5e-4,
             gamma=0.99, plotter=None):
    self.num_actions = num_actions
    self.gamma = gamma
    self.t = 0
    self.name = name
    self.dnds = dnds
    self.plotter = plotter
    act, train, update_local, action_dist, state_value = build_graph.build_train(
        model=model,
        dnds=dnds,
        num_actions=num_actions,
        optimizer=tf.train.RMSPropOptimizer(learning_rate=7e-4, decay=.99, epsilon=0.1),
        scope=name
    )
    self._act = act
    self._train = train
    self._update_local = update_local
    self._action_dist = action_dist
    self._state_value = state_value
    self.initial_state = np.zeros((1, 258), np.float32)
    self.rnn_state0 = self.initial_state
    self.rnn_state1 = self.initial_state
    self.last_obs = None
    self.last_reward = None
    self.last_action = None
    self.last_value = None
    self.states = []
    self.rewards = []
    self.actions = []
    self.values = []
    self.encodes = []
    self.rotations = []
    self.movements = []
    self.positions = []
    self.directions = []
    self.position_changes = []
    self.pos_track = PositionTrack()
def __init__(self, model, icm_model, num_actions, name='global', lr=2.5e-4, gamma=0.99):
    self.num_actions = num_actions
    self.gamma = gamma
    self.t = 0
    self.name = name
    act, train, update_local, state_value, bonus = build_graph.build_train(
        model=model,
        icm_model=icm_model,
        num_actions=num_actions,
        optimizer=tf.train.AdamOptimizer(learning_rate=1e-4),
        scope=name)
    self._act = act
    self._train = train
    self._update_local = update_local
    self._state_value = state_value
    self._bonus = bonus
    self.initial_state = np.zeros((1, 256), np.float32)
    self.rnn_state0 = self.initial_state
    self.rnn_state1 = self.initial_state
    self.last_obs = None
    self.last_reward = None
    self.last_action = None
    self.last_value = None
    self.states = []
    self.next_states = []
    self.rewards = []
    self.actions = []
    self.values = []
def __init__(self, q_func, actions, state_shape, replay_buffer, exploration,
             optimizer, gamma, grad_norm_clipping, phi=lambda s: s,
             batch_size=32, train_freq=4, learning_starts=1e4,
             target_network_update_freq=1e4):
    self.batch_size = batch_size
    self.train_freq = train_freq
    self.actions = actions
    self.learning_starts = learning_starts
    self.target_network_update_freq = target_network_update_freq
    self.exploration = exploration
    self.replay_buffer = replay_buffer
    self.phi = phi
    self._act,\
    self._train,\
    self._update_target,\
    self._q_values = build_graph.build_train(
        q_func=q_func,
        num_actions=len(actions),
        state_shape=state_shape,
        optimizer=optimizer,
        gamma=gamma,
        grad_norm_clipping=grad_norm_clipping
    )
    self.last_obs = None
    self.t = 0
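# Usage sketch (illustrative only): `DQNAgent` is a hypothetical name for the
# class owning the constructor above; ReplayBuffer and LinearSchedule are
# assumed to be this repo's helpers, and `q_func` a network builder of the
# kind passed to build_graph.build_train.
#
#   replay_buffer = ReplayBuffer(10 ** 5)
#   exploration = LinearSchedule(schedule_timesteps=10 ** 6,
#                                initial_p=1.0, final_p=0.02)
#   agent = DQNAgent(q_func, actions=list(range(env.action_space.n)),
#                    state_shape=[84, 84, 4],
#                    replay_buffer=replay_buffer,
#                    exploration=exploration,
#                    optimizer=tf.train.AdamOptimizer(1e-4),
#                    gamma=0.99, grad_norm_clipping=10.0)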
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=100000,
          exploration_fraction=0.1,
          exploration_final_eps=0.1,
          train_freq=1,
          batch_size=64,
          print_freq=1,
          eval_freq=2500,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          csv_path="results.csv",
          method_type="baseline",
          **network_kwargs):
    """Train a deepr model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of
        the names of registered models in baselines.common.models (mlp, cnn, conv_only).
        If a function, should take an observation tensor and return a latent variable
        tensor, which will be mapped to the Q function heads (see build_q_func in
        baselines.deepr.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None,
        no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    eval_freq: int
        how often (in steps) to start a new evaluation window
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    csv_path: str
        path of the CSV file the evaluation rewards are written to
    method_type: str
        action-masking variant to use ('baseline' or 'shaping'; see get_mask)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepr/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) #q_func = build_q_func(network, **network_kwargs) q_func = build_q_func(mlp(num_layers=4, num_hidden=64), **network_kwargs) #q_func = build_q_func(mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu), **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule( schedule_timesteps=int(exploration_fraction * total_timesteps), #initial_p=1.0, initial_p=exploration_final_eps, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() eval_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) csvfile = open(csv_path, 'w', newline='') fieldnames = ['STEPS', 'REWARD'] writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for t in range(total_timesteps + 1): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: #update_eps = exploration.value(t) update_eps = exploration_final_eps update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. 
- exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action_mask = get_mask(env, method_type) a = act(np.array(obs)[None], unused_actions_neginf_mask=action_mask, update_eps=update_eps, **kwargs)[0] env_action = a reset = False new_obs, rew, done, _ = env.step(env_action) eval_rewards[-1] += rew action_mask_p = get_mask(env, method_type) # Shaping if method_type == 'shaping': ## look-ahead shaping ap = act(np.array(new_obs)[None], unused_actions_neginf_mask=action_mask_p, stochastic=False)[0] f = action_mask_p[ap] - action_mask[a] rew = rew + f # Store transition in the replay buffer. #replay_buffer.add(obs, a, rew, new_obs, float(done), action_mask_p) if method_type != 'shaping': replay_buffer.add(obs, a, rew, new_obs, float(done), np.zeros(env.action_space.n)) else: replay_buffer.add(obs, a, rew, new_obs, float(done), action_mask_p) obs = new_obs if t % eval_freq == 0: eval_rewards.append(0.0) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones, masks_tp1 = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights, masks_tp1) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_eval_reward = round(np.mean(eval_rewards[-1 - print_freq:-1]), 1) num_evals = len(eval_rewards) if t > 0 and t % eval_freq == 0 and print_freq is not None and t % ( print_freq * eval_freq) == 0: #if done and print_freq is not None and len(eval_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("evals", num_evals) logger.record_tabular("average reward in this eval", mean_eval_reward / (eval_freq)) logger.record_tabular("total reward in this eval", mean_eval_reward) logger.dump_tabular() writer.writerow({ "STEPS": t, "REWARD": mean_eval_reward / (eval_freq) }) csvfile.flush() if (checkpoint_freq is not None and t > learning_starts and num_evals > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_eval_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_eval_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_eval_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
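# Example invocation (illustrative; assumes a Gym environment, which this
# module does not construct itself):
#
#   import gym
#   env = gym.make("CartPole-v0")
#   act = learn(env, network='mlp', total_timesteps=100000,
#               csv_path="results.csv", method_type="baseline")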
def __init__(self, model, num_actions, nenvs, lr, epsilon, gamma=0.99, lam=0.95,
             lstm_unit=256, value_factor=0.5, entropy_factor=0.01,
             time_horizon=128, batch_size=32, epoch=3, grad_clip=40.0,
             state_shape=[84, 84, 1], phi=lambda s: s, use_lstm=False,
             continuous=False, upper_bound=1.0, name='ppo', training=True):
    self.num_actions = num_actions
    self.gamma = gamma
    self.lam = lam
    self.lstm_unit = lstm_unit
    self.name = name
    self.state_shape = state_shape
    self.nenvs = nenvs
    self.lr = lr
    self.epsilon = epsilon
    self.time_horizon = time_horizon
    self.batch_size = batch_size
    self.epoch = epoch
    self.phi = phi
    self.use_lstm = use_lstm
    self.continuous = continuous
    self.upper_bound = upper_bound
    self.episode_experience = []
    self.all_experience = []
    self.ep_count = 0
    self._act, self._train = build_train(
        model=model,
        num_actions=num_actions,
        lr=lr.get_variable(),
        epsilon=epsilon.get_variable(),
        nenvs=nenvs,
        step_size=batch_size,
        lstm_unit=lstm_unit,
        state_shape=state_shape,
        grad_clip=grad_clip,
        value_factor=value_factor,
        entropy_factor=entropy_factor,
        continuous=continuous,
        scope=name
    )
    self.initial_state = np.zeros((nenvs, lstm_unit * 2), np.float32)
    self.rnn_state = self.initial_state
    self.state_tm1 = dict(obs=None, action=None, value=None, log_probs=None,
                          done=None, rnn_state=None)
    self.rollouts = [Rollout() for _ in range(nenvs)]
    self.t = 0
    self.training = training
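# Note: `lr` and `epsilon` above are objects exposing .get_variable(), i.e.
# schedulable values backed by TF variables rather than plain floats. A minimal
# sketch of such a wrapper (hypothetical; the repo's own class may anneal the
# value differently):
import tensorflow as tf

class ScheduledValue:
    """Wraps a non-trainable TF variable so it can be annealed during training."""
    def __init__(self, initial_value):
        self._var = tf.Variable(initial_value, trainable=False, dtype=tf.float32)

    def get_variable(self):
        return self._var

    def assign(self, sess, new_value):
        # update the underlying variable, e.g. from a decay schedule
        sess.run(self._var.assign(new_value))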
def learn(env,
          q_func,
          lr=5e-4,
          lr_decay_factor=0.99,
          lr_growth_factor=1.01,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=0.9,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          callback=None,
          varTH=1e-05,
          noise=0.0,
          epoch_steps=20000,
          alg='adfq',
          gpu_memory=1.0,
          act_policy='egreedy',
          save_dir='.',
          nb_test_steps=10000,
          scope='deepadfq',
          test_eps=0.05,
          init_t=0,
          render=False,
          map_name=None,
          num_targets=1,
          im_size=None):
    """Train a deepadfq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    varTH : float
        variance threshold
    noise : float
        noise for stochastic cases
    epoch_steps : int
        the number of steps per epoch
    alg : str
        'adfq' or 'adfq-v2'
    gpu_memory : float
        a fraction of a gpu memory when running multiple programs in the same gpu
    act_policy : str
        action policy, 'egreedy' or 'bayesian'
    save_dir : str
        path for saving results
    nb_test_steps : int
        step bound in evaluation
    scope : str
        scope of the network
    test_eps : float
        epsilon of the epsilon greedy action policy during testing
    init_t : int
        an initial learning step if you start training from a pre-trained model
    render : bool
        whether to render the environment during training
    map_name, num_targets :
        passed through to the evaluation routine for the target-tracking environments

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines0/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model config = tf.ConfigProto(allow_soft_placement=True) config.gpu_options.per_process_gpu_memory_fraction = gpu_memory config.gpu_options.polling_inactive_delay_msecs = 25 sess = tf.Session(config=config) sess.__enter__() num_actions=env.action_space.n varTH = np.float32(varTH) adfq_func = posterior_adfq if alg == 'adfq' else posterior_adfq_v2 # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph # observation_space_shape = env.observation_space.shape observation_space_shape = env.observation_space.shape def make_obs_ph(name): return BatchInput(observation_space_shape, name=name) act, act_test, q_target_vals, train, update_target, lr_decay_op, lr_growth_op = build_graph.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer_f=tf.train.AdamOptimizer, gamma=gamma, grad_norm_clipping=10, varTH=varTH, act_policy=act_policy, scope=scope, test_eps=test_eps, learning_rate = lr, learning_rate_decay_factor = lr_decay_factor, learning_rate_growth_factor = lr_growth_factor, ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None timelimit_env = env while( not hasattr(timelimit_env, '_elapsed_steps')): timelimit_env = timelimit_env.env if timelimit_env.env.spec: env_id = timelimit_env.env.spec.id else: env_id = timelimit_env.env.id obs = env.reset() reset = True num_eps = 0 # recording records = {'q_mean':[], 'q_sd':[], 'loss':[], 'online_reward':[], 'test_reward':[], 'learning_rate':[], 'time':[], 'eval_value':[]} with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_saved = False model_file = os.path.join(td, "model") if tf.train.latest_checkpoint(td) is not None: load_state(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True learning_starts += init_t ep_losses, ep_means, ep_sds, losses, means, sds = [], [], [], [], [], [] ep_mean_err, ep_sd_err, mean_errs, sd_errs = [], [], [], [] checkpt_loss = [] curr_lr = lr s_time = time.time() print("===== LEARNING STARTS =====") for t in range(init_t,max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} update_eps = exploration.value(t) action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, info = env.step(env_action) # Store transition in the replay buffer. 
            if timelimit_env._elapsed_steps < timelimit_env._max_episode_steps:
                replay_buffer.add(obs, action, rew, new_obs, float(done))
            else:
                replay_buffer.add(obs, action, rew, new_obs, float(not done))
            obs = new_obs
            episode_rewards[-1] += rew

            if done:
                obs = env.reset()
                reset = True
                num_eps += 1
                episode_rewards.append(0.0)
                if losses:
                    ep_losses.append(np.mean(losses))
                    ep_means.append(np.mean(means))
                    ep_sds.append(np.mean(sds))
                    losses, means, sds = [], [], []
                    ep_mean_err.append(np.mean(mean_errs))
                    ep_sd_err.append(np.mean(sd_errs))
                    mean_errs, sd_errs = [], []

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                stats_t = q_target_vals(obses_t)[0]
                stats_tp1 = q_target_vals(obses_tp1)[0]
                ind = np.arange(batch_size)
                mean_t = stats_t[ind, actions.astype(int)]
                sd_t = np.exp(-stats_t[ind, actions.astype(int) + num_actions])
                mean_tp1 = stats_tp1[:, :num_actions]
                sd_tp1 = np.exp(-stats_tp1[:, num_actions:])
                var_t = np.maximum(varTH, np.square(sd_t))
                var_tp1 = np.maximum(varTH, np.square(sd_tp1))

                target_mean, target_var, _ = adfq_func(mean_tp1, var_tp1, mean_t, var_t,
                                                       rewards, gamma, terminal=dones,
                                                       asymptotic=False, batch=True,
                                                       noise=noise, varTH=varTH)
                target_mean = np.reshape(target_mean, (-1))
                target_sd = np.reshape(np.sqrt(target_var), (-1))
                loss, m_err, s_err, curr_lr = train(obses_t, actions, target_mean,
                                                    target_sd, weights)
                losses.append(loss)
                means.append(np.mean(mean_tp1))
                sds.append(np.mean(sd_tp1))
                mean_errs.append(np.mean(np.abs(m_err)))
                sd_errs.append(np.mean(np.abs(s_err)))
                if prioritized_replay:
                    new_priorities = np.abs(m_err) + np.abs(s_err) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)
                if render:
                    env.render(traj_num=num_eps)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)

            if (t + 1) % (epoch_steps / 2) == 0 and (t + 1) > learning_starts:
                if ep_losses:
                    mean_loss = np.float16(np.mean(ep_losses))
                    if len(checkpt_loss) > 2 and mean_loss > np.float16(max(checkpt_loss[-3:])) \
                            and lr_decay_factor < 1.0:
                        sess.run(lr_decay_op)
                        print("Learning rate decayed due to an increase in loss: %.4f -> %.4f"
                              % (np.float16(max(checkpt_loss[-3:])), mean_loss))
                    elif len(checkpt_loss) > 2 and mean_loss < np.float16(min(checkpt_loss[-3:])) \
                            and lr_growth_factor > 1.0:
                        sess.run(lr_growth_op)
                        print("Learning rate grown due to a decrease in loss: %.4f -> %.4f"
                              % (np.float16(min(checkpt_loss[-3:])), mean_loss))
                    checkpt_loss.append(mean_loss)

            if (t + 1) % epoch_steps == 0 and (t + 1) > learning_starts:
                records['time'].append(time.time() - s_time)
                test_reward, eval_value = test(env_id, act_test,
                                               nb_test_steps=nb_test_steps,
                                               map_name=map_name,
                                               num_targets=num_targets)
                records['test_reward'].append(test_reward)
                records['eval_value'].append(eval_value)
                records['q_mean'].append(np.mean(ep_means))
                records['q_sd'].append(np.mean(ep_sds))
                records['loss'].append(np.mean(ep_losses))
                records['online_reward'].append(round(np.mean(episode_rewards[-101:-1]), 1))
                records['learning_rate'].append(curr_lr)
                pickle.dump(records, open(os.path.join(save_dir, "records.pkl"), "wb"))
                print("==== EPOCH %d ===" % ((t + 1) / epoch_steps))
                print(tabulate([[k, v[-1]] for (k, v) in records.items()]))
                s_time = time.time()

            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.record_tabular("averaged loss", np.mean(ep_losses[-print_freq:]))
                logger.record_tabular("averaged output mean", np.mean(ep_means[-print_freq:]))
                logger.record_tabular("averaged output sd", np.mean(ep_sds[-print_freq:]))
                logger.record_tabular("averaged error mean", np.mean(ep_mean_err[-print_freq:]))
                logger.record_tabular("averaged error sds", np.mean(ep_sd_err[-print_freq:]))
                logger.record_tabular("learning rate", curr_lr)
                logger.dump_tabular()

            if (checkpoint_freq is not None and (t + 1) > learning_starts
                    and (t + 1) % checkpoint_freq == 0):  #num_episodes > 100 and
                print("Saving model to model_%d.pkl" % (t + 1))
                act.save(os.path.join(save_dir, "model_" + str(t + 1) + ".pkl"))
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

    return act, records
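# Example invocation (illustrative; `model` stands for a q_func builder of the
# documented (observation_in, num_actions, scope, reuse) form):
#
#   act, records = learn(env, q_func=model, max_timesteps=100000,
#                        alg='adfq', act_policy='egreedy', save_dir='./results')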
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          param_noise=False,
          callback=None,
          tf_log_dir=None,
          tf_flush_freq=100,
          tf_model_freq=10000):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    param_noise: bool
        whether or not to use parameter space noise
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    tf_log_dir: str
        directory for TensorBoard summaries and periodic tf.train.Saver checkpoints;
        set to None to disable both
    tf_flush_freq: int
        how often (in steps) to flush the summary writer
    tf_model_freq: int
        how often (in steps) to save the model with the tf.train.Saver

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) act, train, update_target, debug = build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() # inject some Tensorboard usage. tf_summary_writer = tf.summary.FileWriter('{}/summary'.format(tf_log_dir)) if tf_log_dir is not None else None tf_saver = tf.train.Saver(max_to_keep=10) if tf_log_dir is not None else None update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: model_saved = False model_file = os.path.join(td, "model") print('====', model_file) for t in range(max_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] reset = False new_obs, rew, done, _ = env.step(action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done and tf_summary_writer is not None: summary = tf.Summary() summary.value.add(tag='info/episode_reward', simple_value=float(episode_rewards[-1])) summary.value.add(tag='info/esp', simple_value=float(update_eps)) tf_summary_writer.add_summary(summary, t) if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

                if tf_summary_writer is not None:
                    summary = tf.Summary()
                    summary.value.add(tag='model/loss',
                                      simple_value=float(td_errors[0]))  # TODO: mean the loss
                    tf_summary_writer.add_summary(summary, t)
                    if t % tf_flush_freq == 0:
                        tf_summary_writer.flush()

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                        logger.log("Saving model path: {}".format(model_file))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

            if tf_saver is not None and t % tf_model_freq == 0:
                assert tf_log_dir is not None
                tf_saver.save(sess=sess, save_path='{}/model/model'.format(tf_log_dir),
                              global_step=t)

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)
    return ActWrapper(act, act_params)
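# Example invocation (illustrative; assumes a Gym environment and a
# baselines-style q_func built elsewhere):
#
#   import gym
#   env = gym.make("CartPole-v0")
#   act = learn(env, q_func=model, lr=5e-4, max_timesteps=100000,
#               tf_log_dir="./logs")  # enables the TensorBoard hooks above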
def learn(env,
          q_func,
          lr=5e-4,
          lr_decay_factor=0.99,
          lr_growth_factor=1.01,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=0.9,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          callback=None,
          scope='deepadfq',
          alg='adfq',
          sdMin=1e-5,
          sdMax=1e5,
          noise=0.0,
          act_policy='egreedy',
          epoch_steps=20000,
          eval_logger=None,
          save_dir='.',
          test_eps=0.05,
          init_t=0,
          gpu_memory=1.0,
          render=False,
          **kwargs):
    """Train a deepadfq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: float
        update the target network every `target_network_update_freq` steps. If it is
        less than 1, it performs the soft target network update with the given rate.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    scope : str
        scope of the network.
    alg : str
        'adfq' or 'adfq-v2'.
    sdMin, sdMax : float
        the minimum and maximum values for the standard deviations.
    noise : float
        noise for stochastic cases.
    act_policy : str
        action policy, 'egreedy' or 'bayesian'.
    epoch_steps : int
        the number of steps per epoch.
    eval_logger : Logger()
        the Logger() class object under the deep_adfq folder.
    save_dir : str
        path for saving results.
    test_eps : float
        epsilon of the epsilon greedy action policy during testing.
    init_t : int
        an initial learning step if you start training from a pre-trained model.
    gpu_memory : float
        a fraction of a gpu memory when running multiple programs in the same gpu.
    render : bool
        whether to render the environment during training.
    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines0/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    config = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    config.gpu_options.per_process_gpu_memory_fraction = gpu_memory
    config.gpu_options.polling_inactive_delay_msecs = 25
    sess = tf.compat.v1.Session(config=config)
    sess.__enter__()

    num_actions = env.action_space.n
    adfq_func = posterior_adfq if alg == 'adfq' else posterior_adfq_v2

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    observation_space_shape = env.observation_space.shape

    def make_obs_ph(name):
        return BatchInput(observation_space_shape, name=name)

    target_network_update_rate = np.minimum(target_network_update_freq, 1.0)
    target_network_update_freq = np.maximum(target_network_update_freq, 1.0)

    act, act_test, q_target_vals, train, update_target, lr_decay_op, lr_growth_op = \
        build_graph.build_train(
            make_obs_ph=make_obs_ph,
            q_func=q_func,
            num_actions=num_actions,
            optimizer_f=tf.compat.v1.train.AdamOptimizer,
            grad_norm_clipping=10,
            sdMin=sdMin,
            sdMax=sdMax,
            act_policy=act_policy,
            scope=scope,
            test_eps=test_eps,
            lr_init=lr,
            lr_decay_factor=lr_decay_factor,
            lr_growth_factor=lr_growth_factor,
            reuse=tf.compat.v1.AUTO_REUSE,
            tau=target_network_update_rate,
        )
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }
    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    file_writer = tf.compat.v1.summary.FileWriter(save_dir, sess.graph)
    U.initialize()
    update_target()

    saved_mean_reward = None
    timelimit_env = env
    while not hasattr(timelimit_env, '_elapsed_steps'):
        timelimit_env = timelimit_env.env
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_saved = False
        model_file = os.path.join(td, "model")
        if tf.train.latest_checkpoint(td) is not None:
            load_state(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        learning_starts += init_t
        checkpt_loss = []
        eval_logger.log_epoch(act_test)

        for t in range(init_t, max_timesteps):
            if callback is not None and callback(locals(), globals()):
                break
            # Take action and update exploration to the newest value
            kwargs_act = {}
            update_eps = exploration.value(t)
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs_act)[0]
            env_action = action
            reset = False
            new_obs, rew, done, info = env.step(env_action)

            # Store transition in the replay buffer.
            if timelimit_env._elapsed_steps < timelimit_env._max_episode_steps:
                replay_buffer.add(obs, action, rew, new_obs, float(done))
            else:
                replay_buffer.add(obs, action, rew, new_obs, float(not done))
            obs = new_obs
            eval_logger.log_reward(rew)

            if done:
                obs = env.reset()
                reset = True
                eval_logger.log_ep(info)

            if t > learning_starts and (t + 1) % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                stats_t = q_target_vals(obses_t)[0]
                stats_tp1 = q_target_vals(obses_tp1)[0]
                ind = np.arange(batch_size)
                mean_t = stats_t[ind, actions.astype(int)]
                sd_t = np.exp(-np.clip(stats_t[ind, actions.astype(int) + num_actions],
                                       -np.log(sdMax), -np.log(sdMin)))
                mean_tp1 = stats_tp1[:, :num_actions]
                sd_tp1 = np.exp(-np.clip(stats_tp1[:, num_actions:],
                                         -np.log(sdMax), -np.log(sdMin)))

                target_mean, target_var, _ = adfq_func(mean_tp1, np.square(sd_tp1),
                                                       mean_t, np.square(sd_t),
                                                       rewards, gamma,
                                                       terminal=dones,
                                                       asymptotic=False,
                                                       batch=True,
                                                       noise=noise,
                                                       varTH=sdMin * sdMin)
                target_mean = np.reshape(target_mean, (-1))
                target_sd = np.reshape(np.sqrt(target_var), (-1))
                loss, m_err, s_err, summary = train(obses_t, actions, target_mean,
                                                    target_sd, weights)
                file_writer.add_summary(summary, t)
                eval_logger.log_step(loss=loss)
                if prioritized_replay:
                    new_priorities = np.abs(m_err) + np.abs(s_err) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)
                if render:
                    env.render()

            if t > learning_starts and (t + 1) % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            if (t + 1) % epoch_steps == 0:
                eval_logger.log_epoch(act_test)

            if (checkpoint_freq is not None and t > learning_starts
                    and (t + 1) % checkpoint_freq == 0
                    and eval_logger.get_num_episode() > 10):
                mean_loss = np.float16(np.mean(eval_logger.ep_history['loss']))
                if len(checkpt_loss) > 2 and mean_loss > np.float16(max(checkpt_loss[-3:])) \
                        and lr_decay_factor < 1.0:
                    sess.run(lr_decay_op)
                    print("Learning rate decayed due to an increase in loss: %.4f -> %.4f"
                          % (np.float16(max(checkpt_loss[-3:])), mean_loss))
                elif len(checkpt_loss) > 2 and mean_loss < np.float16(min(checkpt_loss[-3:])) \
                        and lr_growth_factor > 1.0:
                    sess.run(lr_growth_op)
                    print("Learning rate grown due to a decrease in loss: %.4f -> %.4f"
                          % (np.float16(min(checkpt_loss[-3:])), mean_loss))
                checkpt_loss.append(mean_loss)

                # print("Saving model to model_%d.pkl"%(t+1))
                # act.save(os.path.join(save_dir,"model_"+str(t+1)+".pkl"))
                mean_100ep_reward = eval_logger.get_100ep_reward()
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}"
                                   .format(saved_mean_reward, mean_100ep_reward))
                    save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_state(model_file)

    eval_logger.finish(max_timesteps, epoch_steps, learning_starts)
    return act
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          train_mode=True,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of
        the names of registered models in baselines.common.models (mlp, cnn, conv_only).
        If a function, should take an observation tensor and return a latent variable
        tensor, which will be mapped to the Q function heads (see build_q_func in
        baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None,
        no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    train_mode: bool
        whether to reset the Unity environment in training mode
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
""" # Examine environment parameters print(str(env)) # Set the default brain to work with default_brain = env.brain_names[0] brain = env.brains[default_brain] num_actions=brain.vector_action_space_size[0] # Create all the functions necessary to train the model sess = get_session() #set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph #observation_space = env.observation_space env_info = env.reset(train_mode=train_mode)[default_brain] state = get_obs_state_lidar(env_info) observation_space=state.copy() #def make_obs_ph(name,Num_action): # tf.placeholder(shape=(None,) + state.shape, dtype=state.dtype, name='st') # return tf.placeholder(tf.float32, shape = [None, Num_action],name=name) def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug =build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise ) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(num_actions)) kwargs['reset'] = reset kwargs['update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. 
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)
    return act
def __init__(self, actions, optimizer, convs, fcs, padding, lstm,
             gamma=0.99, lstm_unit=256, time_horizon=5,
             policy_factor=1.0, value_factor=0.5, entropy_factor=0.01,
             grad_clip=40.0, state_shape=[84, 84, 1], buffer_size=2e3,
             rp_frame=3, phi=lambda s: s, name='global'):
    self.actions = actions
    self.gamma = gamma
    self.name = name
    self.time_horizon = time_horizon
    self.state_shape = state_shape
    self.rp_frame = rp_frame
    self.phi = phi

    self._act,\
    self._train,\
    self._update_local = build_graph.build_train(
        convs=convs,
        fcs=fcs,
        padding=padding,
        lstm=lstm,
        num_actions=len(actions),
        optimizer=optimizer,
        lstm_unit=lstm_unit,
        state_shape=state_shape,
        grad_clip=grad_clip,
        policy_factor=policy_factor,
        value_factor=value_factor,
        entropy_factor=entropy_factor,
        rp_frame=rp_frame,
        scope=name
    )

    # rnn state variables
    self.initial_state = np.zeros((1, lstm_unit), np.float32)
    self.rnn_state0 = self.initial_state
    self.rnn_state1 = self.initial_state

    # last state variables
    self.zero_state = np.zeros(state_shape, dtype=np.float32)
    self.initial_last_obs = [self.zero_state for _ in range(rp_frame)]
    self.last_obs = deque(self.initial_last_obs, maxlen=rp_frame)
    self.last_action = deque([0, 0], maxlen=2)
    self.value_tm1 = None
    self.reward_tm1 = 0.0

    # buffers
    self.rollout = Rollout()
    self.buffer = ReplayBuffer(capacity=buffer_size)

    self.t = 0
    self.t_in_episode = 0
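# A sketch of constructing this worker, assuming the surrounding repo exposes
# the class (called `Agent` here purely for illustration) and that
# build_graph.build_train is importable as above; the conv/fc specs and
# optimizer settings below are illustrative choices, not values from the source:
import tensorflow as tf

agent = Agent(
    actions=[0, 1, 2, 3],                                # discrete action ids
    optimizer=tf.train.RMSPropOptimizer(7e-4, decay=0.99, epsilon=0.1),
    convs=[(16, 8, 4), (32, 4, 2)],                      # (filters, kernel, stride)
    fcs=[256],
    padding='VALID',
    lstm=True,
    state_shape=[84, 84, 1],
    rp_frame=3,                                          # reward-prediction frames
    name='worker_0',
)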
def learn(env,
          q_func,
          alpha=1e-5,
          num_cpu=1,
          n_steps=100000,
          update_target_every=500,
          train_main_every=1,
          print_every=50,
          checkpoint_every=10000,
          buffer_size=50000,
          gamma=1.0,
          batch_size=32,
          param_noise=False,
          pre_run_steps=1000,
          exploration_fraction=0.1,
          final_epsilon=0.1,
          callback=None):
    """
    :param env: gym.Env, environment from OpenAI
    :param q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the q function takes the following inputs:
            input_ph: tf.placeholder, network input
            n_actions: int, number of possible actions
            scope: str, specifying the variable scope
            reuse: bool, whether to reuse the variables given in `scope`
    :param alpha: float, learning rate
    :param num_cpu: int, number of cpus to use
    :param n_steps: int, number of training steps
    :param update_target_every: int, frequency to update the target network
    :param train_main_every: int, frequency to update (train) the main network
    :param print_every: int, how often to print messages to the console
    :param checkpoint_every: int, how often to save the model
    :param buffer_size: int, size of the replay buffer
    :param gamma: float, discount factor
    :param batch_size: int, size of the input batch
    :param param_noise: bool, whether to use parameter noise
    :param pre_run_steps: int, number of pre-run steps to fill the replay buffer.
        Only after `pre_run_steps` steps will the main and target networks begin
        to update.
    :param exploration_fraction: float, between 0 and 1. Fraction of `n_steps`
        over which epsilon is linearly decreased; after that, epsilon remains
        unchanged.
    :param final_epsilon: float, final epsilon value, usually a very small
        number towards zero.
    :param callback: (dict, dict) -> bool
        a function to decide whether it is time to stop training; takes the
        following inputs:
            local_vars: dict, the local variables in the current scope
            global_vars: dict, the global variables in the current scope
    :return: ActWrapper, a callable function
    """
    n_actions = env.action_space.n
    sess = U.make_session(num_cpu)
    sess.__enter__()

    def make_obs_ph(name):
        return U.BatchInput(env.observation_space.shape, name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph,
        q_func,
        n_actions,
        optimizer=tf.train.AdamOptimizer(alpha),
        gamma=gamma,
        param_noise=param_noise,
        grad_norm_clipping=10)

    act_params = {
        "q_func": q_func,
        "n_actions": env.action_space.n,
        "make_obs_ph": make_obs_ph,
    }

    buffer = ReplayBuffer(buffer_size)
    exploration = LinearSchedule(schedule_steps=int(exploration_fraction * n_steps),
                                 final_p=final_epsilon,
                                 initial_p=1.0)

    U.initialize()
    update_target()  # copy from the main network

    episode_rewards = []
    current_episode_reward = 0.0
    model_saved = False
    saved_mean_reward = 0.0
    obs_t = env.reset()

    with tempfile.TemporaryDirectory() as td:
        model_file_path = os.path.join(td, "model")
        for step in range(n_steps):
            if callback is not None:
                if callback(locals(), globals()):
                    break

            kwargs = {}
            if not param_noise:
                epsilon = exploration.value(step)
            else:
                assert False, "param_noise is not implemented"

            action = act(np.array(obs_t)[None], epsilon=epsilon, **kwargs)[0]
            obs_tp1, reward, done, _ = env.step(action)
            current_episode_reward += reward
            buffer.add(obs_t, action, reward, obs_tp1, done)
            obs_t = obs_tp1

            if done:
                obs_t = env.reset()
                episode_rewards.append(current_episode_reward)
                current_episode_reward = 0.0

            # Give some time to fill the buffer.
            if step < pre_run_steps:
                continue

            if step % train_main_every == 0:
                obs_ts, actions, rewards, obs_tp1s, dones = buffer.sample(batch_size)
                weights = np.ones_like(dones)
                td_error = train(obs_ts, actions, rewards, obs_tp1s, dones, weights)

            if step % update_target_every == 0:
                update_target()

            # Guard against an empty slice before the second episode finishes.
            mean_100eps_reward = (float(np.mean(episode_rewards[-101:-1]))
                                  if len(episode_rewards) > 1 else 0.0)
            if done and print_every is not None and len(episode_rewards) % print_every == 0:
                print("step %d, episode %d, epsilon %.2f, running mean reward %.2f"
                      % (step, len(episode_rewards), epsilon, mean_100eps_reward))

            if checkpoint_every is not None and step % checkpoint_every == 0:
                if saved_mean_reward is None or mean_100eps_reward > saved_mean_reward:
                    U.save_state(model_file_path)
                    model_saved = True
                    if print_every is not None:
                        print("Dump model to file due to mean reward increase: %.2f -> %.2f"
                              % (saved_mean_reward, mean_100eps_reward))
                    saved_mean_reward = mean_100eps_reward

        if model_saved:
            U.load_state(model_file_path)
            if print_every:
                print("Restore model from file with mean reward %.2f" % (saved_mean_reward,))

    return ActWrapper(act, act_params)
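# The docstring above fixes the q_func signature as
# (input_ph, n_actions, scope, reuse) -> tf.Variable. A minimal TF1-style MLP
# satisfying it (layer sizes are illustrative):
import tensorflow as tf

def mlp_q_func(input_ph, n_actions, scope, reuse=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = tf.layers.flatten(input_ph)
        out = tf.layers.dense(out, 64, activation=tf.nn.relu)
        # One Q-value per action, shape (batch_size, n_actions).
        return tf.layers.dense(out, n_actions, activation=None)

# e.g. act = learn(gym.make("CartPole-v0"), q_func=mlp_q_func, n_steps=50000)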
def learn(env,
          q_func,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16,
          callback=None):
    """Train a deepq model.

    Parameters
    -------
    env : gym.Env
        environment to train on
    q_func: (tf.Variable, int, str, bool) -> tf.Variable
        the model that takes the following inputs:
            observation_in: object
                the output of observation placeholder
            num_actions: int
                number of actions
            scope: str
            reuse: bool
                should be passed to outer variable scope
        and returns a tensor of shape (batch_size, num_actions) with values of
        every action.
    lr: float
        learning rate for adam optimizer
    max_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress.
        set to None to disable printing.
    checkpoint_freq: int
        how often to save the model. This is so that the best version is
        restored at the end of the training. If you do not wish to restore the
        best version at the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial
        value to 1.0. If set to None, equals max_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    num_cpu: int
        number of cpus to use for training
    callback: (locals, globals) -> bool
        function called at every step with state of the algorithm.
        If callback returns true training stops.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    sess = U.make_session(num_cpu=num_cpu)
    sess.__enter__()

    def make_obs_ph(name):
        return U.BatchInput([84, 84], name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=2,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10)
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': 2,
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    # This env exposes no reset(); it restarts itself after `done`, so the
    # initial observation comes from a no-op step.
    obs, _, _, _ = env.step(0)

    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                episode_rewards.append(0.0)

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                # Pass the sampled importance weights through to the loss so
                # prioritized-replay corrections actually take effect.
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                print("steps: {}".format(t))
                print("episodes: {}".format(num_episodes))
                print("mean 100 episode reward: {}".format(mean_100ep_reward))
                print("% time spent exploring: {}".format(int(100 * exploration.value(t))))

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        print("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                print("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return ActWrapper(act, act_params)
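# Every learn() variant here anneals epsilon with
# LinearSchedule(schedule_timesteps, initial_p, final_p), which interpolates
# linearly and then holds final_p. A self-contained sketch of that behavior
# for intuition (not the repo's class itself):
def linear_eps(t, schedule_timesteps, initial_p=1.0, final_p=0.02):
    fraction = min(float(t) / schedule_timesteps, 1.0)
    return initial_p + fraction * (final_p - initial_p)

# With max_timesteps=100000 and exploration_fraction=0.1 (so 10000 schedule steps):
# linear_eps(0, 10000) == 1.0, linear_eps(5000, 10000) == 0.51,
# linear_eps(10000, 10000) == 0.02, and it stays at 0.02 afterwards.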
def learn(env_id,
          q_func,
          lr=5e-4,
          max_timesteps=10000,
          buffer_size=5000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          train_steps=10,
          learning_starts=500,
          batch_size=32,
          print_freq=10,
          checkpoint_freq=100,
          model_dir=None,
          gamma=1.0,
          target_network_update_freq=50,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          player_processes=None,
          player_connections=None):
    env, _, _ = create_gvgai_environment(env_id)

    # Create all the functions necessary to train the model.
    # Capture the shape outside the closure so that the env object is not
    # serialized by cloudpickle when serializing make_obs_ph.
    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    session = tf.Session()
    session.__enter__()

    policy_path = os.path.join(model_dir, "Policy.pkl")
    model_path = os.path.join(model_dir, "model", "model")
    # Wrap act before branching so that saving works whether training starts
    # fresh or resumes from a saved model.
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }
    act = ActWrapper(act, act_params)
    if os.path.isdir(os.path.join(model_dir, "model")):
        load_state(model_path)
    else:
        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()
        act.save(policy_path)
        save_state(model_path)
    env.close()

    # Create the replay buffer, resuming from disk if a pickle exists.
    if prioritized_replay:
        replay_buffer_path = os.path.join(model_dir, "Prioritized_replay.pkl")
        if os.path.isfile(replay_buffer_path):
            with open(replay_buffer_path, 'rb') as input_file:
                replay_buffer = pickle.load(input_file)
        else:
            replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                    alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer_path = os.path.join(model_dir, "Normal_replay.pkl")
        if os.path.isfile(replay_buffer_path):
            with open(replay_buffer_path, 'rb') as input_file:
                replay_buffer = pickle.load(input_file)
        else:
            replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    episode_rewards = list()
    saved_mean_reward = -999999999
    signal.signal(signal.SIGQUIT, signal_handler)
    global terminate_learning

    total_timesteps = 0
    for timestep in range(max_timesteps):
        if terminate_learning:
            break

        # Collect experience gathered by the player processes.
        for connection in player_connections:
            experiences, reward = connection.recv()
            episode_rewards.append(reward)
            for experience in experiences:
                replay_buffer.add(*experience)
                total_timesteps += 1

        if total_timesteps < learning_starts:
            if timestep % 10 == 0:
                print("not started yet", flush=True)
            continue

        if timestep % train_freq == 0:
            for i in range(train_steps):
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size,
                                                      beta=beta_schedule.value(total_timesteps))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

        if timestep % target_network_update_freq == 0:
            # Update target network periodically.
            update_target()

        mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
        num_episodes = len(episode_rewards)
        if print_freq is not None and timestep % print_freq == 0:
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring",
                                  int(100 * exploration.value(total_timesteps)))
            logger.dump_tabular()

        if timestep % checkpoint_freq == 0 and mean_100ep_reward > saved_mean_reward:
            act.save(policy_path)
            save_state(model_path)
            saved_mean_reward = mean_100ep_reward
            with open(replay_buffer_path, 'wb') as output_file:
                pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL)
            send_message_to_all(player_connections, Message.UPDATE)

    send_message_to_all(player_connections, Message.TERMINATE)
    if mean_100ep_reward > saved_mean_reward:
        act.save(policy_path)
        with open(replay_buffer_path, 'wb') as output_file:
            pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL)

    for player_process in player_processes:
        player_process.join()

    return act.load(policy_path)
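# The prioritized-replay branch above feeds |td_error| + prioritized_replay_eps
# back as new priorities. For reference, the standard formulas that a PER
# buffer implements (Schaul et al., 2015), as a self-contained sketch with
# alpha and beta matching the hyperparameters above:
import numpy as np

def per_sampling_probs(priorities, alpha=0.6):
    # P(i) = p_i^alpha / sum_k p_k^alpha
    scaled = np.asarray(priorities, dtype=np.float64) ** alpha
    return scaled / scaled.sum()

def per_importance_weights(probs, beta=0.4):
    # w_i = (N * P(i))^(-beta), normalized so max weight is 1.
    n = len(probs)
    weights = (n * np.asarray(probs)) ** (-beta)
    return weights / weights.max()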
def main():
    # env = gym.make("CartPoleRob-v0")
    # env = gym.make("CartPole-v0")
    # env = gym.make("CartPole-v1")
    # env = gym.make("Acrobot-v1")
    # env = gym.make("MountainCarRob-v0")
    # env = gym.make("FrozenLake-v0")
    # env = gym.make("FrozenLake8x8-v0")
    env = gym.make("FrozenLake8x8nohole-v0")

    # robShape = (2,)
    # robShape = (3,)
    # robShape = (200,)
    # robShape = (16,)
    robShape = (64,)

    def make_obs_ph(name):
        # return U.BatchInput(env.observation_space.shape, name=name)
        return U.BatchInput(robShape, name=name)

    # # these params are specific to mountaincar
    # def getOneHotObs(obs):
    #     obsFraction = (obs[0] + 1.2) / 1.8
    #     idx1 = np.int32(np.trunc(obsFraction * 100))
    #     obsFraction = (obs[1] + 0.07) / 0.14
    #     idx2 = np.int32(np.trunc(obsFraction * 100))
    #     ident = np.identity(100)
    #     return np.r_[ident[idx1, :], ident[idx2, :]]

    # these params are specific to frozenlake
    def getOneHotObs(obs):
        # ident = np.identity(16)
        ident = np.identity(64)
        return ident[obs, :]

    model = models.mlp([32])
    # model = models.mlp([64])
    # model = models.mlp([64], layer_norm=True)
    # model = models.mlp([16, 16])

    # parameters
    q_func = model
    lr = 1e-3
    # max_timesteps = 100000
    max_timesteps = 50000
    # max_timesteps = 10000
    buffer_size = 50000
    exploration_fraction = 0.1
    # exploration_fraction = 0.3
    exploration_final_eps = 0.02
    # exploration_final_eps = 0.1
    train_freq = 1
    batch_size = 32
    print_freq = 10
    checkpoint_freq = 10000
    learning_starts = 1000
    gamma = 1.0
    target_network_update_freq = 500
    # prioritized_replay = False
    prioritized_replay = True
    prioritized_replay_alpha = 0.6
    prioritized_replay_beta0 = 0.4
    prioritized_replay_beta_iters = None
    prioritized_replay_eps = 1e-6
    num_cpu = 16

    sess = U.make_session(num_cpu)
    sess.__enter__()

    act, train, update_target, debug = build_graph.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10
    )
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    obs = getOneHotObs(obs)

    for t in range(max_timesteps):
        # Take action and update exploration to the newest value
        action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
        new_obs, rew, done, _ = env.step(action)
        new_obs = getOneHotObs(new_obs)

        # Store transition in the replay buffer.
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = new_obs

        episode_rewards[-1] += rew
        if done:
            obs = env.reset()
            obs = getOneHotObs(obs)
            episode_rewards.append(0.0)

        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            if prioritized_replay:
                experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                weights, batch_idxes = np.ones_like(rewards), None
            td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
            if prioritized_replay:
                new_priorities = np.abs(td_errors) + prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)

        if t > learning_starts and t % target_network_update_freq == 0:
            # Update target network periodically.
            update_target()

        mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
        num_episodes = len(episode_rewards)
        if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            print("steps: " + str(t) +
                  ", episodes: " + str(num_episodes) +
                  ", mean 100 episode reward: " + str(mean_100ep_reward) +
                  ", % time spent exploring: " + str(int(100 * exploration.value(t))))

    # Plot a running average of the episode rewards.
    num2avg = 20
    rListAvg = np.convolve(episode_rewards, np.ones(num2avg)) / num2avg
    plt.plot(rListAvg)
    # plt.plot(episode_rewards)
    plt.show()
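# getOneHotObs above maps a discrete FrozenLake state id to the corresponding
# row of the identity matrix. A concrete check of that encoding:
import numpy as np

ident = np.identity(64)
vec = ident[5, :]           # 64-vector: 1.0 at index 5, zeros elsewhere
assert vec[5] == 1.0 and vec.sum() == 1.0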
def learn(env,
          q_func_dict,
          priorities,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          flat_decision_values=False,
          disable_dv=False,
          callback=None):
    # Create all the functions necessary to train the model
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
    sess.__enter__()
    # Reserved for parallel per-objective training; the loop below currently
    # trains each objective synchronously in the default session.
    executor = ThreadPoolExecutor(max_workers=3)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    observation_space_shape = env.observation_space.shape

    def make_obs_ph(name):
        return U.BatchInput(observation_space_shape, name=name)

    objectives = env.env.get_objectives()

    # Build one set of act/train/update_target functions per objective.
    act = {}
    train = {}
    update_target = {}
    debug = {}
    act_params = {}
    for ob in priorities:
        q_func = q_func_dict[ob]
        act[ob], train[ob], update_target[ob], debug[ob] = dqn_dv.build_train(
            make_obs_ph=make_obs_ph,
            q_func=q_func,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            gamma=gamma,
            double_q=True,
            grad_norm_clipping=10,
            scope=ob
        )
        act_params[ob] = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func,
            'num_actions': env.action_space.n,
            'scope': ob,
        }

    multi_act = MultiActWrapper(act, act_params, priorities, env.action_space.n,
                                disable_dv=disable_dv)

    replay_buffer = MultiObjectiveReplayBuffer(buffer_size, objectives)
    beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target networks.
    U.initialize()
    [update_target_fn() for update_target_fn in update_target.values()]

    episode_rewards = [0.0]
    objective_rewards = dict((k, [0.0]) for k in priorities)
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        model_saved = False
        model_file = os.path.join(td, "model")
        for t in range(max_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            update_eps = exploration.value(t)
            update_param_noise_threshold = 0.
            action, q_vals_sum, dvs, selected_dvs, extra_indicators = multi_act(
                np.array(obs)[None], update_eps=update_eps, **kwargs)
            if isinstance(env.action_space, gym.spaces.MultiBinary):
                env_action = np.zeros(env.action_space.n)
                env_action[action] = 1
            else:
                env_action = action
            reset = False
            env.env.set_extra_indicators(extra_indicators)
            new_obs, rew, done, _ = env.step(env_action)

            # Per-objective reward shaping: each objective keeps its own reward
            # plus a small share of the summed reward; decision values are
            # trained on absolute rewards.
            rew_sum = sum(rew.values())
            dv_rew = dict([(k, abs(v)) for k, v in rew.items()])
            rew_with_bias = dict([(k, v + 0.1 * rew_sum) for k, v in rew.items()])

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, selected_dvs, rew_with_bias, dv_rew,
                              new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += np.sum(list(rew.values()))
            for ob in priorities:
                objective_rewards[ob][-1] += rew[ob]
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                for ob in priorities:
                    objective_rewards[ob].append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                obses_t, actions, dvs, rewards, dv_rewards, obses_tp1, dones = \
                    replay_buffer.sample(batch_size)
                weights, batch_idxes = {}, {}
                for ob in priorities:
                    weights[ob], batch_idxes[ob] = np.ones_like(rewards[ob]), None

                def train_wrap(ob, session, args):
                    with session.as_default():
                        td_error = train[ob](*args)
                    return td_error

                # Train each objective's network synchronously.
                for ob in priorities:
                    args = (obses_t, actions, dvs[ob], rewards[ob], dv_rewards[ob],
                            obses_tp1, dones, weights[ob])
                    train_wrap(ob, tf.get_default_session(), args)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target networks periodically.
                for ob in priorities:
                    update_target[ob]()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_5ep_reward = round(np.mean(episode_rewards[-6:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 5 episode reward", mean_5ep_reward)
                for ob in priorities:
                    obj_mean = round(np.mean(objective_rewards[ob][-6:-1]), 1)
                    logger.record_tabular(ob + " mean 5ep", obj_mean)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    U.save_state(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            U.load_state(model_file)

    return multi_act
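# A concrete check of the reward shaping used above, where each objective
# receives its own reward plus a 0.1 share of the summed reward (objective
# names here are illustrative):
rew = {'safety': -1.0, 'progress': 2.0}
rew_sum = sum(rew.values())                               # 1.0
rew_with_bias = {k: v + 0.1 * rew_sum for k, v in rew.items()}
# {'safety': -0.9, 'progress': 2.1}
dv_rew = {k: abs(v) for k, v in rew.items()}              # {'safety': 1.0, 'progress': 2.0}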