class A3CTrainingThread(CommonWorker):
    """Asynchronous Actor-Critic Training Thread Class."""

    log_interval = 100
    perf_log_interval = 1000
    local_t_max = 20
    entropy_beta = 0.01
    gamma = 0.99
    shaping_actions = -1  # -1 all actions, 0 exclude noop
    transformed_bellman = False
    clip_norm = 0.5
    use_grad_cam = False
    use_sil = False
    log_idx = 0
    reward_constant = 0

    def __init__(self, thread_index, global_net, local_net,
                 initial_learning_rate, learning_rate_input, grad_applier,
                 device=None, no_op_max=30):
        """Initialize A3CTrainingThread class."""
        assert self.action_size != -1

        self.is_sil_thread = False
        self.is_refresh_thread = False
        self.thread_idx = thread_index
        self.learning_rate_input = learning_rate_input
        self.local_net = local_net
        self.no_op_max = no_op_max
        self.override_num_noops = 0 if self.no_op_max == 0 else None

        logger.info("===A3C thread_index: {}===".format(self.thread_idx))
        logger.info("device: {}".format(device))
        logger.info("use_sil: {}".format(
            colored(self.use_sil, "green" if self.use_sil else "red")))
        logger.info("local_t_max: {}".format(self.local_t_max))
        logger.info("action_size: {}".format(self.action_size))
        logger.info("entropy_beta: {}".format(self.entropy_beta))
        logger.info("gamma: {}".format(self.gamma))
        logger.info("reward_type: {}".format(self.reward_type))
        logger.info("transformed_bellman: {}".format(
            colored(self.transformed_bellman,
                    "green" if self.transformed_bellman else "red")))
        logger.info("clip_norm: {}".format(self.clip_norm))
        logger.info("use_grad_cam: {}".format(
            colored(self.use_grad_cam,
                    "green" if self.use_grad_cam else "red")))

        reward_clipped = True if self.reward_type == 'CLIP' else False

        local_vars = self.local_net.get_vars

        with tf.device(device):
            self.local_net.prepare_loss(
                entropy_beta=self.entropy_beta, critic_lr=0.5)
            var_refs = [v._ref() for v in local_vars()]
            self.gradients = tf.gradients(self.local_net.total_loss, var_refs)

        global_vars = global_net.get_vars

        with tf.device(device):
            if self.clip_norm is not None:
                self.gradients, grad_norm = tf.clip_by_global_norm(
                    self.gradients, self.clip_norm)
            self.gradients = list(zip(self.gradients, global_vars()))
            self.apply_gradients = grad_applier.apply_gradients(self.gradients)

        self.sync = self.local_net.sync_from(global_net)

        self.game_state = GameState(env_id=self.env_id, display=False,
                                    no_op_max=self.no_op_max, human_demo=False,
                                    episode_life=True,
                                    override_num_noops=self.override_num_noops)

        self.local_t = 0
        self.initial_learning_rate = initial_learning_rate

        self.episode_reward = 0
        self.episode_steps = 0

        # variable controlling log output
        self.prev_local_t = 0

        with tf.device(device):
            if self.use_grad_cam:
                self.action_meaning = self.game_state.env.unwrapped \
                    .get_action_meanings()
                self.local_net.build_grad_cam_grads()

        if self.use_sil:
            self.episode = SILReplayMemory(
                self.action_size, max_len=None, gamma=self.gamma,
                clip=reward_clipped,
                height=self.local_net.in_shape[0],
                width=self.local_net.in_shape[1],
                phi_length=self.local_net.in_shape[2],
                reward_constant=self.reward_constant)

    def train(self, sess, global_t, train_rewards):
        """Train A3C."""
        states = []
        fullstates = []
        actions = []
        rewards = []
        values = []
        rho = []

        terminal_pseudo = False  # loss of life
        terminal_end = False  # real terminal

        # copy weights from shared to local
        sess.run(self.sync)

        start_local_t = self.local_t

        # t_max times loop
        for i in range(self.local_t_max):
            state = cv2.resize(self.game_state.s_t,
                               self.local_net.in_shape[:-1],
                               interpolation=cv2.INTER_AREA)
            fullstate = self.game_state.clone_full_state()
            pi_, value_, logits_ = self.local_net.run_policy_and_value(
                sess, state)
            action = self.pick_action(logits_)

            states.append(state)
            fullstates.append(fullstate)
            actions.append(action)
            values.append(value_)

            if self.thread_idx == self.log_idx \
                    and self.local_t % self.log_interval == 0:
                log_msg1 = "lg={}".format(
                    np.array_str(logits_, precision=4, suppress_small=True))
                log_msg2 = "pi={}".format(
                    np.array_str(pi_, precision=4, suppress_small=True))
                log_msg3 = "V={:.4f}".format(value_)
                logger.debug(log_msg1)
                logger.debug(log_msg2)
                logger.debug(log_msg3)

            # process game
            self.game_state.step(action)

            # receive game result
            reward = self.game_state.reward
            terminal = self.game_state.terminal

            self.episode_reward += reward

            if self.use_sil:
                # save states in episode memory
                self.episode.add_item(self.game_state.s_t, fullstate,
                                      action, reward, terminal)

            if self.reward_type == 'CLIP':
                reward = np.sign(reward)

            rewards.append(reward)

            self.local_t += 1
            self.episode_steps += 1
            global_t += 1

            # s_t1 -> s_t
            self.game_state.update()

            if terminal:
                terminal_pseudo = True

                env = self.game_state.env
                name = 'EpisodicLifeEnv'
                if get_wrapper_by_name(env, name).was_real_done:
                    # reduce log freq
                    if self.thread_idx == self.log_idx:
                        log_msg = "train: worker={} global_t={} local_t={}".format(
                            self.thread_idx, global_t, self.local_t)
                        score_str = colored(
                            "score={}".format(self.episode_reward), "magenta")
                        steps_str = colored(
                            "steps={}".format(self.episode_steps), "blue")
                        log_msg += " {} {}".format(score_str, steps_str)
                        logger.debug(log_msg)

                    train_rewards['train'][global_t] = (self.episode_reward,
                                                        self.episode_steps)
                    self.record_summary(
                        score=self.episode_reward, steps=self.episode_steps,
                        episodes=None, global_t=global_t, mode='Train')

                    self.episode_reward = 0
                    self.episode_steps = 0
                    terminal_end = True

                self.game_state.reset(hard_reset=False)
                break

        cumsum_reward = 0.0
        if not terminal:
            state = cv2.resize(self.game_state.s_t,
                               self.local_net.in_shape[:-1],
                               interpolation=cv2.INTER_AREA)
            cumsum_reward = self.local_net.run_value(sess, state)

        actions.reverse()
        states.reverse()
        rewards.reverse()
        values.reverse()

        batch_state = []
        batch_action = []
        batch_adv = []
        batch_cumsum_reward = []

        # compute and accumulate gradients
        for (ai, ri, si, vi) in zip(actions, rewards, states, values):
            if self.transformed_bellman:
                ri = np.sign(ri) * self.reward_constant + ri
                cumsum_reward = transform_h(
                    ri + self.gamma * transform_h_inv(cumsum_reward))
            else:
                cumsum_reward = ri + self.gamma * cumsum_reward
            advantage = cumsum_reward - vi

            # convert action to one-hot vector
            a = np.zeros([self.action_size])
            a[ai] = 1

            batch_state.append(si)
            batch_action.append(a)
            batch_adv.append(advantage)
            batch_cumsum_reward.append(cumsum_reward)

        cur_learning_rate = self._anneal_learning_rate(
            global_t, self.initial_learning_rate)

        feed_dict = {
            self.local_net.s: batch_state,
            self.local_net.a: batch_action,
            self.local_net.advantage: batch_adv,
            self.local_net.cumulative_reward: batch_cumsum_reward,
            self.learning_rate_input: cur_learning_rate,
        }

        sess.run(self.apply_gradients, feed_dict=feed_dict)

        t = self.local_t - self.prev_local_t
        if (self.thread_idx == self.log_idx
                and t >= self.perf_log_interval):
            self.prev_local_t += self.perf_log_interval
            elapsed_time = time.time() - self.start_time
            steps_per_sec = global_t / elapsed_time
            logger.info("worker-{}, log_worker-{}".format(
                self.thread_idx, self.log_idx))
            logger.info("Performance : {} STEPS in {:.0f} sec. {:.0f}"
                        " STEPS/sec. {:.2f}M STEPS/hour.".format(
                            global_t, elapsed_time, steps_per_sec,
                            steps_per_sec * 3600 / 1000000.))

        # return advanced local step size
        diff_local_t = self.local_t - start_local_t
        return diff_local_t, terminal_end, terminal_pseudo
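

# ---------------------------------------------------------------------------
# Hedged illustration (not used by the classes in this file): the n-step
# backup in A3CTrainingThread.train() above, and in RefreshThread.update_a3c()
# below, depends on transform_h / transform_h_inv, which are imported
# elsewhere in this module.  The helper below is a hypothetical, standalone
# rewrite of that backward loop for plain Python lists, reusing the
# module-level NumPy import (np).  The h / h_inv definitions and the eps
# value follow the commonly used transformed-Bellman squashing function
# (Pohlen et al., 2018); they are an assumption, not necessarily the exact
# definitions used by this repository.
def _nstep_targets_sketch(rewards, bootstrap_value, gamma=0.99,
                          transformed=False, reward_constant=0.0, eps=1e-2):
    """Sketch: recompute the cumulative-reward targets built in train()."""
    def h(z):
        # squashing function: h(z) = sign(z) * (sqrt(|z| + 1) - 1) + eps * z
        return np.sign(z) * (np.sqrt(np.abs(z) + 1.) - 1.) + eps * z

    def h_inv(z):
        # closed-form inverse of h
        return np.sign(z) * (
            ((np.sqrt(1. + 4. * eps * (np.abs(z) + 1. + eps)) - 1.)
             / (2. * eps)) ** 2 - 1.)

    # bootstrap_value is 0.0 after a real terminal, V(s_T) otherwise
    cumsum_reward = bootstrap_value
    targets = []
    for r in reversed(rewards):  # newest transition first, as in train()
        if transformed:
            r = np.sign(r) * reward_constant + r
            cumsum_reward = h(r + gamma * h_inv(cumsum_reward))
        else:
            cumsum_reward = r + gamma * cumsum_reward
        targets.append(cumsum_reward)
    # targets[i] pairs with the i-th entry of the reversed batch, matching
    # the order of batch_cumsum_reward in train()
    return targets
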
class RefreshThread(CommonWorker):
    """Rollout Thread Class."""

    advice_confidence = 0.8
    gamma = 0.99

    def __init__(self, thread_index, action_size, env_id,
                 global_a3c, local_a3c, update_in_rollout, nstep_bc,
                 global_pretrained_model, local_pretrained_model,
                 transformed_bellman=False, no_op_max=0,
                 device='/cpu:0', entropy_beta=0.01, clip_norm=None,
                 grad_applier=None, initial_learn_rate=0.007,
                 learning_rate_input=None):
        """Initialize RefreshThread class."""
        self.is_refresh_thread = True
        self.action_size = action_size
        self.thread_idx = thread_index
        self.transformed_bellman = transformed_bellman
        self.entropy_beta = entropy_beta
        self.clip_norm = clip_norm
        self.initial_learning_rate = initial_learn_rate
        self.learning_rate_input = learning_rate_input
        self.no_op_max = no_op_max
        self.override_num_noops = 0 if self.no_op_max == 0 else None

        logger.info("===REFRESH thread_index: {}===".format(self.thread_idx))
        logger.info("device: {}".format(device))
        logger.info("action_size: {}".format(self.action_size))
        logger.info("reward_type: {}".format(self.reward_type))
        logger.info("transformed_bellman: {}".format(
            colored(self.transformed_bellman,
                    "green" if self.transformed_bellman else "red")))
        logger.info("update in rollout: {}".format(
            colored(update_in_rollout,
                    "green" if update_in_rollout else "red")))
        logger.info("N-step BC: {}".format(nstep_bc))

        self.reward_clipped = True if self.reward_type == 'CLIP' else False

        # setup local a3c
        self.local_a3c = local_a3c
        self.sync_a3c = self.local_a3c.sync_from(global_a3c)

        with tf.device(device):
            local_vars = self.local_a3c.get_vars
            self.local_a3c.prepare_loss(
                entropy_beta=self.entropy_beta, critic_lr=0.5)
            var_refs = [v._ref() for v in local_vars()]
            self.rollout_gradients = tf.gradients(self.local_a3c.total_loss,
                                                  var_refs)

            global_vars = global_a3c.get_vars
            if self.clip_norm is not None:
                self.rollout_gradients, grad_norm = tf.clip_by_global_norm(
                    self.rollout_gradients, self.clip_norm)
            self.rollout_gradients = list(zip(self.rollout_gradients,
                                              global_vars()))
            self.rollout_apply_gradients = \
                grad_applier.apply_gradients(self.rollout_gradients)

        # setup local pretrained model
        self.local_pretrained = None
        if nstep_bc > 0:
            assert local_pretrained_model is not None
            assert global_pretrained_model is not None
            self.local_pretrained = local_pretrained_model
            self.sync_pretrained = \
                self.local_pretrained.sync_from(global_pretrained_model)

        # setup env
        self.rolloutgame = GameState(env_id=env_id, display=False,
                                     no_op_max=0, human_demo=False,
                                     episode_life=True,
                                     override_num_noops=0)
        self.local_t = 0
        self.episode_reward = 0
        self.episode_steps = 0

        self.action_meaning = \
            self.rolloutgame.env.unwrapped.get_action_meanings()

        assert self.local_a3c is not None
        if nstep_bc > 0:
            assert self.local_pretrained is not None

        self.episode = SILReplayMemory(
            self.action_size, max_len=None, gamma=self.gamma,
            clip=self.reward_clipped,
            height=self.local_a3c.in_shape[0],
            width=self.local_a3c.in_shape[1],
            phi_length=self.local_a3c.in_shape[2],
            reward_constant=self.reward_constant)

    def record_rollout(self, score=0, steps=0, old_return=0, new_return=0,
                       global_t=0, rollout_ctr=0, rollout_added_ctr=0,
                       mode='Rollout', confidence=None, episodes=None):
        """Record rollout summary."""
        summary = tf.Summary()
        summary.value.add(tag='{}/score'.format(mode),
                          simple_value=float(score))
        summary.value.add(tag='{}/old_return_from_s'.format(mode),
                          simple_value=float(old_return))
        summary.value.add(tag='{}/new_return_from_s'.format(mode),
                          simple_value=float(new_return))
        summary.value.add(tag='{}/steps'.format(mode),
                          simple_value=float(steps))
        summary.value.add(tag='{}/all_rollout_ctr'.format(mode),
                          simple_value=float(rollout_ctr))
        summary.value.add(tag='{}/rollout_added_ctr'.format(mode),
                          simple_value=float(rollout_added_ctr))
        if confidence is not None:
            summary.value.add(tag='{}/advice-confidence'.format(mode),
                              simple_value=float(confidence))
        if episodes is not None:
            summary.value.add(tag='{}/episodes'.format(mode),
                              simple_value=float(episodes))
        self.writer.add_summary(summary, global_t)
        self.writer.flush()

    def compute_return_for_state(self, rewards, terminal):
        """Compute expected return."""
        length = np.shape(rewards)[0]
        returns = np.empty_like(rewards, dtype=np.float32)

        if self.reward_clipped:
            rewards = np.clip(rewards, -1., 1.)
        else:
            rewards = np.sign(rewards) * self.reward_constant + rewards

        for i in reversed(range(length)):
            if terminal[i]:
                returns[i] = rewards[i] if self.reward_clipped \
                    else transform_h(rewards[i])
            else:
                if self.reward_clipped:
                    returns[i] = rewards[i] + self.gamma * returns[i + 1]
                else:
                    # apply transformed expected return
                    exp_r_t = self.gamma * transform_h_inv(returns[i + 1])
                    returns[i] = transform_h(rewards[i] + exp_r_t)
        return returns[0]

    def update_a3c(self, sess, actions, states, rewards, values, global_t):
        """Apply one A3C gradient update computed from a refreshed rollout."""
        cumsum_reward = 0.0

        actions.reverse()
        states.reverse()
        rewards.reverse()
        values.reverse()

        batch_state = []
        batch_action = []
        batch_adv = []
        batch_cumsum_reward = []

        # compute and accumulate gradients
        for (ai, ri, si, vi) in zip(actions, rewards, states, values):
            if self.transformed_bellman:
                ri = np.sign(ri) * self.reward_constant + ri
                cumsum_reward = transform_h(
                    ri + self.gamma * transform_h_inv(cumsum_reward))
            else:
                cumsum_reward = ri + self.gamma * cumsum_reward
            advantage = cumsum_reward - vi

            # convert action to one-hot vector
            a = np.zeros([self.action_size])
            a[ai] = 1

            batch_state.append(si)
            batch_action.append(a)
            batch_adv.append(advantage)
            batch_cumsum_reward.append(cumsum_reward)

        cur_learning_rate = self._anneal_learning_rate(
            global_t, self.initial_learning_rate)

        feed_dict = {
            self.local_a3c.s: batch_state,
            self.local_a3c.a: batch_action,
            self.local_a3c.advantage: batch_adv,
            self.local_a3c.cumulative_reward: batch_cumsum_reward,
            self.learning_rate_input: cur_learning_rate,
        }

        sess.run(self.rollout_apply_gradients, feed_dict=feed_dict)

        return batch_adv

    def rollout(self, a3c_sess, folder, pretrain_sess, global_t, past_state,
                add_all_rollout, ep_max_steps, nstep_bc, update_in_rollout):
        """Perform one rollout until terminal."""
        a3c_sess.run(self.sync_a3c)
        if nstep_bc > 0:
            pretrain_sess.run(self.sync_pretrained)

        _, fs, old_a, old_return, _, _ = past_state

        states = []
        actions = []
        rewards = []
        values = []
        terminals = []
        confidences = []

        rollout_ctr, rollout_added_ctr = 0, 0
        rollout_new_return, rollout_old_return = 0, 0

        terminal_pseudo = False  # loss of life
        terminal_end = False  # real terminal
        add = False

        self.rolloutgame.reset(hard_reset=True)
        self.rolloutgame.restore_full_state(fs)
        # check that the emulator state was restored successfully
        fs_check = self.rolloutgame.clone_full_state()
        assert np.array_equal(fs_check, fs)
        del fs_check

        start_local_t = self.local_t

        self.rolloutgame.step(0)

        # Prevent rollouts from running too long: ep_max_steps is set lower
        # than the ALE default, see
        # https://github.com/openai/gym/blob/54f22cf4db2e43063093a1b15d968a57a32b6e90/gym/envs/__init__.py#L635
        # In all games tested, no rollout exceeds ep_max_steps.
        while ep_max_steps > 0:
            state = cv2.resize(self.rolloutgame.s_t,
                               self.local_a3c.in_shape[:-1],
                               interpolation=cv2.INTER_AREA)
            fullstate = self.rolloutgame.clone_full_state()

            if nstep_bc > 0:
                # LiDER-TA or LiDER-BC: act from the pretrained model
                model_pi = self.local_pretrained.run_policy(pretrain_sess,
                                                            state)
                action, confidence = self.choose_action_with_high_confidence(
                    model_pi, exclude_noop=False)
                # "confidences" is recorded but not used for anything
                confidences.append(confidence)
                nstep_bc -= 1
            else:
                # LiDER: refresh with the current policy
                pi_, _, logits_ = self.local_a3c.run_policy_and_value(
                    a3c_sess, state)
                action = self.pick_action(logits_)
                confidences.append(pi_[action])

            value_ = self.local_a3c.run_value(a3c_sess, state)
            values.append(value_)
            states.append(state)
            actions.append(action)

            self.rolloutgame.step(action)
            ep_max_steps -= 1

            reward = self.rolloutgame.reward
            terminal = self.rolloutgame.terminal
            terminals.append(terminal)

            self.episode_reward += reward

            self.episode.add_item(self.rolloutgame.s_t, fullstate, action,
                                  reward, terminal, from_rollout=True)

            if self.reward_type == 'CLIP':
                reward = np.sign(reward)

            rewards.append(reward)

            self.local_t += 1
            self.episode_steps += 1
            global_t += 1

            self.rolloutgame.update()

            if terminal:
                terminal_pseudo = True
                env = self.rolloutgame.env
                name = 'EpisodicLifeEnv'
                rollout_ctr += 1
                terminal_end = get_wrapper_by_name(env, name).was_real_done

                new_return = self.compute_return_for_state(rewards, terminals)
                if not add_all_rollout:
                    if new_return > old_return:
                        add = True
                else:
                    add = True

                if add:
                    rollout_added_ctr += 1
                    rollout_new_return += new_return
                    rollout_old_return += old_return
                    # update the policy immediately using a good rollout
                    if update_in_rollout:
                        batch_adv = self.update_a3c(a3c_sess, actions, states,
                                                    rewards, values, global_t)

                self.episode_reward = 0
                self.episode_steps = 0
                self.rolloutgame.reset(hard_reset=True)
                break

        diff_local_t = self.local_t - start_local_t

        return diff_local_t, terminal_end, terminal_pseudo, rollout_ctr, \
            rollout_added_ctr, add, rollout_new_return, rollout_old_return
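

# ---------------------------------------------------------------------------
# Hedged usage sketch (hypothetical driver code, not part of this module).
# RefreshThread.rollout() restores a stored emulator state, replays the rest
# of the episode with the current policy (or with a pretrained model for the
# first nstep_bc steps), and keeps the refreshed trajectory only when its
# recomputed return beats the return stored with the sampled state, unless
# add_all_rollout is True.  The names shared_memory, refresh_worker, sess,
# and log_dir below are assumptions used only for illustration; past_state
# is assumed to be the 6-tuple unpacked at the top of rollout(), with the
# full emulator state at index 1 and the stored return at index 3.
#
#     past_state = shared_memory.sample_one_random()    # hypothetical sampler
#     diff_t, real_end, pseudo_end, ctr, added_ctr, add, new_ret, old_ret = \
#         refresh_worker.rollout(a3c_sess=sess,
#                                folder=log_dir,
#                                pretrain_sess=None,     # only used if nstep_bc > 0
#                                global_t=global_t,
#                                past_state=past_state,
#                                add_all_rollout=False,  # keep only better returns
#                                ep_max_steps=10000,
#                                nstep_bc=0,             # 0 = pure LiDER refresh
#                                update_in_rollout=True)
#     if add:
#         shared_memory.extend(refresh_worker.episode)   # hypothetical buffer merge
# ---------------------------------------------------------------------------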