# Minimal DQN training loop on CartPole (baselines style). Assumes `env`, `act`,
# `train`, and `update_target` were built earlier in the script, as in the
# standard baselines custom_cartpole example.
import itertools

import numpy as np
import baselines.common.tf_util as U
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.common.schedules import LinearSchedule

replay_buffer = ReplayBuffer(50000)
# Create the schedule for exploration starting from 1 (every action is random) down to
# 0.02 (98% of actions are selected according to values predicted by the model).
exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02)

# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()

episode_rewards = [0.0]
obs = env.reset()
for t in itertools.count():
    # Take action and update exploration to the newest value
    action = act(obs[None], update_eps=exploration.value(t))[0]
    new_obs, rew, done, _ = env.step(action)
    # Store transition in the replay buffer.
    replay_buffer.add(obs, action, rew, new_obs, float(done))
    obs = new_obs

    episode_rewards[-1] += rew
    if done:
        obs = env.reset()
        episode_rewards.append(0)

    is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
    if is_solved:
        # Show off the result
        env.render()
    else:
        # Minimize the error in Bellman's equation on a batch sampled from the
        # replay buffer (branch body restored from the standard baselines example).
        if t > 1000:
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
            train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
        # Update target network periodically.
        if t % 1000 == 0:
            update_target()
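# The exploration annealing above relies on baselines' LinearSchedule. For
# reference, a minimal sketch of such a schedule (matching the baselines
# signature used here; only needed if you are not importing it from baselines):
class LinearScheduleSketch:
    """Linearly interpolate from initial_p to final_p over schedule_timesteps,
    then hold final_p constant."""

    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)

# e.g. with schedule_timesteps=10000, initial_p=1.0, final_p=0.02:
#   value(0) == 1.0, value(5000) == 0.51, value(10000) and beyond == 0.02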
class QRM(RL):
    """
    This class includes a list of policies (a.k.a. neural nets) for decomposing
    the current reward machine.
    """
    def __init__(self, lp, num_features, num_actions, reward_machine):
        super().__init__()
        # learning parameters
        self.lp = lp
        self.rm = reward_machine
        self.num_features = num_features
        self.num_actions = num_actions
        # Creating the network
        self.sess = tf.Session()
        self._create_network()
        # create the experience replay buffer
        if self.lp.prioritized_replay:
            self.replay_buffer = PrioritizedReplayBuffer(lp.buffer_size, alpha=lp.prioritized_replay_alpha)
            if lp.prioritized_replay_beta_iters is None:
                lp.prioritized_replay_beta_iters = lp.train_steps
            self.beta_schedule = LinearSchedule(lp.prioritized_replay_beta_iters,
                                                initial_p=lp.prioritized_replay_beta0,
                                                final_p=1.0)
        else:
            self.replay_buffer = ReplayBuffer(lp.buffer_size)
            self.beta_schedule = None
        # count of the number of environment steps
        self.step = 0

    def _get_step(self):
        return self.step

    def _add_step(self):
        self.step += 1

    def _create_network(self):
        n_features = self.num_features
        n_actions = self.num_actions
        n_policies = len(self.rm.get_states())

        # Inputs to the network
        self.s1 = tf.placeholder(tf.float64, [None, n_features])
        self.a = tf.placeholder(tf.int32)
        self.s2 = tf.placeholder(tf.float64, [None, n_features])
        self.done = tf.placeholder(tf.float64, [None, n_policies])
        self.ignore = tf.placeholder(tf.float64, [None, n_policies])
        self.rewards = tf.placeholder(tf.float64, [None, n_policies])
        self.next_policies = tf.placeholder(tf.int32, [None, n_policies])
        self.IS_weights = tf.placeholder(tf.float64)  # importance sampling weights for prioritized ER

        # Adding one policy per state in the RM
        self.policies = []
        for i in range(n_policies):
            # adding a policy for RM state "i"
            policy = PolicyDQN("qrm_%d" % i, self.lp, n_features, n_actions,
                               self.sess, self.s1, self.a, self.s2, self.IS_weights)
            self.policies.append(policy)
        # connecting all the networks into one big net
        self._reconnect()

    def _reconnect(self):
        # Redefining connections between the different DQN networks
        n_policies = len(self.policies)
        batch_size = self.lp.batch_size

        # concatenating the q_target of every policy
        Q_target_all = tf.concat([self.policies[i].get_q_target_value() for i in range(n_policies)], 1)

        # Indexing the right target next policy
        aux_range = tf.reshape(tf.range(batch_size), [-1, 1])
        aux_ones = tf.ones([1, n_policies], tf.int32)
        delta = tf.matmul(aux_range * n_policies, aux_ones)
        Q_target_index = tf.reshape(self.next_policies + delta, [-1])
        Q_target_flat = tf.reshape(Q_target_all, [-1])
        Q_target = tf.reshape(tf.gather(Q_target_flat, Q_target_index), [-1, n_policies])
        # Note: Q_target is a batch_size x n_policies tensor such that
        # Q_target[i,j] is the target Q-value for policy "j" in instance "i"

        # Matching the loss to the right Q_target
        for i in range(n_policies):
            p = self.policies[i]
            # Adding the critic trainer
            p.add_optimizer(self.rewards[:, i], self.done[:, i], Q_target[:, i], self.ignore[:, i])
            # Now that everything is set up, we initialize the weights
            p.initialize_variables()

        # Auxiliary variables to train all the critics, actors, and target networks
        self.train = []
        for i in range(n_policies):
            p = self.policies[i]
            if self.lp.prioritized_replay:
                self.train.append(p.td_error)
            self.train.append(p.train)

    def get_best_action(self, s1, u1, epsilon):
        if self._get_step() <= self.lp.learning_starts or random.random() < epsilon:
            # epsilon-greedy: random action while warming up or with probability epsilon
            return random.randrange(self.num_actions)
        policy = self.policies[u1]
        s1 = s1.reshape((1, self.num_features))
        return self.sess.run(policy.get_best_action(), {self.s1: s1})[0]

    def _train(self, s1, a, s2, rewards, next_policies, done, ignore, IS_weights):
        # Learning
        values = {self.s1: s1, self.a: a, self.s2: s2, self.rewards: rewards,
                  self.next_policies: next_policies, self.done: done,
                  self.ignore: ignore, self.IS_weights: IS_weights}
        res = self.sess.run(self.train, values)
        if self.lp.prioritized_replay:
            # Computing new priorities (max of the absolute td-errors across policies)
            td_errors = np.array([np.abs(td_error) for td_error in res if td_error is not None])
            td_errors_max = np.max(td_errors, axis=0)
            return td_errors_max

    def _learn(self):
        if self.lp.prioritized_replay:
            experience = self.replay_buffer.sample(self.lp.batch_size,
                                                   beta=self.beta_schedule.value(self._get_step()))
            s1, a, s2, rewards, next_policies, done, ignore, weights, batch_idxes = experience
        else:
            s1, a, s2, rewards, next_policies, done, ignore = self.replay_buffer.sample(self.lp.batch_size)
            weights, batch_idxes = None, None
        td_errors = self._train(s1, a, s2, rewards, next_policies, done, ignore, weights)  # absolute td-errors
        if self.lp.prioritized_replay:
            new_priorities = np.abs(td_errors) + self.lp.prioritized_replay_eps
            self.replay_buffer.update_priorities(batch_idxes, new_priorities)

    def _update_target_network(self):
        for i in range(len(self.policies)):
            self.policies[i].update_target_networks()

    def learn_if_needed(self):
        # Learning
        if self._get_step() > self.lp.learning_starts and self._get_step() % self.lp.train_freq == 0:
            self._learn()
        # Updating the target networks
        if self._get_step() > self.lp.learning_starts and self._get_step() % self.lp.target_network_update_freq == 0:
            self._update_target_network()

    def add_experience(self, o1_events, o1_features, u1, a, reward, o2_events, o2_features, u2, done):
        # NOTE:
        #   - The reward estimation might change over time
        #   - However, we are adding a fixed reward to the buffer (for simplicity)
        #   - In the future, we might try to recompute the reward every time the experience is sampled

        # Using the RM to compute, for every RM state, the reward, next policy, and
        # whether the transition is terminal or should be ignored
        n_policies = len(self.policies)
        rewards, next_policies, dones, ignores = [], [], [], []
        for ui in range(n_policies):
            ui_r = self.rm.get_reward(ui, o1_events, a, o2_events)
            ui_np = self.rm.get_next_state(ui, o2_events)
            ui_d = self.rm.is_terminal_observation(o2_events)
            # NOTE: We ignore transitions that are impossible (as explained in Sect. 5 of the paper)
            ui_ig = self.rm.is_observation_impossible(ui, o1_events, o2_events)
            rewards.append(ui_r)
            next_policies.append(ui_np)
            dones.append(float(ui_d))
            ignores.append(float(ui_ig))

        # Adding this experience to the replay buffer
        self.replay_buffer.add(o1_features, a, o2_features, rewards, next_policies, dones, ignores)
        self._add_step()
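# For orientation, a hedged sketch of how QRM is typically driven from an
# environment loop. The environment/reward-machine API used below (reset()
# returning features and events, get_initial_state(), etc.) is an assumption;
# only the QRM method names are taken from the class above.
agent = QRM(lp, num_features, num_actions, reward_machine)

features, events = env.reset()          # hypothetical env API
u = reward_machine.get_initial_state()  # hypothetical RM API
for _ in range(lp.train_steps):
    a = agent.get_best_action(features, u, epsilon=0.1)
    next_features, next_events, reward, done = env.step(a)
    u_next = reward_machine.get_next_state(u, next_events)
    agent.add_experience(events, features, u, a, reward,
                         next_events, next_features, u_next, done)
    agent.learn_if_needed()
    features, events, u = next_features, next_events, u_next
    if done:
        features, events = env.reset()
        u = reward_machine.get_initial_state()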
def learn(env,
          seed=None,
          num_agents=2,
          lr=0.00008,
          total_timesteps=100000,
          buffer_size=2000,
          exploration_fraction=0.2,
          exploration_final_eps=0.01,
          train_freq=1,
          batch_size=16,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=2000,
          gamma=0.99,
          target_network_update_freq=1000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    ----------
    env: gym.Env
        environment to train on
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results.
        If None, no seeding is used.
    num_agents: int
        number of agents acting in the (multi-agent) environment
    lr: float
        learning rate for the Adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of the entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of the random-action probability
    train_freq: int
        update the model every `train_freq` steps
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress; set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of training. If you do not wish to restore the best version
        at the end of training, set this variable to None.
    learning_starts: int
        how many steps of transitions to collect before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used
    prioritized_replay_alpha: float
        alpha parameter for the prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for the prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from its initial
        value to 1.0. If set to None, defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder

    Returns
    -------
    act: ActWrapper
        Wrapper over the act function. Adds the ability to save and load it.
        See the header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model
    set_global_seeds(seed)
    double_q = True
    grad_norm_clipping = True
    shared_weights = True
    play_test = 1000
    nsteps = 16
    agent_ids = env.agent_ids()

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    print(f'agent_ids {agent_ids}')
    num_actions = env.action_space.n
    print(f'num_actions {num_actions}')

    dqn_agent = MAgent(env, agent_ids, nsteps, lr, replay_buffer, shared_weights,
                       double_q, num_actions, gamma, grad_norm_clipping, param_noise)

    if load_path is not None:
        load_path = osp.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=dqn_agent.q_network)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        ckpt.restore(manager.latest_checkpoint)
        print("Restoring from {}".format(manager.latest_checkpoint))

    dqn_agent.update_target()

    episode_rewards = [0.0 for i in range(101)]
    saved_mean_reward = None
    obs_all = env.reset()
    obs_shape = obs_all  # note: despite the name, this stores the initial observations
    reset = True
    done = False

    # Start the total timer
    tstart = time.time()
    for t in range(total_timesteps):
        if callback is not None:
            if callback(locals(), globals()):
                break
        kwargs = {}
        if not param_noise:
            update_eps = tf.constant(exploration.value(t))
            update_param_noise_threshold = 0.
        else:
            update_eps = tf.constant(0.)
            # Compute the threshold such that the KL divergence between perturbed and non-perturbed
            # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
            # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
            # for a detailed explanation.
            update_param_noise_threshold = -np.log(1. - exploration.value(t) +
                                                   exploration.value(t) / float(env.action_space.n))
            kwargs['reset'] = reset
            kwargs['update_param_noise_threshold'] = update_param_noise_threshold
            kwargs['update_param_noise_scale'] = True

        if t % print_freq == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'time spent to perform steps {t - print_freq} to {t}: {nseconds}')
            print('eps update', exploration.value(t))

        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []
        epinfos = []
        for _ in range(nsteps):
            # Given the observations, take actions and get the fingerprints
            obs_ = tf.constant(obs_all)
            actions_list, fps_ = dqn_agent.choose_action(obs_, update_eps=update_eps, **kwargs)
            # For each agent, keep the fingerprints of all *other* agents
            fps = [[] for _ in agent_ids]
            for a in agent_ids:
                fps[a] = np.delete(fps_, a, axis=0)

            # Append the experiences
            mb_obs.append(obs_all.copy())
            mb_actions.append(actions_list)
            mb_values.append(fps)
            mb_dones.append([float(done) for _ in range(num_agents)])

            # Take the actions in the env and observe the results
            obs1_all, rews, done, info = env.step(actions_list)
            rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
            mb_rewards.append(rews)
            obs_all = obs1_all
            maybeepinfo = info[0].get('episode')
            if maybeepinfo:
                epinfos.append(maybeepinfo)
            episode_rewards[-1] += np.max(rews)
            if done:
                episode_rewards.append(0.0)
                obs_all = env.reset()
                reset = True

        mb_dones.append([float(done) for _ in range(num_agents)])

        mb_obs = np.asarray(mb_obs, dtype=obs_all[0].dtype)
        mb_actions = np.asarray(mb_actions, dtype=actions_list[0].dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        mb_masks = mb_dones[:-1]
        mb_dones = mb_dones[1:]

        if gamma > 0.0:
            # Discount/bootstrap off the value fn
            last_values = dqn_agent.value(tf.constant(obs_all))
            if mb_dones[-1][0] == 0:
                mb_rewards = discount_with_dones(
                    np.concatenate((mb_rewards, [last_values])),
                    np.concatenate((mb_dones, [[float(False) for _ in range(num_agents)]])),
                    gamma)[:-1]
            else:
                mb_rewards = discount_with_dones(mb_rewards, mb_dones, gamma)

        if replay_buffer is not None:
            replay_buffer.add(mb_obs, mb_actions, mb_rewards, obs1_all, mb_masks[:, 0],
                              mb_values, np.tile([exploration.value(t), t], (nsteps, num_agents, 1)))

        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
            if prioritized_replay:
                experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                # NOTE: this branch does not unpack fps/extra_datas, so the code
                # below would fail with prioritized replay as written.
            else:
                obses_t, actions, rewards, obses_tp1, dones, fps, extra_datas = replay_buffer.sample(batch_size)
                weights, batch_idxes = np.ones_like(rewards), None
            obses_t, obses_tp1 = tf.constant(obses_t), None
            actions, rewards, dones = tf.constant(actions), tf.constant(rewards, dtype=tf.float32), tf.constant(dones)
            weights, fps, extra_datas = tf.constant(weights), tf.constant(fps), tf.constant(extra_datas)

            # Flatten the (batch, agent) leading dimensions for training
            s = obses_t.shape
            obses_t = tf.reshape(obses_t, (s[0] * s[1], *s[2:]))
            s = actions.shape
            actions = tf.reshape(actions, (s[0] * s[1], *s[2:]))
            s = rewards.shape
            rewards = tf.reshape(rewards, (s[0] * s[1], *s[2:]))
            s = weights.shape
            weights = tf.reshape(weights, (s[0] * s[1], *s[2:]))
            s = fps.shape
            fps = tf.reshape(fps, (s[0] * s[1], *s[2:]))
            s = extra_datas.shape
            extra_datas = tf.reshape(extra_datas, (s[0] * s[1], *s[2:]))
            s = dones.shape
            dones = tf.reshape(dones, (s[0], s[1], *s[2:]))  # kept as (batch, agent, ...)

            td_errors = dqn_agent.nstep_train(obses_t, actions, rewards, obses_tp1,
                                              dones, weights, fps, extra_datas)

        if t > learning_starts and t % target_network_update_freq == 0:
            # Update the target network periodically.
            dqn_agent.update_target()

        if t % play_test == 0 and t != 0:
            play_test_games(dqn_agent)

        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)
        if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            print(f'last 100 episode mean reward {mean_100ep_reward} in {num_episodes} playing')
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.dump_tabular()
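# The rollout above bootstraps its n-step returns through discount_with_dones.
# For reference, a minimal sketch of that helper in the baselines A2C style
# (shown under the assumption that the imported version behaves the same way):
def discount_with_dones_sketch(rewards, dones, gamma):
    """Discounted returns over an n-step segment; a done of 1 cuts the
    bootstrap across an episode boundary."""
    discounted = []
    r = 0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1. - done)  # reset the running return at terminals
        discounted.append(r)
    return discounted[::-1]

# e.g. discount_with_dones_sketch([1, 1, 1], [0, 0, 0], 0.9) -> [2.71, 1.9, 1.0]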
class DQN:
    def __init__(self, config):
        self.writer = SummaryWriter()
        self.device = 'cuda' if T.cuda.is_available() else 'cpu'

        self.dqn_type = config["dqn-type"]
        self.run_title = config["run-title"]
        self.env = gym.make(config["environment"])

        self.num_states = np.prod(self.env.observation_space.shape)
        self.num_actions = self.env.action_space.n

        layers = [self.num_states, *config["architecture"], self.num_actions]

        self.policy_net = Q_Network(self.dqn_type, layers).to(self.device)
        self.target_net = Q_Network(self.dqn_type, layers).to(self.device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        capacity = config["max-experiences"]
        self.p_replay_eps = config["p-eps"]
        self.prioritized_replay = config["prioritized-replay"]
        self.replay_buffer = PrioritizedReplayBuffer(capacity, config["p-alpha"]) if self.prioritized_replay \
            else ReplayBuffer(capacity)
        self.beta_scheduler = LinearSchedule(config["episodes"], initial_p=config["p-beta-init"], final_p=1.0)
        self.epsilon_decay = lambda e: max(config["epsilon-min"], e * config["epsilon-decay"])

        self.train_freq = config["train-freq"]
        self.use_soft_update = config["use-soft-update"]
        self.target_update = config["target-update"]
        self.tau = config["tau"]
        self.gamma = config["gamma"]
        self.batch_size = config["batch-size"]
        self.time_step = 0

        self.optim = T.optim.AdamW(self.policy_net.parameters(), lr=config["lr-init"],
                                   weight_decay=config["weight-decay"])
        self.lr_scheduler = T.optim.lr_scheduler.StepLR(self.optim, step_size=config["lr-step"],
                                                        gamma=config["lr-gamma"])
        self.criterion = nn.SmoothL1Loss(reduction="none")  # Huber loss
        self.min_experiences = max(config["min-experiences"], config["batch-size"])

        self.save_path = config["save-path"]

    def act(self, state, epsilon=0):
        """Act on the environment using an epsilon-greedy policy."""
        if np.random.sample() < epsilon:
            return int(np.random.choice(np.arange(self.num_actions)))
        else:
            self.policy_net.eval()
            return self.policy_net(T.tensor(state, device=self.device).float().unsqueeze(0)).argmax().item()

    def _soft_update(self, tau):
        """
        Polyak averaging: soft-update the target parameters,
            θ_target = τ*θ_target + (1 - τ)*θ_current
        (as implemented below; τ close to 1 makes the target move slowly).
        """
        for target_param, current_param in zip(self.target_net.parameters(), self.policy_net.parameters()):
            target_param.data.copy_(tau * target_param.data + (1.0 - tau) * current_param.data)

    def update_target(self, tau):
        if self.use_soft_update:
            self._soft_update(tau)
        elif self.time_step % self.target_update == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

    def optimize(self, beta=None):
        if len(self.replay_buffer) < self.min_experiences:
            return None, None

        self.policy_net.train()
        if self.prioritized_replay:
            transitions, (is_weights, t_idxes) = self.replay_buffer.sample(self.batch_size, beta)
        else:
            transitions = self.replay_buffer.sample(self.batch_size)
            is_weights, t_idxes = np.ones(self.batch_size), None

        # transpose the batch --> transition of batch-arrays
        batch = Transition(*zip(*transitions))

        # compute a mask of non-final states and concatenate the batch elements
        non_final_mask = T.tensor(tuple(map(lambda state: state is not None, batch.next_state)),
                                  device=self.device, dtype=T.bool)
        non_final_next_states = T.cat([T.tensor([state]).float()
                                       for state in batch.next_state if state is not None]).to(self.device)

        state_batch = T.tensor(batch.state, device=self.device).float()
        action_batch = T.tensor(batch.action, device=self.device).long()
        reward_batch = T.tensor(batch.reward, device=self.device).float()

        state_action_values = self.policy_net(state_batch).gather(1, action_batch.unsqueeze(1))

        next_state_values = T.zeros(self.batch_size, device=self.device)
        if self.dqn_type == "vanilla":
            next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach()
        else:
            # double DQN: select the action with the policy net, evaluate it with the target net
            self.policy_net.eval()
            action_next_state = self.policy_net(non_final_next_states).max(1)[1]
            self.policy_net.train()
            next_state_values[non_final_mask] = self.target_net(non_final_next_states).gather(
                1, action_next_state.unsqueeze(1)).squeeze().detach()

        # compute the expected Q values (RHS of the Bellman equation)
        expected_state_action_values = (next_state_values * self.gamma) + reward_batch

        # compute the temporal-difference error
        td_error = T.abs(state_action_values.squeeze() - expected_state_action_values).detach().cpu().numpy()

        # compute the Huber loss, weighted by the importance-sampling weights
        loss = self.criterion(state_action_values, expected_state_action_values.unsqueeze(1))
        loss = T.mean(loss * T.tensor(is_weights, device=self.device))

        # optimize the model
        self.optim.zero_grad()
        loss.backward()
        for param in self.policy_net.parameters():
            param.grad.data.clamp_(-1, 1)
        self.optim.step()

        return td_error, t_idxes

    def run_episode(self, epsilon, beta):
        total_reward, done = 0, False
        state = self.env.reset()
        while not done:
            # use epsilon-greedy to get an action
            action = self.act(state, epsilon)
            # cache the current state
            prev_state = state
            # take the action
            state, reward, done, _ = self.env.step(action)
            # accumulate the reward
            total_reward += reward
            # store the transition in the buffer
            if done:
                state = None
            self.replay_buffer.push(prev_state, action, state, reward)
            # optimize the model
            if self.time_step % self.train_freq == 0:
                td_error, t_idxes = self.optimize(beta=beta)
                # update priorities
                if self.prioritized_replay and td_error is not None:
                    self.replay_buffer.update_priorities(t_idxes, td_error + self.p_replay_eps)
            # update the target network
            self.update_target(self.tau)
            # increment the time-step
            self.time_step += 1
        return total_reward

    def train(self, episodes, epsilon, solved_reward):
        total_rewards = np.zeros(episodes)
        for episode in range(episodes):
            # compute beta using the linear scheduler
            beta = self.beta_scheduler.value(episode)
            # run an episode and collect its reward
            reward = self.run_episode(epsilon, beta)
            # exponentially decay epsilon
            epsilon = self.epsilon_decay(epsilon)
            # step the learning-rate schedule
            self.lr_scheduler.step()

            total_rewards[episode] = reward
            avg_reward = total_rewards[max(0, episode - 100):(episode + 1)].mean()
            last_lr = self.lr_scheduler.get_last_lr()[0]

            # log to tensorboard
            self.writer.add_scalar(f'dqn-{self.dqn_type}/reward', reward, episode)
            self.writer.add_scalar(f'dqn-{self.dqn_type}/reward_100', avg_reward, episode)
            self.writer.add_scalar(f'dqn-{self.dqn_type}/lr', last_lr, episode)
            self.writer.add_scalar(f'dqn-{self.dqn_type}/epsilon', epsilon, episode)

            print(f"Episode: {episode} | Last 100 Average Reward: {avg_reward:.5f} | "
                  f"Learning Rate: {last_lr:.5E} | Epsilon: {epsilon:.5E}", end='\r')

            if avg_reward > solved_reward:
                break

        self.writer.close()
        print(f"Environment solved in {episode} episodes")
        T.save(self.policy_net.state_dict(), os.path.join(self.save_path, f"{self.run_title}.pt"))

    def visualize(self, load_path=None):
        done = False
        state = self.env.reset()
        if load_path is not None:
            self.policy_net.load_state_dict(T.load(load_path, map_location=self.device))
            self.policy_net.eval()
        while not done:
            self.env.render()
            action = self.act(state)
            state, _, done, _ = self.env.step(int(action))
            sleep(0.01)
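# A hedged usage sketch for the DQN class above. The concrete values are
# illustrative assumptions; only the keys match the ones read in __init__.
config = {
    "dqn-type": "double",            # anything but "vanilla" enables double DQN
    "run-title": "cartpole-ddqn",
    "environment": "CartPole-v1",
    "architecture": [128, 128],
    "max-experiences": 100000,
    "min-experiences": 1000,
    "prioritized-replay": True,
    "p-alpha": 0.6, "p-beta-init": 0.4, "p-eps": 1e-6,
    "episodes": 2000,
    "epsilon-min": 0.01, "epsilon-decay": 0.995,
    "train-freq": 1,
    "use-soft-update": True, "target-update": 1000,
    "tau": 0.999,                    # tau close to 1: the target net moves slowly
    "gamma": 0.99, "batch-size": 64,
    "lr-init": 1e-3, "lr-step": 500, "lr-gamma": 0.5,
    "weight-decay": 0.0,
    "save-path": "./checkpoints",
}
agent = DQN(config)
agent.train(episodes=config["episodes"], epsilon=1.0, solved_reward=475)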
def learn_att(env,
              q_func,
              seed=None,
              lr=5e-4,
              total_timesteps=100000,
              buffer_size=50000,
              exploration_fraction=0.1,
              exploration_final_eps=0.02,
              train_freq=1,
              batch_size=32,
              print_freq=100,
              checkpoint_freq=10000,
              checkpoint_path=None,
              learning_starts=1000,
              gamma=1.0,
              target_network_update_freq=500,
              prioritized_replay=False,
              prioritized_replay_alpha=0.6,
              prioritized_replay_beta0=0.4,
              prioritized_replay_beta_iters=None,
              prioritized_replay_eps=1e-6,
              param_noise=False,
              callback=None,
              load_path=None,
              **network_kwargs):
    # Create all the functions necessary to train the model
    sess = get_session()
    set_global_seeds(seed)
    # q_func = build_q_func(network, **network_kwargs) is not needed here, since
    # the caller passes q_func directly instead of a network name.

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = build_train_att(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        # a mask function restricting the choice of actions must be supplied here;
        # the original snippet left this argument blank, so None is a placeholder
        mask_func=None,
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }
    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take an action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for a detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) +
                                                       exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store the transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update the target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
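# build_train_att expects an action-mask function that the original snippet left
# blank. A hedged sketch of what such a callable might look like; the
# (q_values, valid_actions_mask) signature is an assumption about
# build_train_att's contract, not code from this repository:
def example_mask_func(q_values, valid_actions_mask):
    """Push the Q-values of invalid actions to the dtype minimum so that
    argmax (and any softmax) only considers valid ones."""
    neg_inf = tf.fill(tf.shape(q_values), q_values.dtype.min)
    return tf.where(tf.cast(valid_actions_mask, tf.bool), q_values, neg_inf)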
class Agent(tf.Module):
    def __init__(self, config, env):
        self.config = config
        self.agent_ids = [a for a in range(config.num_agents)]
        self.env = env
        self.optimizer = tf.keras.optimizers.Adam(self.config.lr)
        self.replay_memory, self.beta_schedule = init_replay_memory(config)
        # Create the schedule for exploration starting from 1.
        self.exploration = LinearSchedule(
            schedule_timesteps=int(config.exploration_fraction * config.num_timesteps),
            initial_p=1.0,
            final_p=config.exploration_final_eps)
        self.loss = self.nstep_loss
        self.eps = tf.Variable(0.0)

        # init the model
        self.network = Network(config)

    # NOTE: the bodies below call .numpy(), which only works eagerly; as written,
    # the @tf.function decorators require eager execution (or should be removed).
    @tf.function
    def choose_action(self, obs, stochastic=True, update_eps=-1):
        """
        :param obs: list of observations, one per agent
        :param stochastic: True for the train phase, False for the test phase
        :param update_eps: epsilon update for eps-greedy
        :return: actions: list of actions chosen by the agents, one per agent
        """
        deterministic_actions, fps = self.network.step(obs)
        batch_size = len(self.agent_ids)
        random_actions = tf.random.uniform(tf.stack([batch_size]), minval=0,
                                           maxval=self.config.num_actions, dtype=tf.int64)
        chose_random = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=1,
                                         dtype=tf.float32) < self.eps
        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)

        if stochastic:
            actions = stochastic_actions.numpy()
        else:
            actions = deterministic_actions

        if update_eps >= 0:
            self.eps.assign(update_eps)

        return actions, fps

    @tf.function()
    def nstep_loss(self, obses_t_a, actions_a, rewards_a, dones_a, weights_a, fps_a, agent_id):
        q_t = self.network.value(obses_t_a, fps_a, agent_id)
        q_t_selected = tf.reduce_sum(
            q_t * tf.one_hot(actions_a, self.config.num_actions, dtype=tf.float32), 1)
        td_error = q_t_selected - tf.stop_gradient(rewards_a)
        errors = huber_loss(td_error)
        weighted_loss = tf.reduce_mean(weights_a * errors)
        return weighted_loss, td_error

    @tf.function()
    def train(self, obses_t, actions, rewards, dones, weights, fps):
        td_errors = []
        loss = []
        with tf.GradientTape() as tape:
            for a in self.agent_ids:
                if self.config.network == 'tdcnn_rnn':
                    loss_a, td_error = self.loss(obses_t[a], actions[a, :, -1], rewards[a, :, -1],
                                                 dones[a, :, -1], weights[a, :, -1], fps[a], a)
                else:
                    loss_a, td_error = self.loss(obses_t[a], actions[a], rewards[a],
                                                 dones[a], weights[a], fps[a], a)
                loss.append(loss_a)
                td_errors.append(td_error)
            sum_loss = tf.reduce_sum(loss)
            sum_td_error = tf.reduce_sum(td_errors)

        param = self.network.model.trainable_variables
        for a in self.agent_ids:
            param += self.network.agent_heads[a].trainable_variables

        grads = tape.gradient(sum_loss, param)
        if self.config.grad_norm_clipping:
            clipped_grads = []
            for grad in grads:
                clipped_grads.append(tf.clip_by_norm(grad, self.config.grad_norm_clipping))
            grads = clipped_grads
        grads_and_vars = list(zip(grads, param))
        self.optimizer.apply_gradients(grads_and_vars)

        return sum_loss.numpy(), sum_td_error.numpy()

    def learn(self):
        self.network.soft_update_target()
        episode_rewards = [0.0]
        obs = self.env.reset()
        done = False
        tstart = time.time()
        episodes_trained = [0, False]  # [episode_number, done flag]
        for t in range(self.config.num_timesteps):
            update_eps = tf.constant(self.exploration.value(t))
            if t % self.config.print_freq == 0:
                time_1000_step = time.time()
                nseconds = time_1000_step - tstart
                tstart = time_1000_step
                print(f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq} to {t} steps: {nseconds}')

            mb_obs, mb_rewards, mb_actions, mb_fps, mb_dones = [], [], [], [], []
            epinfos = []
            for nstep in range(self.config.n_steps):
                actions, fps_ = self.choose_action(tf.constant(obs), update_eps=update_eps)
                # For each agent, keep the fingerprints of all other agents, plus
                # the current exploration rate and timestep
                fps = []
                if self.config.num_agents > 1:
                    for a in self.agent_ids:
                        fp = fps_[:a]
                        fp.extend(fps_[a + 1:])
                        fp_a = np.concatenate((fp, [[self.exploration.value(t) * 100, t]]), axis=None)
                        fps.append(fp_a)

                mb_obs.append(obs.copy())
                mb_actions.append(actions)
                mb_fps.append(fps)
                mb_dones.append([float(done) for _ in self.agent_ids])

                obs1, rews, done, info = self.env.step(actions.tolist())
                if self.config.same_reward_for_agents:
                    rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
                mb_rewards.append(rews)
                obs = obs1
                maybeepinfo = info.get('episode')
                if maybeepinfo:
                    epinfos.append(maybeepinfo)
                episode_rewards[-1] += np.max(rews)
                if done:
                    episodes_trained[0] = episodes_trained[0] + 1
                    episodes_trained[1] = True
                    episode_rewards.append(0.0)
                    obs = self.env.reset()

            mb_dones.append([float(done) for _ in self.agent_ids])

            # swap axes to have lists in the shape (num_agents, num_steps, ...)
            mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1)
            mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1)
            mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
            mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1)
            mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)
            mb_masks = mb_dones[:, :-1]
            mb_dones = mb_dones[:, 1:]

            if self.config.gamma > 0.0:
                # Discount/bootstrap off the value fn
                last_values = self.network.last_value(tf.constant(obs1))
                for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                    rewards = rewards.tolist()
                    dones = dones.tolist()
                    if dones[-1] == 0:
                        rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1]
                    else:
                        rewards = discount_with_dones(rewards, dones, self.config.gamma)
                    mb_rewards[n] = rewards

            if self.config.replay_buffer is not None:
                self.replay_memory.add((mb_obs, mb_actions, mb_rewards, obs1, mb_masks, mb_fps))

            if t > self.config.learning_starts and t % self.config.train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
                if self.config.prioritized_replay:
                    experience = self.replay_memory.sample(self.config.batch_size,
                                                           beta=self.beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, fps, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones, fps = self.replay_memory.sample(
                        self.config.batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                # shape format is (batch_size, agent_num, n_steps, ...)
                obses_t = obses_t.swapaxes(0, 1)
                actions = actions.swapaxes(0, 1)
                rewards = rewards.swapaxes(0, 1)
                obses_tp1 = obses_tp1.swapaxes(0, 1)
                dones = dones.swapaxes(0, 1)
                fps = fps.swapaxes(0, 1)
                weights = weights.swapaxes(0, 1)

                if self.config.network == 'cnn':
                    # fold (batch, n_steps) into a single axis per agent
                    shape = obses_t.shape
                    obses_t = np.reshape(obses_t, (shape[0], shape[1] * shape[2], *shape[3:]))
                    shape = actions.shape
                    actions = np.reshape(actions, (shape[0], shape[1] * shape[2], *shape[3:]))
                    shape = rewards.shape
                    rewards = np.reshape(rewards, (shape[0], shape[1] * shape[2], *shape[3:]))
                    shape = dones.shape
                    dones = np.reshape(dones, (shape[0], shape[1] * shape[2], *shape[3:]))
                    shape = weights.shape
                    weights = np.reshape(weights, (shape[0], shape[1] * shape[2], *shape[3:]))
                    shape = fps.shape
                    fps = np.reshape(fps, (shape[0], shape[1] * shape[2], *shape[3:]))

                # shape format is (agent_num, batch_size, n_steps, ...)
                obses_t = tf.constant(obses_t)
                actions = tf.constant(actions)
                rewards = tf.constant(rewards)
                dones = tf.constant(dones)
                weights = tf.constant(weights)
                fps = tf.constant(fps)

                loss, td_errors = self.train(obses_t, actions, rewards, dones, weights, fps)
                if t % (self.config.train_freq * 50) == 0:
                    print(f't = {t} , loss = {loss}')

            if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0:
                # Update the target network periodically.
                self.network.soft_update_target()

            if t % self.config.playing_test == 0 and t != 0:
                self.play_test_games()

            mean_100ep_reward = np.mean(episode_rewards[-101:-1])
            num_episodes = len(episode_rewards)
            if episodes_trained[1] and episodes_trained[0] % self.config.print_freq == 0:
                episodes_trained[1] = False
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 past episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
                logger.dump_tabular()

    def play_test_games(self):
        num_tests = self.config.num_tests
        test_env = init_env(self.config, mode='test')
        test_rewards = np.zeros(num_tests)
        for i in range(num_tests):
            test_done = False
            test_obs_all = test_env.reset()
            while not test_done:
                test_obs_all = tf.constant(test_obs_all)
                test_action_list, _ = self.choose_action(test_obs_all, stochastic=False)
                test_new_obs_list, test_rew_list, test_done, _ = test_env.step(test_action_list)
                test_obs_all = test_new_obs_list
                if test_done:
                    test_rewards[i] = np.mean(test_rew_list)
        print(f'test_rewards: {test_rewards} \n mean reward of {num_tests} tests: {np.mean(test_rewards)}')
        test_env.close()
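# nstep_loss above relies on a huber_loss helper that is not shown. For
# reference, a minimal sketch in the baselines style (assuming the imported
# helper matches this standard form):
def huber_loss_sketch(x, delta=1.0):
    """Quadratic for |x| < delta, linear beyond it; keeps gradients bounded."""
    return tf.where(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta),
    )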
class DQNAgent(object):
    """
    refs: https://github.com/skumar9876/Hierarchical-DQN/blob/master/dqn.py
    """
    def __init__(self,
                 states_n: tuple,
                 actions_n: int,
                 hidden_layers: list,
                 scope_name: str,
                 sess=None,
                 learning_rate=1e-4,
                 discount=0.98,
                 replay_memory_size=100000,
                 batch_size=32,
                 begin_train=1000,
                 targetnet_update_freq=1000,
                 epsilon_start=1.0,
                 epsilon_end=0.1,
                 epsilon_decay_step=50000,
                 seed=1,
                 logdir='logs',
                 savedir='save',
                 save_freq=10000,
                 use_tau=False,
                 tau=0.001):
        """
        :param states_n: tuple
        :param actions_n: int
        :param hidden_layers: list
        :param scope_name: str
        :param sess: tf.Session
        :param learning_rate: float
        :param discount: float
        :param replay_memory_size: int
        :param batch_size: int
        :param begin_train: int
        :param targetnet_update_freq: int
        :param epsilon_start: float
        :param epsilon_end: float
        :param epsilon_decay_step: int
        :param seed: int
        :param logdir: str
        """
        self.states_n = states_n
        self.actions_n = actions_n
        self._hidden_layers = hidden_layers
        self._scope_name = scope_name
        self.lr = learning_rate
        self._target_net_update_freq = targetnet_update_freq
        self._current_time_step = 0
        self._epsilon_schedule = LinearSchedule(epsilon_decay_step, epsilon_end, epsilon_start)
        self._train_batch_size = batch_size
        self._begin_train = begin_train
        self._gamma = discount
        self._use_tau = use_tau
        self._tau = tau

        self.savedir = savedir
        self.save_freq = save_freq

        self.qnet_optimizer = tf.train.AdamOptimizer(self.lr)

        self._replay_buffer = ReplayBuffer(replay_memory_size)

        self._seed(seed)

        with tf.Graph().as_default():
            self._build_graph()
            self._merged_summary = tf.summary.merge_all()
            if sess is None:
                self.sess = tf.Session()
            else:
                self.sess = sess
            self.sess.run(tf.global_variables_initializer())
            self._saver = tf.train.Saver()
            self._summary_writer = tf.summary.FileWriter(logdir=logdir)
            self._summary_writer.add_graph(tf.get_default_graph())

    def show_memory(self):
        print(self._replay_buffer.show())

    def _q_network(self, state, hidden_layers, outputs, scope_name, trainable):
        with tf.variable_scope(scope_name):
            out = state
            for ly in hidden_layers:
                out = layers.fully_connected(out, ly, activation_fn=tf.nn.relu, trainable=trainable)
            out = layers.fully_connected(out, outputs, activation_fn=None, trainable=trainable)
        return out

    def _build_graph(self):
        self._state = tf.placeholder(dtype=tf.float32, shape=(None,) + self.states_n, name='state_input')
        with tf.variable_scope(self._scope_name):
            self._q_values = self._q_network(self._state, self._hidden_layers, self.actions_n,
                                             'q_network', True)
            self._target_q_values = self._q_network(self._state, self._hidden_layers, self.actions_n,
                                                    'target_q_network', False)

        with tf.variable_scope('q_network_update'):
            self._actions_onehot = tf.placeholder(dtype=tf.float32, shape=(None, self.actions_n),
                                                  name='actions_onehot_input')
            self._td_targets = tf.placeholder(dtype=tf.float32, shape=(None,), name='td_targets')
            self._q_values_pred = tf.reduce_sum(self._q_values * self._actions_onehot, axis=1)

            # Huber loss: quadratic for errors in [0, 1], linear beyond
            self._error = tf.abs(self._q_values_pred - self._td_targets)
            quadratic_part = tf.clip_by_value(self._error, 0.0, 1.0)
            linear_part = self._error - quadratic_part
            self._loss = tf.reduce_mean(0.5 * tf.square(quadratic_part) + linear_part)

            qnet_gradients = self.qnet_optimizer.compute_gradients(self._loss, tf.trainable_variables())
            for i, (grad, var) in enumerate(qnet_gradients):
                if grad is not None:
                    qnet_gradients[i] = (tf.clip_by_norm(grad, 10), var)
            self.train_op = self.qnet_optimizer.apply_gradients(qnet_gradients)

            tf.summary.scalar('loss', self._loss)

        with tf.name_scope('target_network_update'):
            q_network_params = [
                t for t in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                             scope=self._scope_name + '/q_network')
                if t.name.startswith(self._scope_name + '/q_network/')
            ]
            target_q_network_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                                        scope=self._scope_name + '/target_q_network')

            self.target_update_ops = []
            for var, var_target in zip(sorted(q_network_params, key=lambda v: v.name),
                                       sorted(target_q_network_params, key=lambda v: v.name)):
                # a hard update would be: var_target.assign(var)
                # soft target update:
                self.target_update_ops.append(
                    var_target.assign(tf.multiply(var_target, 1 - self._tau) +
                                      tf.multiply(var, self._tau)))
            self.target_update_ops = tf.group(*self.target_update_ops)

    def choose_action(self, state, epsilon=None):
        """
        Pick an action for one agent (epsilon-greedy).
        :param state: current observation
        :param epsilon: exploration rate; defaults to the schedule's current value
        """
        if epsilon is not None:
            epsilon_used = epsilon
        else:
            epsilon_used = self._epsilon_schedule.value(self._current_time_step)
        if np.random.random() < epsilon_used:
            return np.random.randint(0, self.actions_n)
        else:
            q_values = self.sess.run(self._q_values, feed_dict={self._state: state[None]})
            return np.argmax(q_values[0])

    def choose_actions(self, states, epsilons=None):
        """
        Pick actions for multiple agents (per-agent epsilon-greedy).
        :param states: list of observations, one per agent
        :param epsilons: list of exploration rates, one per agent
        """
        # NOTE: when epsilons is None, the schedule returns a scalar, so the
        # per-agent indexing below assumes a list was passed.
        if epsilons is not None:
            epsilons_used = epsilons
        else:
            epsilons_used = self._epsilon_schedule.value(self._current_time_step)
        actions = []
        for i, state in enumerate(states):
            if np.random.random() < epsilons_used[i]:
                actions.append(np.random.randint(0, self.actions_n))
            else:
                q_values = self.sess.run(self._q_values, feed_dict={self._state: state[None]})
                actions.append(np.argmax(q_values[0]))
        return actions

    def check_network_output(self, state):
        q_values = self.sess.run(self._q_values, feed_dict={self._state: state[None]})
        print(q_values[0])

    def store(self, state, action, reward, next_state, terminate):
        self._replay_buffer.add(state, action, reward, next_state, terminate)

    def get_max_target_Q_s_a(self, next_states):
        # Double-DQN style target: select actions with the online net,
        # evaluate them with the target net
        next_state_q_values = self.sess.run(self._q_values, feed_dict={self._state: next_states})
        next_state_target_q_values = self.sess.run(self._target_q_values,
                                                   feed_dict={self._state: next_states})
        next_select_actions = np.argmax(next_state_q_values, axis=1)
        bt_sz = len(next_states)
        next_select_actions_onehot = np.zeros((bt_sz, self.actions_n))
        for i in range(bt_sz):
            next_select_actions_onehot[i, next_select_actions[i]] = 1.
        next_state_max_q_values = np.sum(next_state_target_q_values * next_select_actions_onehot, axis=1)
        return next_state_max_q_values

    def train(self):
        self._current_time_step += 1
        if self._current_time_step == 1:
            print('Training starts.')
            self.sess.run(self.target_update_ops)
        if self._current_time_step > self._begin_train:
            states, actions, rewards, next_states, terminates = self._replay_buffer.sample(
                batch_size=self._train_batch_size)
            actions_onehot = np.zeros((self._train_batch_size, self.actions_n))
            for i in range(self._train_batch_size):
                actions_onehot[i, actions[i]] = 1.

            # mirrors get_max_target_Q_s_a on the sampled batch
            next_state_q_values = self.sess.run(self._q_values, feed_dict={self._state: next_states})
            next_state_target_q_values = self.sess.run(self._target_q_values,
                                                       feed_dict={self._state: next_states})
            next_select_actions = np.argmax(next_state_q_values, axis=1)
            next_select_actions_onehot = np.zeros((self._train_batch_size, self.actions_n))
            for i in range(self._train_batch_size):
                next_select_actions_onehot[i, next_select_actions[i]] = 1.
            next_state_max_q_values = np.sum(next_state_target_q_values * next_select_actions_onehot,
                                             axis=1)

            td_targets = rewards + self._gamma * next_state_max_q_values * (1 - terminates)

            _, str_ = self.sess.run([self.train_op, self._merged_summary],
                                    feed_dict={self._state: states,
                                               self._actions_onehot: actions_onehot,
                                               self._td_targets: td_targets})
            self._summary_writer.add_summary(str_, self._current_time_step)

        # update target_net
        if self._use_tau:
            self.sess.run(self.target_update_ops)
        else:
            if self._current_time_step % self._target_net_update_freq == 0:
                self.sess.run(self.target_update_ops)

        # save the model
        if self._current_time_step % self.save_freq == 0:
            # TODO save the model with the highest performance
            self._saver.save(sess=self.sess, save_path=self.savedir + '/my-model',
                             global_step=self._current_time_step)

    def train_without_replaybuffer(self, states, actions, target_values):
        self._current_time_step += 1
        if self._current_time_step == 1:
            print('Training starts.')
            self.sess.run(self.target_update_ops)

        bt_sz = len(states)
        actions_onehot = np.zeros((bt_sz, self.actions_n))
        for i in range(bt_sz):
            actions_onehot[i, actions[i]] = 1.

        _, str_ = self.sess.run([self.train_op, self._merged_summary],
                                feed_dict={self._state: states,
                                           self._actions_onehot: actions_onehot,
                                           self._td_targets: target_values})
        self._summary_writer.add_summary(str_, self._current_time_step)

        # update target_net
        if self._use_tau:
            self.sess.run(self.target_update_ops)
        else:
            if self._current_time_step % self._target_net_update_freq == 0:
                self.sess.run(self.target_update_ops)

        # save the model
        if self._current_time_step % self.save_freq == 0:
            # TODO save the model with the highest performance
            self._saver.save(sess=self.sess, save_path=self.savedir + '/my-model',
                             global_step=self._current_time_step)

    def load_model(self):
        self._saver.restore(self.sess, tf.train.latest_checkpoint(self.savedir))

    def _seed(self, lucky_number):
        tf.set_random_seed(lucky_number)
        np.random.seed(lucky_number)
        random.seed(lucky_number)
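# A hedged driver-loop sketch for DQNAgent above. The environment and
# hyperparameters are illustrative assumptions; only the method names
# (choose_action, store, train) are taken from the class.
import gym

env = gym.make('CartPole-v0')
agent = DQNAgent(states_n=env.observation_space.shape,
                 actions_n=env.action_space.n,
                 hidden_layers=[64, 64],
                 scope_name='cartpole_dqn')

obs = env.reset()
for step in range(100000):
    action = agent.choose_action(obs)            # epsilon comes from the internal schedule
    next_obs, reward, done, _ = env.step(action)
    agent.store(obs, action, reward, next_obs, float(done))
    agent.train()                                # only optimizes after begin_train calls
    obs = env.reset() if done else next_obs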
def main(env_name='KungFuMasterNoFrameskip-v0', train_freq=4, target_update_freq=10000,
         checkpoint_freq=100000, log_freq=1, batch_size=32, train_after=200000,
         max_timesteps=5000000, buffer_size=50000, vmin=-10, vmax=10, n=51, gamma=0.99,
         final_eps=0.1, final_eps_update=1000000, learning_rate=0.00025, momentum=0.95):
    env = gym.make(env_name)
    env = wrap_env(env)

    state_dim = (4, 84, 84)
    action_count = env.action_space.n

    with C.default_options(activation=C.relu, init=C.he_uniform()):
        model_func = Sequential([
            Convolution2D((8, 8), 32, strides=4, name='conv1'),
            Convolution2D((4, 4), 64, strides=2, name='conv2'),
            Convolution2D((3, 3), 64, strides=1, name='conv3'),
            Dense(512, name='dense1'),
            Dense((action_count, n), activation=None, name='out')
        ])

    agent = CategoricalAgent(state_dim, action_count, model_func, vmin, vmax, n, gamma,
                             lr=learning_rate, mm=momentum, use_tensorboard=True)
    logger = agent.writer

    # NOTE: this call passes (initial, final, timesteps), so a local schedule class
    # with that argument order is presumably in scope (unlike the baselines
    # LinearSchedule used elsewhere in this section).
    epsilon_schedule = LinearSchedule(1.0, final_eps, final_eps_update)
    replay_buffer = ReplayBuffer(buffer_size)

    try:
        obs = env.reset()
        episode = 0
        rewards = 0
        steps = 0

        for t in range(max_timesteps):
            # Take an action: epsilon-greedy after the warm-up, uniformly random before
            if t > train_after:
                action = agent.act(obs, epsilon=epsilon_schedule.value(t))
            else:
                action = np.random.choice(action_count)
            obs_, reward, done, _ = env.step(action)

            # Store the transition in the replay buffer
            replay_buffer.add(obs, action, reward, obs_, float(done))
            obs = obs_
            rewards += reward

            if t > train_after and (t % train_freq) == 0:
                # Minimize the error in the projected Bellman update on a batch sampled from the replay buffer
                experience = replay_buffer.sample(batch_size)
                agent.train(*experience)  # experience is an (s, a, r, s_, t) tuple
                logger.write_value('loss', agent.trainer.previous_minibatch_loss_average, t)

            if t > train_after and (t % target_update_freq) == 0:
                agent.update_target()

            if t > train_after and (t % checkpoint_freq) == 0:
                agent.checkpoint('checkpoints/model_{}.chkpt'.format(t))

            if done:
                episode += 1
                obs = env.reset()

                if episode % log_freq == 0:
                    steps = t - steps + 1
                    logger.write_value('rewards', rewards, episode)
                    logger.write_value('steps', steps, episode)
                    logger.write_value('epsilon', epsilon_schedule.value(t), episode)
                    logger.flush()

                    rewards = 0
                    steps = t
    finally:
        agent.save_model('checkpoints/{}.cdqn'.format(env_name))
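# For context: vmin, vmax, and n above define the fixed support of the
# categorical (C51) return distribution. A small sketch of how the support and
# the expected Q-values are derived from predicted atom probabilities
# (standard C51 arithmetic, not code from this repository):
import numpy as np

vmin, vmax, n = -10.0, 10.0, 51
dz = (vmax - vmin) / (n - 1)             # atom spacing
z = vmin + dz * np.arange(n)             # support: z_i = vmin + i * dz

p = np.full((4, n), 1.0 / n)             # placeholder per-action atom probabilities
q_values = (p * z).sum(axis=1)           # expected Q per action: sum_i p_i * z_i
greedy_action = int(q_values.argmax())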
class LypSarsaAgent(object): def __init__(self, args, env, writer = None): """ init agent """ self.eval_env = copy.deepcopy(env) self.args = args self.state_dim = env.reset().shape self.action_dim = env.action_space.n self.device = torch.device("cuda" if (torch.cuda.is_available() and self.args.gpu) else "cpu") # set the random seed the same as the main launcher random.seed(self.args.seed) torch.manual_seed(self.args.seed) np.random.seed(self.args.seed) if self.args.gpu: torch.cuda.manual_seed(self.args.seed ) self.writer = writer if self.args.env_name == "grid": self.dqn = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.dqn_target = OneHotDQN(self.state_dim, self.action_dim).to(self.device) # create more networks here self.cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.target_cost_model = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.target_cost_model.load_state_dict(self.cost_model.state_dict()) else: raise Exception("what kind of DQN env is this?") # copy parameters self.dqn_target.load_state_dict(self.dqn.state_dict()) self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=self.args.lr) self.critic_optimizer = optim.Adam(self.cost_model.parameters(), lr=self.args.cost_q_lr) # make the envs def make_env(): def _thunk(): env = create_env(args) return env return _thunk envs = [make_env() for i in range(self.args.num_envs)] self.envs = SubprocVecEnv(envs) # create epsilon and beta schedule self.eps_decay = LinearSchedule(50000 * 200, 0.01, 1.0) # self.eps_decay = LinearSchedule(self.args.num_episodes * 200, 0.01, 1.0) self.total_steps = 0 self.num_episodes = 0 # for storing resutls self.results_dict = { "train_rewards" : [], "train_constraints" : [], "eval_rewards" : [], "eval_constraints" : [], } self.cost_indicator = "none" if "grid" in self.args.env_name: self.cost_indicator = 'pit' else: raise Exception("not implemented yet") self.eps = self.eps_decay.value(self.total_steps) def pi(self, state, current_cost=0.0, greedy_eval=False): """ take the action based on the current policy """ with torch.no_grad(): # to take random action or not if (random.random() > self.eps_decay.value(self.total_steps)) or greedy_eval: q_value = self.dqn(state) # chose the max/greedy actions action = q_value.max(1)[1].cpu().numpy() else: action = np.random.randint(0, high=self.action_dim, size = (self.args.num_envs, )) return action def safe_deterministic_pi(self, state, current_cost=0.0, greedy_eval=False): """ take the action based on the current policy """ with torch.no_grad(): # to take random action or not if (random.random() > self.eps_decay.value(self.total_steps)) or greedy_eval: # No random action q_value = self.dqn(state) # Q_D(s,a) cost_q_val = self.cost_model(state) max_q_val = cost_q_val.max(1)[0].unsqueeze(1) # find the action set epsilon = (1 - self.args.gamma) * (self.args.d0 - current_cost) # create the filtered mask here constraint_mask = torch.le(cost_q_val , epsilon + max_q_val).float() filtered_Q = (q_value + 1000.0) * (constraint_mask) filtered_action = filtered_Q.max(1)[1].cpu().numpy() # alt action to take if infeasible solution # minimize the cost alt_action = (-1. 
* cost_q_val).max(1)[1].cpu().numpy() c_sum = constraint_mask.sum(1) action_mask = ( c_sum == torch.zeros_like(c_sum)).cpu().numpy() action = (1 - action_mask) * filtered_action + action_mask * alt_action return action else: # create an array of random indices, for all the environments action = np.random.randint(0, high=self.action_dim, size = (self.args.num_envs, )) return action def compute_n_step_returns(self, next_value, rewards, masks): """ n-step SARSA returns """ R = next_value returns = [] for step in reversed(range(len(rewards))): R = rewards[step] + self.args.gamma * R * masks[step] returns.insert(0, R) return returns def compute_reverse_n_step_returns(self, prev_value, costs, begin_masks): """ n-step SARSA returns (backward in time) """ R = prev_value returns = [] for step in range(len(costs)): R = costs[step] + self.args.gamma * R * begin_masks[step] returns.append(R) return returns def log_episode_stats(self, ep_reward, ep_constraint): """ log the stats for environment performance """ # log episode statistics self.results_dict["train_rewards"].append(ep_reward) self.results_dict["train_constraints"].append(ep_constraint) if self.writer: self.writer.add_scalar("Return", ep_reward, self.num_episodes) self.writer.add_scalar("Constraint", ep_constraint, self.num_episodes) log( 'Num Episode {}\t'.format(self.num_episodes) + \ 'E[R]: {:.2f}\t'.format(ep_reward) +\ 'E[C]: {:.2f}\t'.format(ep_constraint) +\ 'avg_train_reward: {:.2f}\t'.format(np.mean(self.results_dict["train_rewards"][-100:])) +\ 'avg_train_constraint: {:.2f}\t'.format(np.mean(self.results_dict["train_constraints"][-100:])) ) def run(self): """ learning happens here """ self.total_steps = 0 self.eval_steps = 0 # reset state and env state = self.envs.reset() prev_state = torch.FloatTensor(state).to(self.device) tensor_state = torch.FloatTensor(state).to(self.device) current_cost = self.cost_model(tensor_state).max(1)[0].unsqueeze(1) ep_reward = 0 ep_len = 0 ep_constraint = 0 start_time = time.time() while self.num_episodes < self.args.num_episodes: values = [] c_q_vals = [] c_r_vals = [] states = [] actions = [] mus = [] prev_states = [] rewards = [] done_masks = [] begin_masks = [] constraints = [] # n-step sarsa for _ in range(self.args.traj_len): state = torch.FloatTensor(state).to(self.device) # get the expl action action = self.safe_deterministic_pi(state, current_cost= current_cost) next_state, reward, done, info = self.envs.step(action) # convert it back to tensor action = torch.LongTensor(action).unsqueeze(1).to(self.device) q_values = self.dqn(state) Q_value = q_values.gather(1, action) c_q_values = self.cost_model(state) cost_q_val = c_q_values.gather(1, action) # logging mode for only agent 1 ep_reward += reward[0] ep_constraint += info[0][self.cost_indicator] values.append(Q_value) c_q_vals.append(cost_q_val) rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(self.device)) done_masks.append(torch.FloatTensor(1.0 - done).unsqueeze(1).to(self.device)) begin_masks.append(torch.FloatTensor([(1.0 - ci['begin']) for ci in info]).unsqueeze(1).to(self.device)) constraints.append(torch.FloatTensor([ci[self.cost_indicator] for ci in info]).unsqueeze(1).to(self.device)) prev_states.append(prev_state) states.append(state) actions.append(action) # update the costs prev_state = state state = next_state # update the current cost # if done flag is true for the current env, this implies that the next_state cost = 0.0 # because the agent starts with 0.0 cost (or doesn't have access to it anyways) # this is V_{D}(x_0) 
for the Lyapunov agent tensor_state = torch.FloatTensor(state).to(self.device) next_cost = self.cost_model(tensor_state).max(1)[0].unsqueeze(1).detach() cost_mask = torch.FloatTensor(1.0 - done).unsqueeze(1).to(self.device) current_cost = ((1.0 - cost_mask) * next_cost + cost_mask * current_cost).detach() self.total_steps += (1 * self.args.num_envs) # hack to reuse the same code # iteratively add each done episode, so that we can eval at regular intervals for d_idx in range(done.sum()): if done[0] and d_idx==0: if self.num_episodes % self.args.log_every == 0: self.log_episode_stats(ep_reward, ep_constraint) # reset the rewards anyways ep_reward = 0 ep_constraint = 0 self.num_episodes += 1 # eval the policy here after eval_every steps if self.num_episodes % self.args.eval_every == 0: eval_reward, eval_constraint = self.eval() self.results_dict["eval_rewards"].append(eval_reward) self.results_dict["eval_constraints"].append(eval_constraint) log('----------------------------------------') log('Eval[R]: {:.2f}\t'.format(eval_reward) +\ 'Eval[C]: {}\t'.format(eval_constraint) +\ 'Episode: {}\t'.format(self.num_episodes) +\ 'avg_eval_reward: {:.2f}\t'.format(np.mean(self.results_dict["eval_rewards"][-10:])) +\ 'avg_eval_constraint: {:.2f}\t'.format(np.mean(self.results_dict["eval_constraints"][-10:])) ) log('----------------------------------------') if self.writer: self.writer.add_scalar("eval_reward", eval_reward, self.eval_steps) self.writer.add_scalar("eval_constraint", eval_constraint, self.eval_steps) self.eval_steps += 1 # break here if self.num_episodes >= self.args.num_episodes: break # calculate targets here next_state = torch.FloatTensor(next_state).to(self.device) next_q_values = self.dqn(next_state) next_action = self.safe_deterministic_pi(next_state, current_cost) next_action = torch.LongTensor(next_action).unsqueeze(1).to(self.device) next_q_values = next_q_values.gather(1, next_action) # calculate targets target_Q_vals = self.compute_n_step_returns(next_q_values, rewards, done_masks) Q_targets = torch.cat(target_Q_vals).detach() Q_values = torch.cat(values) # loss loss = F.mse_loss(Q_values, Q_targets) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # calculate the cost-targets next_c_value = self.cost_model(next_state) next_c_value = next_c_value.gather(1, next_action) cq_targets = self.compute_n_step_returns(next_c_value, constraints, done_masks) C_q_targets = torch.cat(cq_targets).detach() C_q_vals = torch.cat(c_q_vals) cost_critic_loss = F.mse_loss(C_q_vals, C_q_targets) self.critic_optimizer.zero_grad() cost_critic_loss.backward() self.critic_optimizer.step() # done with all the training # save the models self.save_models() def eval(self): """ evaluate the current policy and log it """ avg_reward = [] avg_constraint = [] with torch.no_grad(): for _ in range(self.args.eval_n): state = self.eval_env.reset() done = False ep_reward = 0 ep_constraint = 0 ep_len = 0 start_time = time.time() state = torch.FloatTensor(state).unsqueeze(0).to(self.device) current_cost = self.cost_model(state).max(1)[0].unsqueeze(1) while not done: # get the policy action action = self.safe_deterministic_pi(state, current_cost=current_cost, greedy_eval=True)[0] next_state, reward, done, info = self.eval_env.step(action) ep_reward += reward ep_len += 1 ep_constraint += info[self.cost_indicator] # update the state state = next_state state = torch.FloatTensor(state).unsqueeze(0).to(self.device) avg_reward.append(ep_reward) avg_constraint.append(ep_constraint) return np.mean(avg_reward),
np.mean(avg_constraint) def save_models(self): """create results dict and save""" torch.save(self.results_dict, os.path.join(self.args.out, 'results_dict.pt')) models = { "dqn" : self.dqn.state_dict(), "cost_model" : self.cost_model.state_dict(), "env" : copy.deepcopy(self.eval_env), } torch.save(models,os.path.join(self.args.out, 'models.pt')) def load_models(self): models = torch.load(os.path.join(self.args.out, 'models.pt')) self.dqn.load_state_dict(models["dqn"]) self.eval_env = models["env"]
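# --- Illustrative sketch (not part of the original code): the Lyapunov-constrained
# action filtering performed by safe_deterministic_pi above, reduced to plain numpy
# for a single state. The names q, q_c, d0 and gamma are stand-ins for the agent's
# Q-values, cost Q-values, and args; the +1000.0 offset mirrors the original trick
# for keeping feasible actions' scores above the zeroed-out infeasible ones.
import numpy as np

def lyapunov_filtered_action(q, q_c, current_cost, d0, gamma):
    # Feasibility budget: epsilon = (1 - gamma) * (d0 - current_cost)
    epsilon = (1.0 - gamma) * (d0 - current_cost)
    # An action is feasible if its cost Q-value is within epsilon of the best cost Q-value
    mask = (q_c <= q_c.max() + epsilon).astype(np.float64)
    if mask.sum() > 0:
        # Greedy among feasible actions (infeasible actions are zeroed out)
        return int(np.argmax((q + 1000.0) * mask))
    # No feasible action: fall back to the one that minimizes cost
    return int(np.argmax(-q_c))

# With d0=1.0, gamma=0.99 and current cost 0.5, the budget is 0.01 * 0.5 = 0.005,
# so both actions below pass the filter and the higher-reward action 1 is chosen.
print(lyapunov_filtered_action(np.array([1.0, 2.0]), np.array([0.2, 0.9]), 0.5, 1.0, 0.99))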
exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02) # Initialize the parameters and copy them to the target network. U.initialize() update_target() reward_list = [] # list for saving the rewards to a file. episode_rewards = [0.0] obs = env.reset() # reset the environment # build the list holding the total reward and the per-episode steps for t in itertools.count(): # take an action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] # the agent's move. new_obs, rew, done, _ = env.step(action) # results of the move: receive the new state and reward from the environment # store the transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) reward_list.append(rew) # append the reward to the list. obs = new_obs # store the new result of step() in obs. episode_rewards[-1] += rew # add the reward to the current episode's return. if done: obs = env.reset() # if the episode finished, reset the environment to loop again episode_rewards.append(0) # append a fresh entry to episode_rewards for the next episode # save the rewards to a file (change the filename) with open("../../32neurons_1.txt", "a") as f:
def learn_continuous_tasks(env, q_func, env_name, dir_path, time_stamp, total_num_episodes, num_actions_pad=33, lr=1e-4, grad_norm_clipping=10, max_timesteps=int(1e8), buffer_size=int(1e6), train_freq=1, batch_size=64, print_freq=10, learning_starts=1000, gamma=0.99, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=int(1e8), num_cpu=16, epsilon_greedy=False, timesteps_std=1e6, initial_std=0.4, final_std=0.05, eval_freq=100, n_eval_episodes=10, eval_std=0.01, log_index=0, log_prefix='q', loss_type="L2", model_file='./', callback=None): """Train a branching deepq model to solve continuous control tasks via discretization. Current assumptions in the implementation: - for solving continuous control domains via discretization (can be adjusted to be compatible with naturally discrete-action domains using 'env.action_space.n') - uniform number of sub-actions per action dimension (can be generalized to a heterogeneous number of sub-actions across branches) Parameters ------- env : gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions_pad: int number of sub-actions per action dimension (= num of discretization grains/bars + 1) lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimize for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed 0.1 for dqn-baselines exploration_final_eps: float final value of random action probability 0.02 for dqn-baselines train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor grad_norm_clipping: int set None for no clipping target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: bool if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the unified TD error for updating priorities. Erratum: The camera-ready copy of this paper incorrectly reported 1e-8. The value used to produce the results is 1e8. num_cpu: int number of cpus to use for training dir_path: str path for logs and results to be stored in callback: (locals, globals) -> None function called at every step with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function.
""" sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) print('Observation shape:' + str(env.observation_space.shape)) num_action_grains = num_actions_pad - 1 num_action_dims = env.action_space.shape[0] num_action_streams = num_action_dims num_actions = num_actions_pad * num_action_streams # total numb network outputs for action branching with one action dimension per branch print('Number of actions in total:' + str(num_actions)) act, q_val, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, num_action_streams=num_action_streams, batch_size=batch_size, optimizer_name="Adam", learning_rate=lr, grad_norm_clipping=grad_norm_clipping, gamma=gamma, double_q=True, scope="deepq", reuse=None, loss_type="L2") print('TRAIN VARS:') print(tf.trainable_variables()) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, 'num_action_streams': num_action_streams, } print('Create the log writer for TensorBoard visualizations.') log_dir = "{}/tensorboard_logs/{}".format(dir_path, env_name) if not os.path.exists(log_dir): os.makedirs(log_dir) score_placeholder = tf.placeholder(tf.float32, [], name='score_placeholder') tf.summary.scalar('score', score_placeholder) lr_constant = tf.constant(lr, name='lr_constant') tf.summary.scalar('learning_rate', lr_constant) eval_placeholder = tf.placeholder(tf.float32, [], name='eval_placeholder') eval_summary = tf.summary.scalar('evaluation', eval_placeholder) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None if epsilon_greedy: approximate_num_iters = 2e6 / 4 exploration = PiecewiseSchedule([(0, 1.0), (approximate_num_iters / 50, 0.1), (approximate_num_iters / 5, 0.01)], outside_value=0.01) else: exploration = ConstantSchedule(value=0.0) # greedy policy std_schedule = LinearSchedule(schedule_timesteps=timesteps_std, initial_p=initial_std, final_p=final_std) # Initialize the parameters and copy them to the target network. 
U.initialize() update_target() # Initialize the parameters used for converting branching, discrete action indices to continuous actions low = env.action_space.low high = env.action_space.high actions_range = np.subtract(high, low) print('###################################') print(low) print(high) print('###################################') episode_rewards = [] reward_sum = 0.0 time_steps = [0] time_spent_exploring = [0] prev_time = time.time() n_trainings = 0 # Create a directory for recording results results_dir = "{}/results/{}".format(dir_path, env_name) if not os.path.exists(results_dir): os.makedirs(results_dir) displayed_mean_reward = None score_timesteps = [] game_scores = [] def evaluate(step, episode_number): global max_eval_reward_mean, model_saved print('Evaluate...') eval_reward_sum = 0.0 # Run evaluation episodes for eval_episode in range(n_eval_episodes): obs = env.reset() done = False while not done: # Choose action action_idxes = np.array( act(np.array(obs)[None], stochastic=False)) # deterministic actions_greedy = action_idxes / num_action_grains * actions_range + low if eval_std == 0.0: action = actions_greedy else: action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: a_stoch = np.random.normal(loc=a_greedy, scale=eval_std) a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action.append(a_stoch) out_of_range_action = False # Step obs, rew, done, _ = env.step(action) eval_reward_sum += rew # Average the rewards and log eval_reward_mean = eval_reward_sum / n_eval_episodes print(eval_reward_mean, 'over', n_eval_episodes, 'episodes') game_scores.append(eval_reward_mean) score_timesteps.append(step) if max_eval_reward_mean is None or eval_reward_mean > max_eval_reward_mean: logger.log( "Saving model due to mean eval increase: {} -> {}".format( max_eval_reward_mean, eval_reward_mean)) U.save_state(model_file) model_saved = True max_eval_reward_mean = eval_reward_mean intact = ActWrapper(act, act_params) intact.save(model_file + "_" + str(episode_number) + "_" + str(int(np.round(max_eval_reward_mean)))) print('Act saved to ' + model_file + "_" + str(episode_number) + "_" + str(int(np.round(max_eval_reward_mean)))) with tempfile.TemporaryDirectory() as td: td = './logs' evaluate(0, 0) obs = env.reset() t = -1 all_means = [] q_stats = [] current_qs = [] training_game_scores = [] training_timesteps = [] while True: t += 1 # Select action and update exploration probability action_idxes = np.array( act(np.array(obs)[None], update_eps=exploration.value(t))) qs = np.array(q_val(np.array(obs)[None], stochastic=False)) # deterministic tt = [] for val in qs: tt.append(np.std(val)) current_qs.append(tt) # Convert sub-action indexes (discrete sub-actions) to continuous controls action = action_idxes / num_action_grains * actions_range + low if not epsilon_greedy: # Gaussian noise actions_greedy = action action_idx_stoch = [] action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: # Sample from a Gaussian with mean at the greedy action and a std following a schedule of choice a_stoch = np.random.normal(loc=a_greedy, scale=std_schedule.value(t)) # Convert sampled cont action to an action idx a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) # Check if action is in range if a_idx_stoch >= 0
and a_idx_stoch < num_actions_pad: action_idx_stoch.append(a_idx_stoch) action.append(a_stoch) out_of_range_action = False action_idxes = action_idx_stoch new_obs, rew, done, _ = env.step(np.array(action)) # Store transition in the replay buffer replay_buffer.add(obs, action_idxes, rew, new_obs, float(done)) obs = new_obs reward_sum += rew if done: obs = env.reset() time_spent_exploring[-1] = int(100 * exploration.value(t)) time_spent_exploring.append(0) episode_rewards.append(reward_sum) training_game_scores.append(reward_sum) training_timesteps.append(t) time_steps[-1] = t reward_sum = 0.0 time_steps.append(0) q_stats.append(np.mean(current_qs, 0)) current_qs = [] if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train( obses_t, actions, rewards, obses_tp1, dones, weights) # np.ones_like(rewards)) #TEMP AT NEW if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) n_trainings += 1 if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically update_target() if len(episode_rewards) == 0: mean_100ep_reward = 0 elif len(episode_rewards) < 100: mean_100ep_reward = np.mean(episode_rewards) else: mean_100ep_reward = np.mean(episode_rewards[-100:]) all_means.append(mean_100ep_reward) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) current_time = time.time() logger.record_tabular("trainings per second", n_trainings / (current_time - prev_time)) logger.dump_tabular() n_trainings = 0 prev_time = current_time if t > learning_starts and num_episodes > 100: if displayed_mean_reward is None or mean_100ep_reward > displayed_mean_reward: if print_freq is not None: logger.log("Mean reward increase: {} -> {}".format( displayed_mean_reward, mean_100ep_reward)) displayed_mean_reward = mean_100ep_reward # Performance evaluation with a greedy policy if done and num_episodes % eval_freq == 0: evaluate(t + 1, num_episodes) obs = env.reset() # STOP training if num_episodes >= total_num_episodes: break pickle.dump(q_stats, open( str(log_index) + "q_stat_stds99_" + log_prefix + ".pkl", 'wb'), protocol=pickle.HIGHEST_PROTOCOL) pickle.dump(game_scores, open( str(log_index) + "q_stat_scores99_" + log_prefix + ".pkl", 'wb'), protocol=pickle.HIGHEST_PROTOCOL) return ActWrapper(act, act_params)
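# --- Illustrative sketch (not part of the original code): the index <-> continuous
# mapping used by the branching agent above, with hypothetical symmetric bounds.
# Note the training loop converts back via (a + high) / range, which equals the
# usual (a - low) / range only when the action space is symmetric (low = -high),
# an implicit assumption of that code.
import numpy as np

low, high = np.array([-1.0, -2.0]), np.array([1.0, 2.0])
num_actions_pad = 33                      # discretization points per dimension
num_action_grains = num_actions_pad - 1
actions_range = high - low

idx = np.array([0, 16])                                    # one sub-action index per dimension
cont = idx / num_action_grains * actions_range + low       # index -> continuous control
back = np.rint((cont + high) / actions_range * num_action_grains)  # continuous -> index
print(cont, back)                                          # [-1.  0.] [ 0. 16.]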
def learn(env, network, seed=None, lr=5e-4, total_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=100, checkpoint_freq=10000, checkpoint_path=None, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, callback=None, load_path=None, **network_kwargs): """Train a deepq model. Parameters ------- env: gym.Env environment to train on network: string or a function neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that) seed: int or None prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used. lr: float learning rate for adam optimizer total_timesteps: int number of env steps to optimize for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: bool if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to total_timesteps. prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. param_noise: bool whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905) callback: (locals, globals) -> None function called at every step with state of the algorithm. If callback returns true training stops. load_path: str path to load the model from. (default: None) **network_kwargs additional keyword arguments to pass to the network builder. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model sess = get_session() set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() reset = True with tempfile.TemporaryDirectory() as td: td = checkpoint_path or td model_file = os.path.join(td, "model") model_saved = False if tf.train.latest_checkpoint(td) is not None: load_variables(model_file) logger.log('Loaded model from {}'.format(model_file)) model_saved = True elif load_path is not None: load_variables(load_path) logger.log('Loaded model from {}'.format(load_path)) for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break # Take action and update exploration to the newest value kwargs = {} if not param_noise: update_eps = exploration.value(t) update_param_noise_threshold = 0. else: update_eps = 0. # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log(1. - exploration.value( t) + exploration.value(t) / float(env.action_space.n)) kwargs['reset'] = reset kwargs[ 'update_param_noise_threshold'] = update_param_noise_threshold kwargs['update_param_noise_scale'] = True action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0] env_action = action reset = False new_obs, rew, done, _ = env.step(env_action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}" .format(saved_mean_reward, mean_100ep_reward)) save_variables(model_file) model_saved = True saved_mean_reward = mean_100ep_reward if model_saved: if print_freq is not None: logger.log("Restored model with mean reward: {}".format( saved_mean_reward)) load_variables(model_file) return act
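# --- Illustrative usage sketch (not part of the original code): how a learn() like
# the one above is typically invoked. "CartPole-v0" and the save path are arbitrary
# examples; the exact save method on the returned ActWrapper may vary by version.
import gym

env = gym.make("CartPole-v0")
act = learn(
    env,
    network='mlp',              # one of the registered model names (mlp, cnn, conv_only)
    lr=1e-3,
    total_timesteps=100000,
    buffer_size=50000,
    exploration_fraction=0.1,
    exploration_final_eps=0.02,
    print_freq=10,
)
act.save("cartpole_model.pkl")  # persist the trained act function for later reload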
def learn(env_id, q_func, lr=5e-4, max_timesteps=10000, buffer_size=5000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, train_steps=10, learning_starts=500, batch_size=32, print_freq=10, checkpoint_freq=100, model_dir=None, gamma=1.0, target_network_update_freq=50, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, param_noise=False, player_processes=None, player_connections=None): env, _, _ = create_gvgai_environment(env_id) # Create all the functions necessary to train the model # expert_decision_maker = ExpertDecisionMaker(env=env) # capture the shape outside the closure so that the env object is not serialized # by cloudpickle when serializing make_obs_ph observation_space = env.observation_space def make_obs_ph(name): return ObservationInput(observation_space, name=name) act, train, update_target, debug = build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=env.action_space.n, optimizer=tf.train.AdamOptimizer(learning_rate=lr), gamma=gamma, grad_norm_clipping=10, param_noise=param_noise) session = tf.Session() session.__enter__() policy_path = os.path.join(model_dir, "Policy.pkl") model_path = os.path.join(model_dir, "model", "model") if os.path.isdir(os.path.join(model_dir, "model")): load_state(model_path) else: act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': env.action_space.n, } act = ActWrapper(act, act_params) # Initialize the parameters and copy them to the target network. U.initialize() update_target() act.save(policy_path) save_state(model_path) env.close() # Create the replay buffer if prioritized_replay: replay_buffer_path = os.path.join(model_dir, "Prioritized_replay.pkl") if os.path.isfile(replay_buffer_path): with open(replay_buffer_path, 'rb') as input_file: replay_buffer = pickle.load(input_file) else: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer_path = os.path.join(model_dir, "Normal_replay.pkl") if os.path.isfile(replay_buffer_path): with open(replay_buffer_path, 'rb') as input_file: replay_buffer = pickle.load(input_file) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) episode_rewards = list() saved_mean_reward = -999999999 signal.signal(signal.SIGQUIT, signal_handler) global terminate_learning total_timesteps = 0 for timestep in range(max_timesteps): if terminate_learning: break for connection in player_connections: experiences, reward = connection.recv() episode_rewards.append(reward) for experience in experiences: replay_buffer.add(*experience) total_timesteps += 1 if total_timesteps < learning_starts: if timestep % 10 == 0: print("not started yet", flush=True) continue if timestep % train_freq == 0: for i in range(train_steps): # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(total_timesteps)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if timestep % target_network_update_freq == 0: # Update target network periodically. update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if print_freq is not None and timestep % print_freq == 0: logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular( "% time spent exploring", int(100 * exploration.value(total_timesteps))) logger.dump_tabular() if timestep % checkpoint_freq == 0 and mean_100ep_reward > saved_mean_reward: act.save(policy_path) save_state(model_path) saved_mean_reward = mean_100ep_reward with open(replay_buffer_path, 'wb') as output_file: pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL) send_message_to_all(player_connections, Message.UPDATE) send_message_to_all(player_connections, Message.TERMINATE) if mean_100ep_reward > saved_mean_reward: act.save(policy_path) with open(replay_buffer_path, 'wb') as output_file: pickle.dump(replay_buffer, output_file, pickle.HIGHEST_PROTOCOL) for player_process in player_processes: player_process.join() # player_process.terminate() return act.load(policy_path)
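# --- Illustrative sketch (not part of the original code): the priority update that
# the training loops above perform after each batch. New priorities are |TD error|
# plus a small epsilon, so every transition keeps a strictly positive sampling
# probability even when its TD error is exactly zero.
import numpy as np

prioritized_replay_eps = 1e-6
td_errors = np.array([0.5, -2.0, 0.0])
new_priorities = np.abs(td_errors) + prioritized_replay_eps
# replay_buffer.update_priorities(batch_idxes, new_priorities)
print(new_priorities)  # [0.500001 2.000001 0.000001]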
def learn_continuous_tasks(env, q_func, env_name, time_stamp, total_num_episodes, num_actions_pad=33, lr=1e-4, grad_norm_clipping=10, max_timesteps=int(1e8), buffer_size=int(1e6), train_freq=1, batch_size=64, print_freq=10, learning_starts=1000, gamma=0.99, target_network_update_freq=500, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=2e6, prioritized_replay_eps=int(1e8), num_cpu=16, timesteps_std=1e6, initial_std=0.4, final_std=0.05, eval_freq=100, n_eval_episodes=10, eval_std=0.01, callback=None): """Train a branching deepq model to solve continuous control tasks via discretization. Current assumptions in the implementation: - for solving continuous control domains via discretization (can be adjusted to be compatible with naturally discrete-action domains using 'env.action_space.n') - uniform number of sub-actions per action dimension (can be generalized to a heterogeneous number of sub-actions across branches) Parameters ------- env : gym.Env environment to train on q_func: (tf.Variable, int, str, bool) -> tf.Variable the model that takes the following inputs: observation_in: object the output of observation placeholder num_actions: int number of actions scope: str reuse: bool should be passed to outer variable scope and returns a tensor of shape (batch_size, num_actions) with values of every action. num_actions_pad: int number of sub-actions per action dimension (= num of discretization grains/bars + 1) lr: float learning rate for adam optimizer max_timesteps: int number of env steps to optimize for buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed 0.1 for dqn-baselines exploration_final_eps: float final value of random action probability 0.02 for dqn-baselines train_freq: int update the model every `train_freq` steps. batch_size: int size of a batch sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor grad_norm_clipping: int set None for no clipping target_network_update_freq: int update the target network every `target_network_update_freq` steps. prioritized_replay: bool if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to max_timesteps. prioritized_replay_eps: float epsilon to add to the unified TD error for updating priorities. Erratum: The camera-ready copy of this paper incorrectly reported 1e-8. The value used to produce the results is 1e8. num_cpu: int number of cpus to use for training losses_version: int optimization version number dir_path: str path for logs and results to be stored in callback: (locals, globals) -> None function called at every step with state of the algorithm. If callback returns true training stops. Returns ------- act: ActWrapper Wrapper over act function. Adds ability to save it and load it. See header of baselines/deepq/categorical.py for details on the act function.
""" sess = U.make_session(num_cpu=num_cpu) sess.__enter__() def make_obs_ph(name): return U.BatchInput(env.observation_space.shape, name=name) num_action_grains = num_actions_pad - 1 num_action_dims = env.action_space.shape[0] num_action_streams = num_action_dims num_actions = num_actions_pad * num_action_streams # total numb network outputs for action branching with one action dimension per branch act, train, update_target, debug = deepq.build_train( make_obs_ph=make_obs_ph, q_func=q_func, num_actions=num_actions, num_action_streams=num_action_streams, batch_size=batch_size, learning_rate=lr, grad_norm_clipping=grad_norm_clipping, gamma=gamma, scope="deepq", reuse=None) act_params = { 'make_obs_ph': make_obs_ph, 'q_func': q_func, 'num_actions': num_actions, 'num_action_streams': num_action_streams, } # prioritized_replay: create the replay buffer replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) # epsilon_greedy = False: just greedy policy exploration = ConstantSchedule(value=0.0) # greedy policy std_schedule = LinearSchedule(schedule_timesteps=timesteps_std, initial_p=initial_std, final_p=final_std) # Initialize the parameters and copy them to the target network. U.initialize() update_target() # Initialize the parameters used for converting branching, discrete action indeces to continuous actions low = env.action_space.low high = env.action_space.high actions_range = np.subtract(high, low) episode_rewards = [] reward_sum = 0.0 num_episodes = 0 time_steps = [0] time_spent_exploring = [0] prev_time = time.time() n_trainings = 0 # Set up on-demand rendering of Gym environments using keyboard controls: 'r'ender or 's'top import termios, fcntl, sys fd = sys.stdin.fileno() oldterm = termios.tcgetattr(fd) newattr = termios.tcgetattr(fd) newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO render = False displayed_mean_reward = None def evaluate(step, episode_number): global max_eval_reward_mean, model_saved print('Evaluate...') eval_reward_sum = 0.0 # Run evaluation episodes for eval_episode in range(n_eval_episodes): obs = env.reset() done = False while not done: # Choose action action_idxes = np.array( act(np.array(obs)[None], stochastic=False)) # deterministic actions_greedy = action_idxes / num_action_grains * actions_range + low if eval_std == 0.0: action = actions_greedy else: action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: a_stoch = np.random.normal(loc=a_greedy, scale=eval_std) a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action.append(a_stoch) out_of_range_action = False # Step obs, rew, done, _ = env.step(action) eval_reward_sum += rew # Average the rewards and log eval_reward_mean = eval_reward_sum / n_eval_episodes print(eval_reward_mean, 'over', n_eval_episodes, 'episodes') with open("results/{}_{}_eval.csv".format(time_stamp, env_name), "a") as eval_fw: eval_writer = csv.writer( eval_fw, delimiter="\t", lineterminator="\n", ) eval_writer.writerow([episode_number, step, eval_reward_mean]) if max_eval_reward_mean is None or eval_reward_mean > max_eval_reward_mean: logger.log( "Saving model due to mean eval increase: {} -> {}".format( max_eval_reward_mean, eval_reward_mean)) U.save_state(model_file) model_saved = True max_eval_reward_mean = 
eval_reward_mean with tempfile.TemporaryDirectory() as td: model_file = os.path.join(td, "model") evaluate(0, 0) obs = env.reset() with open("results/{}_{}.csv".format(time_stamp, env_name), "w") as fw: writer = csv.writer( fw, delimiter="\t", lineterminator="\n", ) t = -1 while True: t += 1 # Select action and update exploration probability action_idxes = np.array( act(np.array(obs)[None], update_eps=exploration.value(t))) # Convert sub-actions indexes (discrete sub-actions) to continuous controls action = action_idxes / num_action_grains * actions_range + low # epsilon_greedy = False: use Gaussian noise actions_greedy = action action_idx_stoch = [] action = [] for index in range(len(actions_greedy)): a_greedy = actions_greedy[index] out_of_range_action = True while out_of_range_action: # Sample from a Gaussian with mean at the greedy action and a std following a schedule of choice a_stoch = np.random.normal(loc=a_greedy, scale=std_schedule.value(t)) # Convert sampled cont action to an action idx a_idx_stoch = np.rint( (a_stoch + high[index]) / actions_range[index] * num_action_grains) # Check if action is in range if a_idx_stoch >= 0 and a_idx_stoch < num_actions_pad: action_idx_stoch.append(a_idx_stoch) action.append(a_stoch) out_of_range_action = False action_idxes = action_idx_stoch new_obs, rew, done, _ = env.step(action) # On-demand rendering if (t + 1) % 100 == 0: # TO DO better? termios.tcsetattr(fd, termios.TCSANOW, newattr) oldflags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK) try: try: c = sys.stdin.read(1) if c == 'r': print() print('Rendering begins...') render = True elif c == 's': print() print('Stop rendering!') render = False env.render(close=True) except IOError: pass finally: termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm) fcntl.fcntl(fd, fcntl.F_SETFL, oldflags) # Visualize Gym environment on render if render: env.render() # Store transition in the replay buffer replay_buffer.add(obs, action_idxes, rew, new_obs, float(done)) obs = new_obs reward_sum += rew if done: obs = env.reset() time_spent_exploring[-1] = int(100 * exploration.value(t)) time_spent_exploring.append(0) episode_rewards.append(reward_sum) time_steps[-1] = t reward_sum = 0.0 time_steps.append(0) # Frequently log to file writer.writerow( [len(episode_rewards), t, episode_rewards[-1]]) if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer # prioritized_replay experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience td_errors = train( obses_t, actions, rewards, obses_tp1, dones, weights) #np.ones_like(rewards)) #TEMP AT NEW # prioritized_replay new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) n_trainings += 1 if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically update_target() if len(episode_rewards) == 0: mean_100ep_reward = 0 elif len(episode_rewards) < 100: mean_100ep_reward = np.mean(episode_rewards) else: mean_100ep_reward = np.mean(episode_rewards[-100:]) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", 
int(100 * exploration.value(t))) current_time = time.time() logger.record_tabular( "trainings per second", n_trainings / (current_time - prev_time)) logger.dump_tabular() n_trainings = 0 prev_time = current_time if t > learning_starts and num_episodes > 100: if displayed_mean_reward is None or mean_100ep_reward > displayed_mean_reward: if print_freq is not None: logger.log("Mean reward increase: {} -> {}".format( displayed_mean_reward, mean_100ep_reward)) displayed_mean_reward = mean_100ep_reward # Performance evaluation with a greedy policy if done and num_episodes % eval_freq == 0: evaluate(t + 1, num_episodes) obs = env.reset() # STOP training if num_episodes >= total_num_episodes: break if model_saved: logger.log("Restore model with mean eval: {}".format( max_eval_reward_mean)) U.load_state(model_file) data_to_log = { 'time_steps': time_steps, 'episode_rewards': episode_rewards, 'time_spent_exploring': time_spent_exploring } # Write to file the episodic rewards, number of steps, and the time spent exploring with open("results/{}_{}.txt".format(time_stamp, env_name), 'wb') as fp: pickle.dump(data_to_log, fp) return ActWrapper(act, act_params)
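# --- Illustrative sketch (not part of the original code): the Gaussian exploration
# used in place of epsilon-greedy in the branching loops above. A continuous action
# is resampled around the greedy action with a scheduled std until its discretized
# index lands back inside the grid; std, low_d, high_d here are hypothetical values.
import numpy as np

def sample_in_range(a_greedy, std, low_d, high_d, num_action_grains):
    range_d = high_d - low_d
    while True:
        # Gaussian centered on the greedy action with the scheduled std
        a_stoch = np.random.normal(loc=a_greedy, scale=std)
        # Discretize back to a sub-action index (assumes symmetric bounds, as above)
        a_idx = np.rint((a_stoch + high_d) / range_d * num_action_grains)
        if 0 <= a_idx <= num_action_grains:   # valid sub-action index
            return a_stoch, int(a_idx)

print(sample_in_range(0.0, std=0.4, low_d=-1.0, high_d=1.0, num_action_grains=32))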
# Create the replay buffer replay_buffer = ReplayBuffer(50000) # Create the schedule for exploration starting from 1 (every action is random) down to # 0.02 (98% of actions are selected according to values predicted by the model). exploration = LinearSchedule(schedule_timesteps=10000, initial_p=1.0, final_p=0.02) # Initialize the parameters and copy them to the target network. U.initialize() update_target() episode_rewards = [0.0] obs = env.reset() # for t in itertools.count(): for t in range(100000): # Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] new_obs, rew, done, _ = env.step(action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0) is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200 if is_solved: # Show off the result env.render() else:
class Agent(tf.Module): def __init__(self, config, env): self.config = config self.agent_ids = [a for a in range(config.num_agents)] self.env = env # self.optimizer = tf.keras.optimizers.Adam(self.config.lr) self.optimizer = tf.keras.optimizers.Adadelta( learning_rate=self.config.lr, rho=0.95, epsilon=1e-07, name='Adadelta') self.replay_memory, self.beta_schedule = init_replay_memory(config) self.model = init_network_2(config) self.target_model = init_network_2(config) self.model.summary() # tf.keras.utils.plot_model(self.model, to_file='./model.png') if self.config.dueling: self.agent_heads = self.build_agent_heads_dueling() self.target_agent_heads = self.build_agent_heads_dueling() else: self.agent_heads = self.build_agent_heads() self.target_agent_heads = self.build_agent_heads() self.agent_heads[0].summary() # tf.keras.utils.plot_model(self.agent_heads[0], to_file='./agent_heads_model.png') # Create the schedule for exploration starting from 1. self.exploration = LinearSchedule(schedule_timesteps=int( config.exploration_fraction * config.num_timesteps), initial_p=1.0, final_p=config.exploration_final_eps) if config.load_path is not None: self.load_models(config.load_path) self.loss = self.nstep_loss self.eps = tf.Variable(0.0) self.one_hot_agents = tf.expand_dims(tf.one_hot(self.agent_ids, len(self.agent_ids), dtype=tf.float32), axis=1) print(f'self.one_hot_agents.shape is {self.one_hot_agents.shape}') self.initialize_dummy_variables() def initialize_dummy_variables(self): self.dummy_nstep_obs = tf.zeros( ((self.config.n_steps - 1), *self.config.obs_shape), dtype=tf.float32) self.dummy_fps = tf.zeros( (1, 1, (self.config.num_agents - 1) * self.config.num_actions + self.config.num_extra_data)) # print(f'self.dummy_fps.shape is {self.dummy_fps.shape}') self.dummy_done_mask = tf.zeros((1, 1)) # print(f'tile done_mask shape is : {tf.tile(self.dummy_done_mask, (2, 1, 1)).shape}') def build_agent_heads(self): """ :return: list of heads for agents - gets tensorflow model and adds heads for each agent """ input_shape = self.model.output_shape[-1] print(input_shape) heads = [] inputs = tf.keras.layers.Input(input_shape) for a in self.agent_ids: name = 'head_agent_' + str(a) head_a = tf.keras.layers.Dense( units=self.config.num_actions, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name=name)(inputs) head_a = tf.keras.Model(inputs=inputs, outputs=head_a) heads.append(head_a) return heads def build_agent_heads_dueling(self): """ :return: list of heads for agents - gets tensorflow model and adds heads for each agent """ input_shape = self.model.output_shape[-1] print(input_shape) heads = [] inputs = tf.keras.layers.Input(input_shape) for a in self.agent_ids: name = 'head_agent_' + str(a) with tf.name_scope(f'action_value_{name}'): action_head_a = tf.keras.layers.Dense( units=self.config.num_actions, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name='action_' + name)(inputs) with tf.name_scope(f'state_value_{name}'): state_head_a = tf.keras.layers.Dense( units=1, activation=None, kernel_initializer=tf.keras.initializers.Orthogonal(1.0), bias_initializer=tf.keras.initializers.Constant(0.0), name='state_' + name)(inputs) action_scores_mean = tf.reduce_mean(action_head_a, 1) action_scores_centered = action_head_a - tf.expand_dims( action_scores_mean, 1) head_a = state_head_a + action_scores_centered head_a = tf.keras.Model(inputs=inputs,
outputs=head_a) heads.append(head_a) return heads @tf.function def choose_action(self, obs, stochastic=True, update_eps=-1): """ :param obs: list observations one for each agent :param stochastic: True for Train phase and False for test phase :param update_eps: epsilon update for eps-greedy :return: actions: list of actions chosen by agents based on observation one for each agent """ actions = [] fps = [] for a in self.agent_ids: # print(f'tf.expand_dims(obs[a], 0), {tf.expand_dims(obs[a], 0).shape}') inputs = { '0': tf.expand_dims(tf.expand_dims(obs[a], 0), 0), '1': self.one_hot_agents[a], '2': self.dummy_fps, '3': self.dummy_done_mask } fc_values = self.model(inputs) # print(f'fc_values.shape {fc_values.shape}') q_values = self.agent_heads[a](fc_values[:, -1, :]) # [:, -1, :] fps.append(q_values.numpy().tolist()[0]) # print(f'q_values.shape {q_values.shape}') deterministic_actions = tf.argmax(q_values, axis=1) # print(f'deterministic_actions {deterministic_actions}') batch_size = 1 random_actions = tf.random.uniform(tf.stack([batch_size]), minval=0, maxval=self.config.num_actions, dtype=tf.int64) # print(f'random_actions {random_actions}') chose_random = tf.random.uniform( tf.stack([batch_size ]), minval=0, maxval=1, dtype=tf.float32) < self.eps # print(f'chose_random {chose_random}') stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions) # print(f'stochastic_actions {stochastic_actions}') if stochastic: actions.append(stochastic_actions.numpy()[0]) else: actions.append(deterministic_actions.numpy()[0]) if update_eps >= 0: self.eps.assign(update_eps) # print(f'actions {actions}') return actions, fps @tf.function def value(self, obs): """ :param obs: list observations one for each agent :return: best values based on Q-Learning formula max Q(s',a') """ values = [] for a in self.agent_ids: # print(f'tf.expand_dims(obs[a], 0), {tf.expand_dims(obs[a], 0).shape}') inputs = { '0': tf.expand_dims(tf.expand_dims(obs[a], 0), 0), '1': self.one_hot_agents[a], '2': self.dummy_fps, '3': self.dummy_done_mask } fc_values = self.target_model(inputs) q_values = self.target_agent_heads[a]( fc_values[:, -1, :]) # [:, -1, :] if self.config.double_q: fc_values_using_online_net = self.model(inputs) q_values_using_online_net = self.agent_heads[a]( fc_values_using_online_net[:, -1, :]) # [:, -1, :] q_value_best_using_online_net = tf.argmax( q_values_using_online_net, 1) q_tp1_best = tf.reduce_sum( q_values * tf.one_hot(q_value_best_using_online_net, self.config.num_actions, dtype=tf.float32), 1) else: q_tp1_best = tf.reduce_max(q_values, 1) values.append(q_tp1_best.numpy()[0]) return values @tf.function() def nstep_loss(self, obses_t_a, actions_a, rewards_a, dones_a, weights_a, fps_a, agent_id): # print(f'obses_t_a.shape {obses_t_a.shape}') s = obses_t_a.shape # obses_t_a = tf.reshape(obses_t_a, (s[0] * s[1], *s[2:])) # s = fps_a.shape # fps_a = tf.reshape(fps_a, (s[0] * s[1], *s[2:])) # s = dones_a.shape # dones_a = tf.reshape(dones_a, (s[0], s[1])) # s = actions_a.shape # actions_a = tf.reshape(actions_a, (s[0] * s[1], *s[2:])) # s = rewards_a.shape # rewards_a = tf.reshape(rewards_a, (s[0] * s[1], *s[2:])) # s = weights_a.shape # weights_a = tf.reshape(weights_a, (s[0] * s[1], *s[2:])) inputs_a = { '0': obses_t_a, '1': tf.tile(self.one_hot_agents[agent_id], (s[0] * s[1], 1)), '2': fps_a, '3': dones_a } fc_values = self.model(inputs_a) s = fc_values.shape # print(f'fc_values.shape {fc_values.shape}') # fc_values = tf.reshape(fc_values, (s[0] * s[1], *s[2:])) q_t = 
self.agent_heads[agent_id](fc_values[:, -1, :]) q_t_selected = tf.reduce_sum( q_t * tf.one_hot( actions_a[:, -1], self.config.num_actions, dtype=tf.float32), 1) # print(f'q_t_selected.shape is {q_t_selected.shape}') td_error = q_t_selected - tf.stop_gradient(rewards_a[:, -1]) errors = huber_loss(td_error) weighted_loss = tf.reduce_mean(weights_a[:, -1] * errors) return weighted_loss, td_error @tf.function() def train(self, obses_t, actions, rewards, dones, weights, fps): td_errors = [] loss = [] # print(f'obses_t.shape {obses_t.shape}') with tf.GradientTape() as tape: for a in self.agent_ids: loss_a, td_error = self.loss(obses_t[:, a], actions[:, a], rewards[:, a], dones[:, a], weights[:, a], fps[:, a], a) loss.append(loss_a) td_errors.append(td_error) sum_loss = tf.reduce_sum(loss) sum_td_error = tf.reduce_sum(td_error) # print(f'sum_loss is {sum_loss}, loss is {loss}') param = self.model.trainable_variables for a in self.agent_ids: param += self.agent_heads[a].trainable_variables # print(f'param {param}') grads = tape.gradient(sum_loss, param) if self.config.grad_norm_clipping: clipped_grads = [] for grad in grads: clipped_grads.append( tf.clip_by_norm(grad, self.config.grad_norm_clipping)) grads = clipped_grads grads_and_vars = list(zip(grads, param)) self.optimizer.apply_gradients(grads_and_vars) return sum_loss.numpy(), sum_td_error.numpy() @tf.function(autograph=False) def update_target(self): for var, var_target in zip(self.model.trainable_variables, self.target_model.trainable_variables): var_target.assign(var) vars_, target_vars = [], [] for a in self.agent_ids: vars_.extend(self.agent_heads[a].trainable_variables) target_vars.extend(self.target_agent_heads[a].trainable_variables) for var, var_target in zip(vars_, target_vars): var_target.assign(var) @tf.function(autograph=False) def soft_update_target(self): for var, var_target in zip(self.model.trainable_variables, self.target_model.trainable_variables): var_target.assign(self.config.tau * var + (1.0 - self.config.tau) * var_target) vars, target_vars = [], [] for a in self.agent_ids: vars.extend(self.agent_heads[a].trainable_variables) target_vars.extend(self.target_agent_heads[a].trainable_variables) for var, var_target in zip(vars, target_vars): var_target.assign(self.config.tau * var + (1.0 - self.config.tau) * var_target) def save(self, save_path): self.model.save_weights(f'{save_path}/value_network.h5') self.target_model.save_weights(f'{save_path}/target_network.h5') for a in self.agent_ids: self.agent_heads[a].save_weights(f'{save_path}/agent_{a}_head.h5') self.target_agent_heads[a].save_weights( f'{save_path}/target_agent_{a}_head.h5') def load(self, load_path): self.model.load_weights(f'{load_path}/value_network.h5') self.target_model.load_weights(f'{load_path}/target_network.h5') for a in self.agent_ids: self.agent_heads[a].load_weights(f'{load_path}/agent_{a}_head.h5') self.target_agent_heads[a].load_weights( f'{load_path}/target_agent_{a}_head.h5') def learn(self): self.soft_update_target() episode_rewards = [0.0] obs = self.env.reset() done = False # Start total timer tstart = time.time() episodes_trained = [0, False] # [episode_number, Done flag] for t in range(self.config.num_timesteps): update_eps = tf.constant(self.exploration.value(t)) if t % (self.config.print_freq) == 0: time_1000_step = time.time() nseconds = time_1000_step - tstart tstart = time_1000_step print( f'eps_update {self.exploration.value(t)} -- time {t - self.config.print_freq} to {t} steps: {nseconds} ' ) mb_obs, mb_rewards, mb_actions, mb_fps, 
mb_dones = [], [], [], [], [] # mb_states = states epinfos = [] for nstep in range(self.config.n_steps): actions, fps_ = self.choose_action(tf.constant( obs, dtype=tf.float32), update_eps=update_eps) # print(f'actions is {actions}') # print(f'fps_ is {fps_}') fps = [] if self.config.num_agents > 1: for a in self.agent_ids: fp = fps_[:a] fp.extend(fps_[a + 1:]) fp_a = np.concatenate( (fp, [[self.exploration.value(t) * 100, t]]), axis=None) fps.append(fp_a) mb_obs.append(obs.copy()) mb_actions.append(actions) mb_fps.append(fps) mb_dones.append([float(done) for _ in self.agent_ids]) obs1, rews, done, info = self.env.step(actions) if self.config.same_reward_for_agents: rews = [ np.max(rews) for _ in range(len(rews)) ] # for cooperative purpose same reward for every one mb_rewards.append(rews) obs = obs1 maybeepinfo = info.get('episode') if maybeepinfo: epinfos.append(maybeepinfo) episode_rewards[-1] += np.max(rews) if done: episodes_trained[0] = episodes_trained[0] + 1 episodes_trained[1] = True episode_rewards.append(0.0) obs = self.env.reset() mb_dones.append([float(done) for _ in self.agent_ids]) # swap axes to have lists in shape of (num_agents, num_steps, ...) mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1) mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1) mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1) mb_masks = mb_dones[:, :-1] mb_dones = mb_dones[:, 1:] # print(f'mb_masks.shape is {mb_masks.shape}') # print(f'mb_rewards is {mb_rewards}') if self.config.gamma > 0.0: # Discount/bootstrap off value fn last_values = self.value(tf.constant(obs1, dtype=tf.float32)) # print(f'last_values {last_values}') for n, (rewards, dones, value) in enumerate( zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.config.gamma) mb_rewards[n] = rewards # print(f'after discount mb_rewards is {mb_rewards}') if self.config.replay_buffer is not None: self.replay_memory.add( (mb_obs, mb_actions, mb_rewards, obs1, mb_masks, mb_fps)) if t > self.config.learning_starts and t % self.config.train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if self.config.prioritized_replay: experience = self.replay_memory.sample( self.config.batch_size, beta=self.beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, fps, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones, fps = self.replay_memory.sample( self.config.batch_size) weights, batch_idxes = np.ones_like(rewards), None obses_t = tf.constant(obses_t, dtype=tf.float32) actions = tf.constant(actions) rewards = tf.constant(rewards) dones = tf.constant(dones) weights = tf.constant(weights) fps = tf.constant(fps) loss, td_errors = self.train(obses_t, actions, rewards, dones, weights, fps) if t % (self.config.train_freq * 50) == 0: print(f't = {t} , loss = {loss}') if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0: # Update target network periodically. 
self.soft_update_target() if t % self.config.playing_test == 0 and t != 0: self.save(self.config.save_path) self.play_test_games() mean_100ep_reward = np.mean(episode_rewards[-101:-1]) num_episodes = len(episode_rewards) # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0: if episodes_trained[ 1] and episodes_trained[0] % self.config.print_freq == 0: episodes_trained[1] = False logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 past episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t))) logger.dump_tabular() def play_test_games(self): num_tests = self.config.num_tests test_env = init_env(self.config, mode='test') test_rewards = np.zeros(num_tests) for i in range(num_tests): test_done = False test_obs_all = test_env.reset() # print(np.asarray(test_obs_all).shape) while not test_done: test_obs_all = tf.constant(test_obs_all, dtype=tf.float32) test_action_list, _ = self.choose_action(test_obs_all, stochastic=False) test_new_obs_list, test_rew_list, test_done, _ = test_env.step( test_action_list) test_obs_all = test_new_obs_list if test_done: test_rewards[i] = np.mean(test_rew_list) print( f'test_rewards: {test_rewards} \n mean reward of {num_tests} tests: {np.mean(test_rewards)}' ) test_env.close()
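# The per-agent loss above relies on a huber_loss helper that is not defined
# in this excerpt. A minimal sketch, assuming the usual baselines-style
# definition (quadratic inside |x| <= delta, linear outside):
import tensorflow as tf

def huber_loss(x, delta=1.0):
    return tf.where(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta))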
resume=True, mode="evaluation", write_upon_reset=True) steps, total_return = play_once(demo_env, 0.05, render=True) print("Demo for %d steps, Return %d" % (steps, total_return)) summary = tf.Summary() summary.value.add(tag="demo/return", simple_value=total_return) summary.value.add(tag="demo/steps", simple_value=steps) demo_env.close() return summary linear_schedule = LinearSchedule(int(EPSILON_STEPS), final_p=EPSILON_MIN, initial_p=EPSILON_MAX) epsilon = linear_schedule.value(session.run(global_step)) # Populate replay buffer print("Populating replay buffer with epsilon %f..." % epsilon) while MINIMAL_SAMPLES > replay_buffer.number_of_samples(): steps, total_return = play_once(env, epsilon, render=False) print("Played %d < %d steps" % (replay_buffer.number_of_samples(), MINIMAL_SAMPLES)) # Main loop print("Start Main Loop...") for n in range(ITERATIONS): gstep = tf.train.global_step(session, global_step) epsilon = linear_schedule.value(gstep) steps, total_return = play_once(env, epsilon) t0 = datetime.now() train_summary = train(steps)
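# Most snippets in this collection construct LinearSchedule with the OpenAI
# baselines signature (schedule_timesteps, final_p, initial_p); a couple
# (e.g. eps_sched in the next function) appear to use a variant taking
# (initial, final, timesteps) instead. For reference, a minimal
# baselines-style implementation:
class LinearSchedule(object):
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        # Linear interpolation from initial_p to final_p, then constant.
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)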
def main(env_name,
         train_freq=1,
         target_update_freq=1000,
         batch_size=32,
         train_after=64,
         final_gamma=0.02,  # final exploration epsilon, despite the name
         max_timesteps=2000000,
         buffer_size=10000,
         prioritized_replay_alpha=0.6,
         prioritized_replay_beta=0.4,
         prioritized_replay_eps=1e-6,
         log_freq=1,
         checkpoint_freq=10000):
    env = gym.make(env_name)
    env = wrap_env(env)
    state_dim = (4, 84, 84)
    action_dim = env.action_space.n
    agent = LearningAgent(state_dim, action_dim)
    logger = agent.writer
    eps_sched = LinearSchedule(1.0, final_gamma, max_timesteps)
    beta_sched = LinearSchedule(prioritized_replay_beta, 1.0, max_timesteps)
    replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                            alpha=prioritized_replay_alpha)
    try:
        obs = env.reset()
        episode = 0
        rewards = 0
        steps = 0
        for t in range(max_timesteps):
            # Take action and update exploration to the newest value
            action = agent.act(obs, epsilon=eps_sched.value(t))
            obs_, reward, done, _ = env.step(action)

            # Store transition in the replay buffer
            replay_buffer.add(obs, action, reward, obs_, float(done))
            obs = obs_

            rewards += reward
            if done:
                steps = t - steps
                episode += 1
                obs = env.reset()

            if t > train_after and (t % train_freq) == 0:
                print('Training...')
                # Minimize the error in Bellman's equation on a batch sampled
                # from the replay buffer. The done flags are unpacked as `d`
                # so they do not clobber the timestep counter `t`.
                experience = replay_buffer.sample(batch_size,
                                                  beta=beta_sched.value(t))
                (s, a, r, s_, d, weights, batch_idxes) = experience
                td_errors = agent.train(s, a, r, s_, d, weights)
                new_priorities = np.abs(td_errors) + prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > train_after and (t % target_update_freq) == 0:
                agent.update_target()

            if done and (episode % log_freq) == 0:
                logger.write_value('rewards', rewards, episode)
                logger.write_value('steps', steps, episode)
                logger.write_value('epsilon', eps_sched.value(t), episode)
                agent.trainer.summarize_training_progress()
                logger.flush()
                rewards = 0
                steps = t

            if t > train_after and (t % checkpoint_freq) == 0:
                agent.checkpoint('model_{}.chkpt'.format(t))
    finally:
        agent.save_model('model.dnn')
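# The prioritized buffer used above samples transition i with probability
# P(i) = p_i**alpha / sum_k p_k**alpha and corrects the induced bias with
# importance-sampling weights w_i = (N * P(i))**(-beta), normalized by the
# largest weight. A small NumPy sketch of that computation (the buffer's
# internal implementation may differ):
import numpy as np

def importance_weights(priorities, idxes, alpha, beta):
    probs = priorities ** alpha
    probs = probs / probs.sum()
    n = len(priorities)
    weights = (n * probs[idxes]) ** (-beta)
    return weights / weights.max()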
def learn(env, network, seed=None, lr=5e-5, total_timesteps=100000, buffer_size=500000, exploration_fraction=0.1, exploration_final_eps=0.01, train_freq=1, batch_size=32, print_freq=10, checkpoint_freq=100000, checkpoint_path=None, learning_starts=0, gamma=0.99, target_network_update_freq=10000, prioritized_replay=True, prioritized_replay_alpha=0.4, prioritized_replay_beta0=0.6, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-3, param_noise=False, callback=None, load_path=None, load_idx=None, demo_path=None, n_step=10, demo_prioritized_replay_eps=1.0, pre_train_timesteps=750000, epsilon_schedule="constant", **network_kwargs): # Create all the functions necessary to train the model set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) with tf.device('/GPU:0'): model = DQfD(q_func=q_func, observation_shape=env.observation_space.shape, num_actions=env.action_space.n, lr=lr, grad_norm_clipping=10, gamma=gamma, param_noise=param_noise) # Load model from checkpoint if load_path is not None: load_path = osp.expanduser(load_path) ckpt = tf.train.Checkpoint(model=model) manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None) if load_idx is None: ckpt.restore(manager.latest_checkpoint) print("Restoring from {}".format(manager.latest_checkpoint)) else: ckpt.restore(manager.checkpoints[load_idx]) print("Restoring from {}".format(manager.checkpoints[load_idx])) # Setup demo trajectory assert demo_path is not None with open(demo_path, "rb") as f: trajectories = pickle.load(f) # Create the replay buffer replay_buffer = PrioritizedReplayBuffer(buffer_size, prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) temp_buffer = deque(maxlen=n_step) is_demo = True for epi in trajectories: for obs, action, rew, new_obs, done in epi: obs, new_obs = np.expand_dims( np.array(obs), axis=0), np.expand_dims(np.array(new_obs), axis=0) if n_step: temp_buffer.append((obs, action, rew, new_obs, done, is_demo)) if len(temp_buffer) == n_step: n_step_sample = get_n_step_sample(temp_buffer, gamma) replay_buffer.demo_len += 1 replay_buffer.add(*n_step_sample) else: replay_buffer.demo_len += 1 replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), float(is_demo)) logger.log("trajectory length:", replay_buffer.demo_len) # Create the schedule for exploration if epsilon_schedule == "constant": exploration = ConstantSchedule(exploration_final_eps) else: # not used exploration = LinearSchedule(schedule_timesteps=int( exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) model.update_target() # ============================================== pre-training ====================================================== start = time() num_episodes = 0 temp_buffer = deque(maxlen=n_step) for t in tqdm(range(pre_train_timesteps)): # sample and train experience = replay_buffer.sample(batch_size, beta=prioritized_replay_beta0) batch_idxes = experience[-1] if experience[6] is None: # for n_step = 0 obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple( map(tf.constant, experience[:6])) obses_tpn, rewards_n, dones_n = None, None, None weights = tf.constant(experience[-2]) else: obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple( map(tf.constant, experience[:-1])) td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = 
model.train(
            obses_t, actions, rewards, obses_tp1, dones, is_demos, weights,
            obses_tpn, rewards_n, dones_n)

        # Update priorities
        new_priorities = np.abs(td_errors) + np.abs(
            n_td_errors) + demo_prioritized_replay_eps
        replay_buffer.update_priorities(batch_idxes, new_priorities)

        # Update target network periodically
        if t > 0 and t % target_network_update_freq == 0:
            model.update_target()

        # Logging (timedelta expects seconds, not days)
        elapsed_time = timedelta(seconds=time() - start)
        if print_freq is not None and t % 10000 == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", 0)
            logger.record_tabular("max 100 episode reward", 0)
            logger.record_tabular("min 100 episode reward", 0)
            logger.record_tabular("demo sample rate", 1)
            logger.record_tabular("epsilon", 0)
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring",
                                  int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", True)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()

    # ============================================== exploring =========================================================
    sample_counts = 0
    demo_used_counts = 0
    episode_rewards = deque(maxlen=100)
    this_episode_reward = 0.
    best_score = 0.
    saved_mean_reward = None
    is_demo = False
    obs = env.reset()
    # Always mimic the vectorized env
    obs = np.expand_dims(np.array(obs), axis=0)
    reset = True
    for t in tqdm(range(total_timesteps)):
        if callback is not None:
            if callback(locals(), globals()):
                break
        kwargs = {}
        if not param_noise:
            update_eps = tf.constant(exploration.value(t))
            update_param_noise_threshold = 0.
        else:
            # not used
            update_eps = tf.constant(0.)
            update_param_noise_threshold = -np.log(
                1. - exploration.value(t) +
                exploration.value(t) / float(env.action_space.n))
            kwargs['reset'] = reset
            kwargs['update_param_noise_threshold'] = update_param_noise_threshold
            kwargs['update_param_noise_scale'] = True
        action, epsilon, _, _ = model.step(tf.constant(obs),
                                           update_eps=update_eps,
                                           **kwargs)
        action = action[0].numpy()
        reset = False
        new_obs, rew, done, _ = env.step(action)

        # Store transition in the replay buffer.
        new_obs = np.expand_dims(np.array(new_obs), axis=0)
        if n_step:
            temp_buffer.append((obs, action, rew, new_obs, done, is_demo))
            if len(temp_buffer) == n_step:
                n_step_sample = get_n_step_sample(temp_buffer, gamma)
                replay_buffer.add(*n_step_sample)
        else:
            replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), 0.)
        obs = new_obs

        # invert log-scaled score for logging
        this_episode_reward += np.sign(rew) * (np.exp(np.sign(rew) * rew) - 1.)
        if done:
            num_episodes += 1
            obs = env.reset()
            obs = np.expand_dims(np.array(obs), axis=0)
            episode_rewards.append(this_episode_reward)
            reset = True
            if this_episode_reward > best_score:
                best_score = this_episode_reward
                ckpt = tf.train.Checkpoint(model=model)
                manager = tf.train.CheckpointManager(ckpt,
                                                     './best_model',
                                                     max_to_keep=1)
                manager.save(t)
                logger.log("saved best model")
            this_episode_reward = 0.0

        if t % train_freq == 0:
            experience = replay_buffer.sample(batch_size,
                                              beta=beta_schedule.value(t))
            batch_idxes = experience[-1]
            if experience[6] is None:  # for n_step = 0
                obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple(
                    map(tf.constant, experience[:6]))
                obses_tpn, rewards_n, dones_n = None, None, None
                weights = tf.constant(experience[-2])
            else:
                obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple(
                    map(tf.constant, experience[:-1]))
            td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = model.train(
                obses_t, actions, rewards, obses_tp1, dones, is_demos,
                weights, obses_tpn, rewards_n, dones_n)
            new_priorities = np.abs(td_errors) + np.abs(
                n_td_errors
            ) + demo_prioritized_replay_eps * is_demos + prioritized_replay_eps * (
                1. - is_demos)
            replay_buffer.update_priorities(batch_idxes, new_priorities)
            # for logging
            sample_counts += batch_size
            demo_used_counts += np.sum(is_demos)

        if t % target_network_update_freq == 0:
            # Update target network periodically.
            model.update_target()

        if t % checkpoint_freq == 0:
            save_path = checkpoint_path
            ckpt = tf.train.Checkpoint(model=model)
            manager = tf.train.CheckpointManager(ckpt,
                                                 save_path,
                                                 max_to_keep=10)
            manager.save(t)
            logger.log("saved checkpoint")

        # timedelta expects seconds, not days
        elapsed_time = timedelta(seconds=time() - start)
        if done and num_episodes > 0 and num_episodes % print_freq == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward",
                                  np.mean(episode_rewards))
            logger.record_tabular("max 100 episode reward",
                                  np.max(episode_rewards))
            logger.record_tabular("min 100 episode reward",
                                  np.min(episode_rewards))
            logger.record_tabular("demo sample rate",
                                  demo_used_counts / sample_counts)
            logger.record_tabular("epsilon", epsilon.numpy())
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring",
                                  int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", False)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()

    return model
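# get_n_step_sample is not shown in this excerpt. A plausible sketch that is
# consistent with how samples are unpacked above (the 1-step fields first,
# then the n-step observation, discounted return, and done flag); the real
# helper may differ:
def get_n_step_sample(buffer, gamma):
    obs, action, rew, new_obs, done, is_demo = buffer[0]
    obs_n, rew_n, done_n = new_obs, 0.0, 0.0
    # Accumulate the discounted n-step return, truncating at episode end.
    for i, (_, _, r, o, d, _) in enumerate(buffer):
        rew_n += (gamma ** i) * r
        obs_n = o
        if d:
            done_n = 1.0
            break
    return (obs[0], action, rew, new_obs[0], float(done), float(is_demo),
            obs_n[0], rew_n, done_n)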
class SarsaAgent(object): def __init__(self, args, env, writer=None): """ init the agent here """ self.eval_env = copy.deepcopy(env) self.args = args self.state_dim = env.reset().shape self.action_dim = env.action_space.n self.device = torch.device("cuda" if ( torch.cuda.is_available() and self.args.gpu) else "cpu") # set the same random seed in the main launcher random.seed(self.args.seed) torch.manual_seed(self.args.seed) np.random.seed(self.args.seed) if self.args.gpu: torch.cuda.manual_seed(self.args.seed) self.writer = writer if self.args.env_name == "grid": self.dqn = OneHotDQN(self.state_dim, self.action_dim).to(self.device) self.dqn_target = OneHotDQN(self.state_dim, self.action_dim).to(self.device) else: raise Exception("not implemented yet!") # copy parameters self.dqn_target.load_state_dict(self.dqn.state_dict()) self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=self.args.lr) # for actors def make_env(): def _thunk(): env = create_env(args) return env return _thunk envs = [make_env() for i in range(self.args.num_envs)] self.envs = SubprocVecEnv(envs) # create epsilon and beta schedule # NOTE: hardcoded for now self.eps_decay = LinearSchedule(50000 * 200, 0.01, 1.0) # self.eps_decay = LinearSchedule(self.args.num_episodes * 200, 0.01, 1.0) self.total_steps = 0 self.num_episodes = 0 # for storing resutls self.results_dict = { "train_rewards": [], "train_constraints": [], "eval_rewards": [], "eval_constraints": [], } self.cost_indicator = "none" if "grid" in self.args.env_name: self.cost_indicator = 'pit' else: raise Exception("not implemented yet") self.eps = self.eps_decay.value(self.total_steps) def pi(self, state, greedy_eval=False): """ take the action based on the current policy """ with torch.no_grad(): # to take random action or not if (random.random() > self.eps_decay.value( self.total_steps)) or greedy_eval: q_value = self.dqn(state) # chose the max/greedy actions action = q_value.max(1)[1].cpu().numpy() else: action = np.random.randint(0, high=self.action_dim, size=(self.args.num_envs, )) return action def compute_n_step_returns(self, next_value, rewards, masks): """ n-step SARSA returns """ R = next_value returns = [] for step in reversed(range(len(rewards))): R = rewards[step] + self.args.gamma * R * masks[step] returns.insert(0, R) return returns def log_episode_stats(self, ep_reward, ep_constraint): """ log the stats for environment performance """ # log episode statistics self.results_dict["train_rewards"].append(ep_reward) self.results_dict["train_constraints"].append(ep_constraint) if self.writer: self.writer.add_scalar("Return", ep_reward, self.num_episodes) self.writer.add_scalar("Constraint", ep_constraint, self.num_episodes) log( 'Num Episode {}\t'.format(self.num_episodes) + \ 'E[R]: {:.2f}\t'.format(ep_reward) +\ 'E[C]: {:.2f}\t'.format(ep_constraint) +\ 'avg_train_reward: {:.2f}\t'.format(np.mean(self.results_dict["train_rewards"][-100:])) +\ 'avg_train_constraint: {:.2f}\t'.format(np.mean(self.results_dict["train_constraints"][-100:])) ) def run(self): """ Learning happens here """ self.total_steps = 0 self.eval_steps = 0 # reset state and env # reset exploration porcess state = self.envs.reset() prev_state = state ep_reward = 0 ep_len = 0 ep_constraint = 0 start_time = time.time() while self.num_episodes < self.args.num_episodes: values = [] c_q_vals = [] c_r_vals = [] states = [] actions = [] mus = [] prev_states = [] rewards = [] done_masks = [] begin_masks = [] constraints = [] # n-step sarsa for _ in range(self.args.traj_len): state = 
torch.FloatTensor(state).to(self.device) # get the action action = self.pi(state) next_state, reward, done, info = self.envs.step(action) # convert it back to tensor action = torch.LongTensor(action).unsqueeze(1).to(self.device) q_values = self.dqn(state) Q_value = q_values.gather(1, action) # logging mode for only agent 1 ep_reward += reward[0] ep_constraint += info[0][self.cost_indicator] values.append(Q_value) rewards.append( torch.FloatTensor(reward).unsqueeze(1).to(self.device)) done_masks.append( torch.FloatTensor(1 - done).unsqueeze(1).to(self.device)) begin_masks.append( torch.FloatTensor([ci['begin'] for ci in info ]).unsqueeze(1).to(self.device)) constraints.append( torch.FloatTensor([ci[self.cost_indicator] for ci in info ]).unsqueeze(1).to(self.device)) prev_states.append(prev_state) states.append(state) actions.append(action) # update states prev_state = state state = next_state self.total_steps += (1 * self.args.num_envs) # hack to reuse the same code # iteratively add each done episode, so that can eval at regular interval for _ in range(done.sum()): if done[0]: if self.num_episodes % self.args.log_every == 0: self.log_episode_stats(ep_reward, ep_constraint) # reset the rewards anyways ep_reward = 0 ep_constraint = 0 self.num_episodes += 1 # eval the policy here after eval_every steps if self.num_episodes % self.args.eval_every == 0: eval_reward, eval_constraint = self.eval() self.results_dict["eval_rewards"].append(eval_reward) self.results_dict["eval_constraints"].append( eval_constraint) log('----------------------------------------') log('Eval[R]: {:.2f}\t'.format(eval_reward) +\ 'Eval[C]: {}\t'.format(eval_constraint) +\ 'Episode: {}\t'.format(self.num_episodes) +\ 'avg_eval_reward: {:.2f}\t'.format(np.mean(self.results_dict["eval_rewards"][-10:])) +\ 'avg_eval_constraint: {:.2f}\t'.format(np.mean(self.results_dict["eval_constraints"][-10:])) ) log('----------------------------------------') if self.writer: self.writer.add_scalar("eval_reward", eval_reward, self.eval_steps) self.writer.add_scalar("eval_constraint", eval_constraint, self.eval_steps) self.eval_steps += 1 # break here if self.num_episodes >= self.args.num_episodes: break # calculate targets here next_state = torch.FloatTensor(next_state).to(self.device) next_q_values = self.dqn(next_state) next_action = self.pi(next_state) next_action = torch.LongTensor(next_action).unsqueeze(1).to( self.device) next_q_values = next_q_values.gather(1, next_action) # calculate targets target_Q_vals = self.compute_n_step_returns( next_q_values, rewards, done_masks) Q_targets = torch.cat(target_Q_vals).detach() Q_values = torch.cat(values) # bias corrected loss loss = F.mse_loss(Q_values, Q_targets) self.optimizer.zero_grad() loss.backward() self.optimizer.step() # done with all the training # save the models self.save_models() def eval(self): """ evaluate the current policy and log it """ avg_reward = [] avg_constraint = [] with torch.no_grad(): for _ in range(self.args.eval_n): state = self.eval_env.reset() done = False ep_reward = 0 ep_constraint = 0 ep_len = 0 start_time = time.time() while not done: # convert the state to tensor state_tensor = torch.FloatTensor(state).to( self.device).unsqueeze(0) # get the policy action action = self.pi(state_tensor, greedy_eval=True)[0] next_state, reward, done, info = self.eval_env.step(action) ep_reward += reward ep_len += 1 ep_constraint += info[self.cost_indicator] # update the state state = next_state avg_reward.append(ep_reward) avg_constraint.append(ep_constraint) return 
np.mean(avg_reward), np.mean(avg_constraint) def save_models(self): """create results dict and save""" models = { "dqn": self.dqn.state_dict(), "env": copy.deepcopy(self.eval_env), } torch.save(models, os.path.join(self.args.out, 'models.pt')) torch.save(self.results_dict, os.path.join(self.args.out, 'results_dict.pt')) def load_models(self): models = torch.load(os.path.join(self.args.out, 'models.pt')) self.dqn.load_state_dict(models["dqn"]) self.eval_env = models["env"]
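# A quick worked example of compute_n_step_returns above: with gamma = 0.9,
# rewards [1, 0, 2], no terminations (all masks 1) and a bootstrap value of
# 10 from Q(s', a'), the recursion R = r_t + gamma * R * mask unrolls to
#   R_2 = 2 + 0.9 * 10   = 11.0
#   R_1 = 0 + 0.9 * 11.0 = 9.9
#   R_0 = 1 + 0.9 * 9.9  = 9.91
# so each Q-target is the discounted n-step SARSA return.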
class DQN: """ This baseline solves the problem using standard q-learning over the cross product between the RM and the MDP """ def __init__(self, sess, policy_name, learning_params, curriculum, num_features, num_states, num_actions): # initialize attributes self.sess = sess self.learning_params = learning_params self.use_double_dqn = learning_params.use_double_dqn self.use_priority = learning_params.prioritized_replay self.policy_name = policy_name self.tabular_case = learning_params.tabular_case # This proxy adds the machine state representation to the MDP state self.feature_proxy = FeatureProxy(num_features, num_states, self.tabular_case) self.num_actions = num_actions self.num_features = self.feature_proxy.get_num_features() # create dqn network self._create_network(learning_params.lr, learning_params.gamma, learning_params.num_neurons, learning_params.num_hidden_layers) # create experience replay buffer if self.use_priority: self.replay_buffer = PrioritizedReplayBuffer( learning_params.buffer_size, alpha=learning_params.prioritized_replay_alpha) if learning_params.prioritized_replay_beta_iters is None: learning_params.prioritized_replay_beta_iters = curriculum.total_steps self.beta_schedule = LinearSchedule( learning_params.prioritized_replay_beta_iters, initial_p=learning_params.prioritized_replay_beta0, final_p=1.0) else: self.replay_buffer = ReplayBuffer(learning_params.buffer_size) self.beta_schedule = None # count of the number of environmental steps self.step = 0 def _create_network(self, lr, gamma, num_neurons, num_hidden_layers): total_features = self.num_features total_actions = self.num_actions # Inputs to the network self.s1 = tf.placeholder(tf.float64, [None, total_features]) self.a = tf.placeholder(tf.int32) self.r = tf.placeholder(tf.float64) self.s2 = tf.placeholder(tf.float64, [None, total_features]) self.done = tf.placeholder(tf.float64) self.IS_weights = tf.placeholder( tf.float64) # Importance sampling weights for prioritized ER # Creating target and current networks with tf.variable_scope( self.policy_name ): # helps to give different names to this variables for this network # Defining regular and target neural nets if self.tabular_case: with tf.variable_scope("q_network") as scope: q_values, _ = create_linear_regression( self.s1, total_features, total_actions) scope.reuse_variables() q_target, _ = create_linear_regression( self.s2, total_features, total_actions) else: with tf.variable_scope("q_network") as scope: q_values, q_values_weights = create_net( self.s1, total_features, total_actions, num_neurons, num_hidden_layers) if self.use_double_dqn: scope.reuse_variables() q2_values, _ = create_net(self.s2, total_features, total_actions, num_neurons, num_hidden_layers) with tf.variable_scope("q_target"): q_target, q_target_weights = create_net( self.s2, total_features, total_actions, num_neurons, num_hidden_layers) self.update_target = create_target_updates( q_values_weights, q_target_weights) # Q_values -> get optimal actions self.best_action = tf.argmax(q_values, 1) # Optimizing with respect to q_target action_mask = tf.one_hot(indices=self.a, depth=total_actions, dtype=tf.float64) q_current = tf.reduce_sum(tf.multiply(q_values, action_mask), 1) if self.use_double_dqn: # DDQN best_action_mask = tf.one_hot(indices=tf.argmax(q2_values, 1), depth=total_actions, dtype=tf.float64) q_max = tf.reduce_sum(tf.multiply(q_target, best_action_mask), 1) else: # DQN q_max = tf.reduce_max(q_target, axis=1) # Computing td-error and loss function q_max = q_max * (1.0 - self.done ) 
# dead ends must have q_max equal to zero q_target_value = self.r + gamma * q_max q_target_value = tf.stop_gradient(q_target_value) if self.use_priority: # prioritized experience replay self.td_error = q_current - q_target_value huber_loss = 0.5 * tf.square(self.td_error) # without clipping loss = tf.reduce_mean( self.IS_weights * huber_loss) # weights fix bias in case of using priorities else: # standard experience replay loss = 0.5 * tf.reduce_sum( tf.square(q_current - q_target_value)) # Defining the optimizer if self.tabular_case: optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr) else: optimizer = tf.train.AdamOptimizer(learning_rate=lr) self.train = optimizer.minimize(loss=loss) # Initializing the network values self.sess.run(tf.variables_initializer(self._get_network_variables())) self.update_target_network() # copying weights to target net def _train(self, s1, a, r, s2, done, IS_weights): if self.use_priority: _, td_errors = self.sess.run( [self.train, self.td_error], { self.s1: s1, self.a: a, self.r: r, self.s2: s2, self.done: done, self.IS_weights: IS_weights }) else: self.sess.run(self.train, { self.s1: s1, self.a: a, self.r: r, self.s2: s2, self.done: done }) td_errors = None return td_errors def get_number_features(self): return self.num_features def learn(self): if self.use_priority: experience = self.replay_buffer.sample( self.learning_params.batch_size, beta=self.beta_schedule.value(self.get_step())) s1, a, r, s2, done, weights, batch_idxes = experience else: s1, a, r, s2, done = self.replay_buffer.sample( self.learning_params.batch_size) weights, batch_idxes = None, None td_errors = self._train(s1, a, r, s2, done, weights) # returns the absolute td_error if self.use_priority: new_priorities = np.abs( td_errors) + self.learning_params.prioritized_replay_eps self.replay_buffer.update_priorities(batch_idxes, new_priorities) def add_experience(self, s1, u1, a, r, s2, u2, done): s1 = self.feature_proxy.add_state_features(s1, u1) s2 = self.feature_proxy.add_state_features(s2, u2) self.replay_buffer.add(s1, a, r, s2, done) def get_step(self): return self.step def add_step(self): self.step += 1 def get_best_action(self, s1, u1): s1 = self.feature_proxy.add_state_features(s1, u1).reshape( (1, self.num_features)) return self.sess.run(self.best_action, {self.s1: s1}) def update_target_network(self): if not self.tabular_case: self.sess.run(self.update_target) def _get_network_variables(self): return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.policy_name)
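# create_target_updates is not included in this excerpt. A minimal TF1-style
# sketch consistent with its use above: build one op that copies every online
# q_network variable into the matching q_target variable.
def create_target_updates(weights, target_weights):
    return tf.group(*[
        tf.assign(w_target, w)
        for w, w_target in zip(weights, target_weights)
    ])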
class Learn:
    def __init__(self, config, env):
        self.config = config
        self.env = env
        self.agent_ids = self.get_agent_ids()
        self.replay_memory, self.beta_schedule = self.init_replay_memory()
        self.optimizer = tf.keras.optimizers.Adam(self.config.lr)

        # Create the schedule for exploration starting from 1.
        self.exploration = LinearSchedule(
            schedule_timesteps=int(config.exploration_fraction *
                                   config.num_timesteps),
            initial_p=1.0,
            final_p=config.exploration_final_eps)
        self.eps = tf.Variable(0.0)

        self.models, self.target_models = self._init_networks()
        self.agents = [
            Agent(config, self.models[agent_id], self.target_models[agent_id],
                  agent_id) for agent_id in self.agent_ids
        ]
        self.support_z = np.linspace(-5.0, 5.0, self.config.atoms)
        self.fps_zeros = np.zeros(
            (self.config.num_agents, self.config.fp_shape))

    def _init_networks(self):
        network = Network(self.config, self.agent_ids)
        return network.build_models("learn_"), network.build_models("target_")

    def get_agent_ids(self):
        return [agent_id for agent_id in range(self.config.num_agents)]

    def init_replay_memory(self):
        """
        :return: replay_buffer, beta_schedule
        """
        if self.config.prioritized_replay:
            replay_buffer = PrioritizedReplayBuffer(
                self.config.buffer_size,
                alpha=self.config.prioritized_replay_alpha,
                n_steps=self.config.n_steps)
            # Bind the local name first: the original only assigned it inside
            # the None-check, which raised a NameError whenever the config
            # actually provided a value.
            prioritized_replay_beta_iters = self.config.prioritized_replay_beta_iters
            if prioritized_replay_beta_iters is None:
                prioritized_replay_beta_iters = self.config.num_timesteps
            beta_schedule = LinearSchedule(
                prioritized_replay_beta_iters,
                initial_p=self.config.prioritized_replay_beta0,
                final_p=1.0)
        else:
            replay_buffer = ReplayBuffer(self.config.buffer_size,
                                         self.config.n_steps)
            beta_schedule = None
        return replay_buffer, beta_schedule

    # Runs eagerly (no @tf.function): the .numpy() call below is only valid
    # on eager tensors.
    def get_actions(self, obs, stochastic=True, update_eps=-1):
        """
        :param obs: observation for all agents
        :param stochastic: True for the train phase, False for the test phase
        :param update_eps: epsilon update for eps-greedy
        :return: actions, q_values of all agents as fps
        """
        deterministic_actions = []
        fps = []
        for agent_id in self.agent_ids:
            if self.config.distributionalRL:
                deterministic_action, fp = self.agents[
                    agent_id].greedy_action_dist(obs[agent_id])
            else:
                deterministic_action, fp = self.agents[agent_id].greedy_action(
                    obs[agent_id])
            deterministic_actions.append(deterministic_action)
            fps.append(fp)
        random_actions = tf.random.uniform(tf.stack([self.config.num_agents]),
                                           minval=0,
                                           maxval=self.config.num_actions,
                                           dtype=tf.int64)
        chose_random = tf.random.uniform(tf.stack([self.config.num_agents]),
                                         minval=0,
                                         maxval=1,
                                         dtype=tf.float32) < self.eps
        stochastic_actions = tf.where(chose_random, random_actions,
                                      deterministic_actions)
        if stochastic:
            actions = stochastic_actions.numpy()
        else:
            actions = deterministic_actions
        if update_eps >= 0:
            self.eps.assign(update_eps)
        return actions, fps

    @tf.function
    def get_max_values(self, obs):
        """
        :param obs: list of observations, one per agent
        :return: best values based on the Q-learning target max_a' Q(s', a')
        """
        best_q_vals = []
        for agent_id in self.agent_ids:
            if self.config.distributionalRL:
                best_q_val = self.agents[agent_id].max_value_dist(
                    obs[agent_id])
            else:
                best_q_val = self.agents[agent_id].max_value(obs[agent_id])
best_q_vals.append(best_q_val) # print(f' best_q_vals.numpy() {best_q_vals.numpy()}') return best_q_vals @tf.function def compute_loss(self, obses_t, actions, rewards, obses_tp1, dones, weights, fps=None): """ :param obses_t: list observations one for each agent :param actions: :param rewards: :param dones: :param weights: :param fps: :return: loss and td errors tensor list one for each agent """ # print(f' obses_t.shape {obses_t.shape}') losses = [] td_errors = np.zeros(self.config.batch_size) for agent_id in self.agent_ids: if self.config.distributionalRL: loss, td_error = self.agents[agent_id].compute_loss_dist( obses_t[agent_id], actions[agent_id], rewards[agent_id], obses_tp1[agent_id], dones[agent_id], weights[agent_id], fps[agent_id]) else: loss, td_error = self.agents[agent_id].compute_loss( obses_t[agent_id], actions[agent_id], rewards[agent_id], obses_tp1[agent_id], dones[agent_id], weights[agent_id], fps[agent_id]) losses.append(loss) td_errors += td_error return losses, td_errors @tf.function() def train(self, obses_t, actions, rewards, obses_tp1, dones, weights, fps=None): with tf.GradientTape() as tape: losses, td_errors = self.compute_loss(obses_t, actions, rewards, obses_tp1, dones, weights, fps) loss = tf.reduce_sum(losses) # params = tape.watched_variables() params = [] for agent_id in self.agent_ids: params += self.agents[agent_id].model.trainable_variables # print(f' param {params}') grads = tape.gradient(loss, params) if self.config.grad_norm_clipping: clipped_grads = [] for grad in grads: clipped_grads.append( tf.clip_by_norm(grad, self.config.grad_norm_clipping)) grads = clipped_grads self.optimizer.apply_gradients(list(zip(grads, params))) return loss, td_errors def compute_n_step_return(self, mb_rewards, mb_dones, obs1): if self.config.gamma > 0.0: # print(f' last_values {last_values}') last_values = self.get_max_values(tf.constant(obs1)) for agent_id, (rewards, dones, value) in enumerate( zip(mb_rewards, mb_dones, last_values)): rewards = rewards.tolist() dones = dones.tolist() value = value.tolist() if dones[-1] == 0: rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1] else: rewards = discount_with_dones(rewards, dones, self.config.gamma) mb_rewards[agent_id] = rewards return mb_rewards def create_fingerprints(self, fps, t): fps_ = [] for agent_id in self.agent_ids: fp = fps[:agent_id] fp.extend(fps[agent_id + 1:]) fp_a = np.concatenate((fp, [[self.exploration.value(t) * 100, t]]), axis=None) # print(f' fp_a.shape is {np.array(fp_a).shape}') fps_.append(fp_a) return fps_ def learn(self): episode_rewards = [0.0] obs = self.env.reset() print(obs.shape) done = False tstart = time.time() episodes_trained = [0, False] # [episode_number, Done flag] t = 0 for ep in range(self.config.num_episodes): episode_length = 0 update_eps = tf.constant(self.exploration.value(t)) mb_obs, mb_rewards, mb_actions, mb_obs1, mb_dones, mb_fps = [], [], [], [], [], [] while True: # for n_step in range(self.config.n_steps): t += 1 episode_length += 1 # print(f't is {t} -- n_steps is {n_step}') actions, fps = self.get_actions(tf.constant(obs), update_eps=update_eps) # print(f' fps.shape is {np.array(fps).shape}') if self.config.num_agents == 1: obs1, rews, done, _ = self.env.step(actions[0]) else: obs1, rews, done, _ = self.env.step(actions) fps_ = self.create_fingerprints(fps, t) # print(f' fps_.shape is {np.array(fps_).shape}') mb_fps.append(fps_) mb_obs.append(obs.copy()) mb_actions.append(actions) mb_dones.append([float(done) for _ in self.agent_ids]) # 
print(f'rewards is {rews}') if self.config.same_reward_for_agents: rews = [ np.max(rews) for _ in range(len(rews)) ] # for cooperative purpose same reward for every one mb_obs1.append(obs1.copy()) mb_rewards.append(rews) obs = obs1 episode_rewards[-1] += np.max(rews) if done or episode_length > self.config.max_episodes_length: episodes_trained[0] = episodes_trained[0] + 1 episodes_trained[1] = True episode_rewards.append(0.0) obs = self.env.reset() break # to break while as episode is finished here mb_obs.append(obs.copy()) mb_dones.append([float(done) for _ in self.agent_ids]) # print(f' mb_obs.shape is {np.array(mb_obs).shape}') for extra_step in range(self.config.n_steps - len(mb_actions) + 1): # print('extra_info as 0 s added') mb_obs.append(obs * 0.) mb_actions.append(actions * 0.) mb_rewards.append(np.array(rews) * 0.) mb_fps.append(self.fps_zeros) mb_dones.append([float(0.) for _ in self.agent_ids]) # print(f' mb_fps.shape is {np.array(mb_fps).shape}') # swap axes to have lists in shape of (num_agents, num_steps, ...) # print(f' mb_obs.shape is {np.array(mb_obs).shape}') # print(f' mb_dones.shape is {np.array(mb_dones).shape}') mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1) # print(f' mb_obs.shape is {np.array(mb_obs).shape}') mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1) mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1) mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1) mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1) mb_masks = mb_dones # [:, :-1] mb_dones = mb_dones[:, 1:] # print(f' before discount mb_rewards is {mb_rewards}') mb_rewards = self.compute_n_step_return(mb_rewards, mb_dones, obs1) # print(f' after discount mb_rewards is {mb_rewards}') if self.config.replay_buffer is not None: self.replay_memory.add_episode(mb_obs, mb_actions, mb_rewards, mb_masks, mb_fps) if ep > self.config.learning_starts: if self.config.prioritized_replay: experience = self.replay_memory.sample( self.config.batch_size, beta=self.beta_schedule.value(t)) (obses_t, actions, rewards, dones, fps, weights, batch_idxes) = experience # print(f' dones.shape {dones.shape}') else: obses_t, actions, rewards, dones, fps = self.replay_memory.sample( self.config.batch_size) weights, batch_idxes = np.ones_like(rewards), None # print(f'obses_t.shape {obses_t.shape}') # shape format is (batch_size, agent_num, n_steps, ...) obses_t = obses_t.swapaxes(0, 1) obses_t = obses_t[:, :, 0:-1] obses_tp1 = obses_t[:, :, -1] # print(f'obses_t.shape {obses_t.shape}') # print(f'obses_tp1.shape {obses_tp1.shape}') actions = actions.swapaxes(0, 1) # print(f'rewards.shape {rewards.shape}') rewards = rewards.swapaxes(0, 1) # print(f'rewards.shape {rewards.shape}') # obses_tp1 = obses_tp1.swapaxes(0, 1) dones = dones.swapaxes(0, 1) fps = fps.swapaxes(0, 1) # print(f'weights.shape {weights.shape}') # weights = np.expand_dims(weights, 2) # print(f'weights.shape {weights.shape}') _wt = np.tile(weights, (self.config.num_agents, 1)) # print(f'_wt.shape {_wt.shape}') # print(f'weights.shape {weights.shape}') # weights = weights.swapaxes(0, 1) # weights shape is (1, batch_size, n_steps) # print(f'weights.shape {weights.shape}') # shape format is (agent_num, batch_size, n_steps, ...) 
                    obses_t = tf.constant(obses_t)
                    obses_tp1 = tf.constant(obses_tp1)
                    actions = tf.constant(actions)
                    rewards = tf.constant(rewards)
                    dones = tf.constant(dones)
                    fps = tf.constant(fps)
                    _wt = tf.constant(_wt)

                    loss, td_errors = self.train(obses_t, actions, rewards,
                                                 obses_tp1, dones, _wt, fps)

                    if self.config.prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + self.config.prioritized_replay_eps
                        self.replay_memory.update_priorities(
                            batch_idxes, new_priorities)

                    if ep % self.config.print_freq == 0:
                        print(f't = {t} , loss = {loss}')

                if ep > self.config.learning_starts and ep % self.config.target_network_update_freq == 0:
                    # Update target network periodically.
for agent_id in self.agent_ids: self.agents[agent_id].soft_update_target() if ep % self.config.playing_test == 0 and ep != 0: # self.network.save(self.config.save_path) self.play_test_games() mean_100ep_reward = np.mean(episode_rewards[-101:-1]) num_episodes = len(episode_rewards) if ep % (self.config.print_freq * 10) == 0: time_1000_step = time.time() nseconds = time_1000_step - tstart tstart = time_1000_step print( f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq*10} to {t} steps: {nseconds}' ) # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0: if episodes_trained[ 1] and episodes_trained[0] % self.config.print_freq == 0: episodes_trained[1] = False logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 past episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t))) logger.dump_tabular() def play_test_games(self): num_tests = self.config.num_tests test_env = init_env(self.config, mode='test') test_rewards = np.zeros(num_tests) for i in range(num_tests): done = False obs = test_env.reset() iter = 0 while True: iter += 1 actions, _ = self.get_actions(tf.constant(obs), stochastic=False) # print(f'actions[0] {actions[0]}, test_done {done}, {iter}') if self.config.num_agents == 1: obs1, rews, done, _ = test_env.step(actions[0]) else: obs1, rews, done, _ = test_env.step(actions) # ToDo fingerprint computation obs = obs1 if done or iter >= self.config.max_episodes_length: # print(f'test {i} rewards is {rews}') test_rewards[i] = np.mean(rews) break print( f'test_rewards: {test_rewards} \n mean reward of {num_tests} tests: {np.mean(test_rewards)}' ) test_env.close()
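# Both multi-agent Learn classes bootstrap rewards with discount_with_dones.
# For reference, the baselines-style definition assumed here: a backwards
# pass that zeroes the running return whenever an episode boundary is hit.
def discount_with_dones(rewards, dones, gamma):
    discounted = []
    r = 0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1. - done)
        discounted.append(r)
    return discounted[::-1]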
def learn(env, num_actions=3, lr=5e-4, max_timesteps=100000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16): torch.set_num_threads(num_cpu) if prioritized_replay: replay_buffer = PrioritizedReplayBuffer( buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = max_timesteps beta_schedule = LinearSchedule( prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None exploration = LinearSchedule( schedule_timesteps=int(exploration_fraction * max_timesteps), initial_p=1.0, final_p=exploration_final_eps) episode_rewards = [0.0] saved_mean_reward = None obs = env.reset() player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] screen = player_relative obs, xy_per_marine = common.init(env, obs) group_id = 0 reset = True dqn = DQN(num_actions, lr, cuda) print('\nCollecting experience...') checkpoint_path = 'models/deepq/checkpoint.pth.tar' if os.path.exists(checkpoint_path): dqn, saved_mean_reward = load_checkpoint(dqn, cuda, filename=checkpoint_path) for t in range(max_timesteps): # Take action and update exploration to the newest value # custom process for DefeatZerglingsAndBanelings obs, screen, player = common.select_marine(env, obs) # action = act( # np.array(screen)[None], update_eps=update_eps, **kwargs)[0] action = dqn.choose_action(np.array(screen)[None]) reset = False rew = 0 new_action = None obs, new_action = common.marine_action(env, obs, player, action) army_count = env._obs[0].observation.player_common.army_count try: if army_count > 0 and _ATTACK_SCREEN in obs[0].observation["available_actions"]: obs = env.step(actions=new_action) else: new_action = [sc2_actions.FunctionCall(_NO_OP, [])] obs = env.step(actions=new_action) except Exception as e: # print(e) 1 # Do nothing player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE] new_screen = player_relative rew += obs[0].reward done = obs[0].step_type == environment.StepType.LAST selected = obs[0].observation["screen"][_SELECTED] player_y, player_x = (selected == _PLAYER_FRIENDLY).nonzero() if len(player_y) > 0: player = [int(player_x.mean()), int(player_y.mean())] if len(player) == 2: if player[0] > 32: new_screen = common.shift(LEFT, player[0] - 32, new_screen) elif player[0] < 32: new_screen = common.shift(RIGHT, 32 - player[0], new_screen) if player[1] > 32: new_screen = common.shift(UP, player[1] - 32, new_screen) elif player[1] < 32: new_screen = common.shift(DOWN, 32 - player[1], new_screen) # Store transition in the replay buffer. replay_buffer.add(screen, action, rew, new_screen, float(done)) screen = new_screen episode_rewards[-1] += rew reward = episode_rewards[-1] if done: print("Episode Reward : %s" % episode_rewards[-1]) obs = env.reset() player_relative = obs[0].observation["screen"][ _PLAYER_RELATIVE] screen = player_relative group_list = common.init(env, obs) # Select all marines first # env.step(actions=[sc2_actions.FunctionCall(_SELECT_UNIT, [_SELECT_ALL])]) episode_rewards.append(0.0) reset = True if t > learning_starts and t % train_freq == 0: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. 
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = dqn.learn(obses_t, actions, rewards, obses_tp1, gamma, batch_size) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. dqn.update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_checkpoint({ 'epoch': t + 1, 'state_dict': dqn.save_state_dict(), 'best_accuracy': mean_100ep_reward }, checkpoint_path) saved_mean_reward = mean_100ep_reward
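# save_checkpoint / load_checkpoint are not defined in this excerpt. A
# hypothetical PyTorch sketch matching how they are called above, assuming
# the custom DQN class exposes load_state_dict; the real helpers may store
# more state:
import torch

def save_checkpoint(state, filename):
    torch.save(state, filename)

def load_checkpoint(dqn, cuda, filename):
    checkpoint = torch.load(filename, map_location=None if cuda else 'cpu')
    dqn.load_state_dict(checkpoint['state_dict'])
    return dqn, checkpoint.get('best_accuracy')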
def main(): with open('cartpole.json', encoding='utf-8') as config_file: config = json.load(config_file) env = gym.make('CartPole-v0') state_shape = env.observation_space.shape action_count = env.action_space.n layers = [] for layer in config['layers']: layers.append(Dense(layer, activation=C.relu)) layers.append(Dense((action_count, config['n']), activation=None)) model_func = Sequential(layers) replay_buffer = ReplayBuffer(config['buffer_capacity']) # Fill the buffer with randomly generated samples state = env.reset() for i in range(config['buffer_capacity']): action = env.action_space.sample() post_state, reward, done, _ = env.step(action) replay_buffer.add(state.astype(np.float32), action, reward, post_state.astype(np.float32), float(done)) if done: state = env.reset() reward_buffer = np.zeros(config['max_episodes'], dtype=np.float32) losses = [] epsilon_schedule = LinearSchedule(1, 0.01, config['max_episodes']) agent = CategoricalAgent(state_shape, action_count, model_func, config['vmin'], config['vmax'], config['n'], lr=config['lr'], gamma=config['gamma']) log_freq = config['log_freq'] for episode in range(1, config['max_episodes'] + 1): state = env.reset().astype(np.float32) done = False while not done: action = agent.act(state, epsilon_schedule.value(episode)) post_state, reward, done, _ = env.step(action) post_state = post_state.astype(np.float32) replay_buffer.add(state, action, reward, post_state, float(done)) reward_buffer[episode - 1] += reward state = post_state minibatch = replay_buffer.sample(config['minibatch_size']) agent.train(*minibatch) loss = agent.trainer.previous_minibatch_loss_average losses.append(loss) if episode % config['target_update_freq'] == 0: agent.update_target() if episode % log_freq == 0: average = np.sum(reward_buffer[episode - log_freq: episode]) / log_freq print('Episode {:4d} | Loss: {:6.4f} | Reward: {}'.format(episode, loss, average)) agent.model.save('cartpole.cdqn') sns.set_style('dark') pd.Series(reward_buffer).rolling(window=log_freq).mean().plot() plt.xlabel('Episode') plt.ylabel('Reward') plt.title('CartPole - Reward with Time') plt.show() plt.plot(np.arange(len(losses)), losses) plt.xlabel('Episode') plt.ylabel('Loss') plt.title('CartPole - Loss with Time') plt.show()
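# The CategoricalAgent trained above is assumed to follow C51: the network
# outputs a probability distribution over config['n'] fixed atoms spanning
# [vmin, vmax], and the Bellman update projects the shifted support
# r + gamma * z back onto those atoms. A NumPy sketch of that projection
# (the agent's actual implementation may differ):
import numpy as np

def project_distribution(next_probs, rewards, dones, vmin, vmax, n, gamma):
    z = np.linspace(vmin, vmax, n)
    dz = (vmax - vmin) / (n - 1)
    target = np.zeros_like(next_probs)
    rows = np.arange(len(rewards))
    for j in range(n):
        # Shifted, clipped atom positions and their fractional index.
        tz = np.clip(rewards + (1.0 - dones) * gamma * z[j], vmin, vmax)
        b = (tz - vmin) / dz
        lo, hi = np.floor(b).astype(int), np.ceil(b).astype(int)
        same = lo == hi  # mass lands exactly on an atom
        target[rows[same], lo[same]] += next_probs[same, j]
        diff = ~same     # otherwise split mass between neighbouring atoms
        target[rows[diff], lo[diff]] += next_probs[diff, j] * (hi - b)[diff]
        target[rows[diff], hi[diff]] += next_probs[diff, j] * (b - lo)[diff]
    return target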