def load_history(agent, env, hist_files):
    logger.info()
    # Load data.
    if hist_files:
        for fn_tmp in hist_files.split(','):
            fn_base, fn_ext = os.path.splitext(fn_tmp)
            # This relies on the file names following the format base_rank.ext.
            # TODO: when mpi_size > 1 and there is not enough experience, there might be handshake problems.
            fn_rank = '%s_%d%s' % (fn_base, MPI.COMM_WORLD.Get_rank(), fn_ext)
            rcd = env.read_step_csv(fn_rank)
            if rcd:
                start = max(0, len(rcd[0]) - int(agent.memory.limit))
                rcd = [r[start:] for r in rcd]
                agent.store_multrans(agent.memory, *rcd)
                logger.info('loaded experiences from %s, memory.nb_entries=%d'
                            % (fn_rank, agent.memory.nb_entries))

    if agent.memory.nb_entries > 0:
        # Log memory statistics.
        stats = agent.get_stats(agent.memory)
        combined_stats = stats.copy()
        for key in sorted(combined_stats.keys()):
            logger.record_tabular(key, combined_stats[key])
        logger.dump_tabular()

    print('rank%d, loaded experiences from %s, memory.nb_entries=%d' % (
        MPI.COMM_WORLD.Get_rank(), hist_files, agent.memory.nb_entries))
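# The base_rank.ext convention above assigns one history file per MPI rank.
# For illustration, a tiny hypothetical helper that derives the per-rank name,
# mirroring the string formatting in load_history:
def rank_filename(fn_tmp, rank):
    fn_base, fn_ext = os.path.splitext(fn_tmp)
    return '%s_%d%s' % (fn_base, rank, fn_ext)

# e.g. rank_filename('runs/steps.csv', 3) -> 'runs/steps_3.csv'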
def run(self):
    print('Training...')
    try:
        # Produce video only if the monitor method is implemented.
        try:
            if self.args.record_video_every != -1:
                self.env.monitor(is_monitor=True, is_train=True,
                                 experiment_dir=self.args.experiment_dir,
                                 record_video_every=self.args.record_video_every)
        except Exception:
            pass

        self.global_time_step = self.model.global_time_step_tensor.eval(self.sess)

        # Calculate the batch size.
        nbatch = self.args.num_envs * self.num_steps
        for iteration in range(self.num_iterations // nbatch + 1):
            self.cur_iteration = iteration
            obs, states, rewards, masks, actions, values = self.model.forward()
            self.model.backward(obs, states, rewards, masks, actions, values,
                                self.summary_writer, self.cur_iteration * nbatch)
            # Update the global step.
            self.model.global_step_assign_op.eval(
                session=self.sess,
                feed_dict={self.model.global_step_input:
                           self.model.global_step_tensor.eval(self.sess) + 1})

            if not iteration % self.args.print_freq:
                # mean_100ep_reward = round(np.mean(epoch_rewards[-99:-1]), 1)
                # num_episodes = len(epoch_rewards)
                logger.record_tabular("steps", iteration * nbatch)
                # logger.record_tabular("episodes", num_episodes)
                # logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("Current date and time", datetime.datetime.now())
                logger.dump_tabular()

            if iteration % self.save_every == 0:
                self.model.save()

        self.env.close()
    except KeyboardInterrupt:
        print('Interrupted, saving model...\n')
        self.model.save()
        self.env.close()
def Imitation_Learning(self, step_time, data=None, policy=None, verbose=2):
    '''
    :param step_time: number of imitation steps to run
    :param data: a list whose elements are dicts with 5 keys: s, a, r, s_, tr
                 sample = {"s": s, "a": a, "s_": s_, "r": r, "tr": done}
    :param policy: a callable mapping a state to an action
    :return: None
    '''
    if data is not None and policy is not None:
        raise Exception("IL needs exactly one guidance source; pass either data or policy, not both.")
    if data is not None:
        for time in range(step_time):
            self.step += 1
            loss = self.backward(data[time])
            if verbose == 1:
                logger.record_tabular("steps", self.step)
                logger.record_tabular("loss", loss)
                logger.dumpkvs()
    if policy is not None:
        s = self.env.reset()
        for time in range(step_time):
            self.step += 1
            a = policy(s)
            s_, r, done, info = self.env.step(a)
            sample = {"s": s, "a": a, "s_": s_, "r": r, "tr": done}
            loss = self.backward(sample)
            s = s_
            if verbose == 1:
                logger.record_tabular("steps", self.step)
                logger.record_tabular("loss", loss)
                logger.dumpkvs()
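# Hypothetical usage sketch for the two guidance modes above; `agent`, `samples`
# and `expert` are illustrative names, not part of the original code:
# (a) guide from recorded samples
#     agent.Imitation_Learning(step_time=len(samples), data=samples, verbose=1)
# (b) guide from a policy
#     agent.Imitation_Learning(step_time=1000, policy=lambda s: expert.act(s), verbose=1)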
def test(eval_env, agent, render_eval=True, nb_epochs=1, start_ckpt=None, **kwargs):
    logger.info('Start testing:', start_ckpt, '\n')
    with tf_util.single_threaded_session() as sess:
        agent.initialize(sess, start_ckpt=start_ckpt)
        sess.graph.finalize()

        for _ in range(nb_epochs):
            combined_stats = {}
            eval_episode(eval_env, render_eval, agent, combined_stats)
            for key in sorted(combined_stats.keys()):
                logger.record_tabular(key, combined_stats[key])
            logger.dump_tabular()
            logger.info('')
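# `eval_episode` is called above but not shown. A minimal sketch of what such a
# helper might look like, assuming a Gym-style env and an agent exposing a
# deterministic `pi(obs)` method (the method and stat keys are assumptions):
def eval_episode(eval_env, render_eval, agent, combined_stats):
    # Roll out one evaluation episode and accumulate simple statistics.
    obs, done, ep_ret, ep_len = eval_env.reset(), False, 0.0, 0
    while not done:
        action = agent.pi(obs)  # assumed deterministic policy method
        obs, reward, done, _ = eval_env.step(action)
        ep_ret += reward
        ep_len += 1
        if render_eval:
            eval_env.render()
    combined_stats['eval/return'] = ep_ret
    combined_stats['eval/episode_length'] = ep_len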
def sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic):
    assert load_model_path is not None
    U.load_state(load_model_path)

    sample_trajs = []
    for iters_so_far in range(max_sample_traj):
        logger.log("********** Iteration %i ************" % iters_so_far)
        traj = traj_gen.__next__()
        ob, new, ep_ret, ac, rew, ep_len = (traj['ob'], traj['new'], traj['ep_ret'],
                                            traj['ac'], traj['rew'], traj['ep_len'])
        logger.record_tabular("ep_ret", ep_ret)
        logger.record_tabular("ep_len", ep_len)
        logger.record_tabular("immediate reward", np.mean(rew))
        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.dump_tabular()
        traj_data = {"ob": ob, "ac": ac, "rew": rew, "ep_ret": ep_ret}
        sample_trajs.append(traj_data)

    sample_ep_rets = [traj["ep_ret"] for traj in sample_trajs]
    logger.log("Average total return: %f" % (sum(sample_ep_rets) / len(sample_ep_rets)))
    if sample_stochastic:
        task_name = 'stochastic.' + task_name
    else:
        task_name = 'deterministic.' + task_name
    with open(task_name + ".pkl", "wb") as f:
        pkl.dump(sample_trajs, f)
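# Usage sketch (hypothetical): reload the dumped trajectories later, e.g. as an
# expert dataset for imitation learning; the file-name prefix mirrors the
# convention above.
# with open('stochastic.' + task_name + '.pkl', 'rb') as f:
#     expert_trajs = pkl.load(f)
# print(len(expert_trajs), expert_trajs[0]['ep_ret'])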
def learn(encoder, action_decorder, state_decorder, embedding_shape, *, dataset,
          logdir, batch_size, time_steps, epsilon=0.001, lr_rate=1e-3):
    lstm_encoder = encoder("lstm_encoder")
    ac_decoder = action_decorder("ac_decoder")
    state_decoder = state_decorder("state_decoder")  # replaced with an MLP
    obs = U.get_placeholder_cached(name="obs")  # for the encoder
    ob = U.get_placeholder_cached(name="ob")
    embedding = U.get_placeholder_cached(name="embedding")
    # obss = U.get_placeholder_cached(name="obss")  # for the action decoder; could the state decoder reuse this? Should it be obs instead?
    # embeddingss = U.get_placeholder_cached(name="embeddingss")  # for the action decoder; the state decoder should be able to use it too
    ac = ac_decoder.pdtype.sample_placeholder([None])
    obs_out = state_decoder.pdtype.sample_placeholder([None])

    # p(z): standard normal prior over the latent. Should it instead be fit to
    # the demonstration distribution? Worth revisiting.
    from common.distributions import make_pdtype
    p_z_pdtype = make_pdtype(embedding_shape)
    p_z_params = U.concatenate(
        [tf.zeros(shape=[embedding_shape], name="mean"),
         tf.zeros(shape=[embedding_shape], name="logstd")], axis=-1)
    p_z = p_z_pdtype.pdfromflat(p_z_params)

    recon_loss = -tf.reduce_mean(
        tf.reduce_sum(ac_decoder.pd.logp(ac) + state_decoder.pd.logp(obs_out),
                      axis=0))  # still needs revision
    kl_loss = lstm_encoder.pd.kl(p_z)  # KL against the standard normal p(z); this also looks questionable
    vae_loss = recon_loss + kl_loss  # vae_loss should be computed over a batch

    ep_stats = stats(["recon_loss", "kl_loss", "vae_loss"])
    losses = [recon_loss, kl_loss, vae_loss]

    # Collect trainable variables.
    var_list = []
    en_var_list = lstm_encoder.get_trainable_variables()
    var_list.extend(en_var_list)
    # ac_de_var_list = ac_decoder.get_trainable_variables()
    # var_list.extend(ac_de_var_list)
    state_de_var_list = state_decoder.get_trainable_variables()
    var_list.extend(state_de_var_list)
    # compute_recon_loss = U.function([ob, obs, embedding, obss, embeddingss, ac, obs_out], recon_loss)
    # NOTE: these functions take five placeholders but are called below with
    # seven arguments; the commented-out obss/embeddingss placeholders above
    # suggest the intended signature.
    compute_losses = U.function([obs, ob, embedding, ac, obs_out], losses)
    compute_grad = U.function([obs, ob, embedding, ac, obs_out],
                              U.flatgrad(vae_loss, var_list))  # not fully thought through; may be wrong
    adam = MpiAdam(var_list, epsilon=epsilon)
    U.initialize()
    adam.sync()

    writer = U.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())

    # =========================== TRAINING ===========================
    iters_so_far = 0
    saver = tf.train.Saver(var_list=tf.trainable_variables(), max_to_keep=100)
    saver_encoder = tf.train.Saver(var_list=en_var_list, max_to_keep=100)
    # saver_pol = tf.train.Saver(var_list=ac_de_var_list, max_to_keep=100)  # keep the policy parameters too, though this seems unused

    while True:
        logger.log("********** Iteration %i ************" % iters_so_far)
        recon_loss_buffer = deque(maxlen=100)
        kl_loss_buffer = deque(maxlen=100)
        vae_loss_buffer = deque(maxlen=100)

        for observations in dataset.get_next_batch(batch_size=time_steps):
            observations = observations.transpose((1, 0))
            embedding_now = lstm_encoder.get_laten_vector(observations)
            embeddings = np.array([embedding_now for _ in range(time_steps)])
            embeddings_reshape = embeddings.reshape((time_steps, -1))
            actions = ac_decoder.act(stochastic=True, ob=observations,
                                     embedding=embeddings_reshape)
            state_outputs = state_decoder.get_outputs(
                observations.reshape(time_steps, -1, 1), embeddings)  # the Gaussian mixture was bolted on haphazardly, but it is in now
            recon_loss, kl_loss, vae_loss = compute_losses(
                observations, observations.reshape(batch_size, time_steps, -1),
                embeddings_reshape, observations.reshape(time_steps, -1, 1),
                embeddings, actions, state_outputs)
            g = compute_grad(
                observations, observations.reshape(batch_size, time_steps, -1),
                embeddings_reshape, observations.reshape(time_steps, -1, 1),
                embeddings, actions, state_outputs)
            adam.update(g, lr_rate)
            recon_loss_buffer.append(recon_loss)
            kl_loss_buffer.append(kl_loss)
            vae_loss_buffer.append(vae_loss)

        ep_stats.add_all_summary(
            writer,
            [np.mean(recon_loss_buffer), np.mean(kl_loss_buffer), np.mean(vae_loss_buffer)],
            iters_so_far)
        logger.record_tabular("recon_loss", recon_loss)
        logger.record_tabular("kl_loss", kl_loss)
        logger.record_tabular("vae_loss", vae_loss)
        logger.dump_tabular()

        if iters_so_far % 10 == 0 and iters_so_far != 0:
            save(saver=saver, sess=tf.get_default_session(), logdir=logdir, step=iters_so_far)
            save(saver=saver_encoder, sess=tf.get_default_session(), logdir="./vae_saver", step=iters_so_far)
            # save(saver=saver_pol, sess=tf.get_default_session(), logdir="pol_saver", step=iters_so_far)
        iters_so_far += 1
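# `save` is used above but not defined here. A minimal sketch of such a helper,
# assuming TF1-style Saver checkpoints (the path layout is an assumption):
def save(saver, sess, logdir, step):
    import os
    os.makedirs(logdir, exist_ok=True)
    # tf.train.Saver.save returns the checkpoint path it wrote.
    ckpt_path = saver.save(sess, os.path.join(logdir, 'model.ckpt'), global_step=step)
    print('Saved checkpoint to', ckpt_path)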
def learn(self):
    episode_rewards = [0.0]
    obs = self.env.reset()
    print(obs.shape)
    done = False
    tstart = time.time()
    episodes_trained = [0, False]  # [episode_number, done flag]
    t = 0
    for ep in range(self.config.num_episodes):
        episode_length = 0
        update_eps = tf.constant(self.exploration.value(t))
        mb_obs, mb_rewards, mb_actions, mb_obs1, mb_dones, mb_fps = [], [], [], [], [], []
        while True:  # was: for n_step in range(self.config.n_steps)
            t += 1
            episode_length += 1
            actions, fps = self.get_actions(tf.constant(obs), update_eps=update_eps)
            if self.config.num_agents == 1:
                obs1, rews, done, _ = self.env.step(actions[0])
            else:
                obs1, rews, done, _ = self.env.step(actions)
            fps_ = self.create_fingerprints(fps, t)
            mb_fps.append(fps_)
            mb_obs.append(obs.copy())
            mb_actions.append(actions)
            mb_dones.append([float(done) for _ in self.agent_ids])
            if self.config.same_reward_for_agents:
                rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
            mb_obs1.append(obs1.copy())
            mb_rewards.append(rews)
            obs = obs1
            episode_rewards[-1] += np.max(rews)
            if done or episode_length > self.config.max_episodes_length:
                episodes_trained[0] += 1
                episodes_trained[1] = True
                episode_rewards.append(0.0)
                obs = self.env.reset()
                break  # episode finished

        mb_obs.append(obs.copy())
        mb_dones.append([float(done) for _ in self.agent_ids])
        # Pad the rollout with zeros up to n_steps.
        for extra_step in range(self.config.n_steps - len(mb_actions) + 1):
            mb_obs.append(obs * 0.)
            mb_actions.append(actions * 0.)
            mb_rewards.append(np.array(rews) * 0.)
            mb_fps.append(self.fps_zeros)
            mb_dones.append([0. for _ in self.agent_ids])

        # Swap axes so lists have shape (num_agents, num_steps, ...).
        mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1)
        mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)
        mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1)
        mb_masks = mb_dones  # [:, :-1]
        mb_dones = mb_dones[:, 1:]
        mb_rewards = self.compute_n_step_return(mb_rewards, mb_dones, obs1)

        if self.config.replay_buffer is not None:
            self.replay_memory.add_episode(mb_obs, mb_actions, mb_rewards, mb_masks, mb_fps)

        if ep > self.config.learning_starts:
            if self.config.prioritized_replay:
                experience = self.replay_memory.sample(
                    self.config.batch_size, beta=self.beta_schedule.value(t))
                (obses_t, actions, rewards, dones, fps, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, dones, fps = self.replay_memory.sample(self.config.batch_size)
                weights, batch_idxes = np.ones_like(rewards), None
            # Sample shape format is (batch_size, agent_num, n_steps, ...).
            # Reshape to (agent_num, batch_size, n_steps, ...).
            obses_t = obses_t.swapaxes(0, 1)
            obses_tp1 = obses_t[:, :, -1]  # bootstrap observation: last step of each rollout
            obses_t = obses_t[:, :, 0:-1]
            actions = actions.swapaxes(0, 1)
            rewards = rewards.swapaxes(0, 1)
            dones = dones.swapaxes(0, 1)
            fps = fps.swapaxes(0, 1)
            # Tile the per-sample weights across agents.
            _wt = np.tile(weights, (self.config.num_agents, 1))
            # if 'rnn' not in self.config.network:
            #     # Merge the batch and step dimensions: (agent_num, batch_size * n_steps, ...).
            #     shape = obses_t.shape
            #     obses_t = np.reshape(obses_t, (shape[0], shape[1] * shape[2], *shape[3:]))
            #     shape = actions.shape
            #     actions = np.reshape(actions, (shape[0], shape[1] * shape[2], *shape[3:]))
            #     shape = rewards.shape
            #     rewards = np.reshape(rewards, (shape[0], shape[1] * shape[2], *shape[3:]))
            #     shape = dones.shape
            #     dones = np.reshape(dones, (shape[0], shape[1] * shape[2], *shape[3:]))
            #     shape = _wt.shape
            #     _wt = np.reshape(_wt, (shape[0], shape[1] * shape[2], *shape[3:]))

            obses_t = tf.constant(obses_t)
            obses_tp1 = tf.constant(obses_tp1)
            actions = tf.constant(actions)
            rewards = tf.constant(rewards)
            dones = tf.constant(dones)
            fps = tf.constant(fps)
            _wt = tf.constant(_wt)

            loss, td_errors = self.train(obses_t, actions, rewards, obses_tp1, dones, _wt, fps)
            # td_errors = td_errors.reshape((self.config.batch_size, -1))
            # td_errors = np.sum(td_errors, 1)
            if self.config.prioritized_replay:
                new_priorities = np.abs(td_errors) + self.config.prioritized_replay_eps
                self.replay_memory.update_priorities(batch_idxes, new_priorities)

            if ep % self.config.print_freq == 0:
                print(f't = {t} , loss = {loss}')

        if ep > self.config.learning_starts and ep % self.config.target_network_update_freq == 0:
            # Update the target networks periodically.
            for agent_id in self.agent_ids:
                self.agents[agent_id].soft_update_target()

        if ep % self.config.playing_test == 0 and ep != 0:
            # self.network.save(self.config.save_path)
            self.play_test_games()

        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)

        if ep % (self.config.print_freq * 10) == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq*10} to {t} steps: {nseconds}')

        # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0:
        if episodes_trained[1] and episodes_trained[0] % self.config.print_freq == 0:
            episodes_trained[1] = False
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 past episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
            logger.dump_tabular()
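# `create_fingerprints` is referenced above but not shown. A plausible sketch
# based on the inline version in the sibling learn() below: each agent's
# fingerprint is the other agents' policy outputs plus the current exploration
# rate and timestep (an assumption, not the original implementation):
def create_fingerprints(self, fps, t):
    fps_ = []
    if self.config.num_agents > 1:
        for a in self.agent_ids:
            other = fps[:a] + fps[a + 1:]  # everyone except agent a
            fp_a = np.concatenate(
                (other, [[self.exploration.value(t) * 100, t]]), axis=None)
            fps_.append(fp_a)
    return fps_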
def run(self):
    # Switch to train mode.
    self.train()

    # Prepare for rollouts.
    seg_generator = self.traj_segment_generator(self.pi, self.env, self.timesteps_per_batch)
    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=100)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=100)  # rolling buffer for episode rewards
    self.check_time_constraints()

    while True:
        if self.callback:
            self.callback(locals(), globals())
        if self.max_timesteps and timesteps_so_far >= self.max_timesteps:
            break
        elif self.max_episodes and episodes_so_far >= self.max_episodes:
            break
        elif self.max_iters and iters_so_far >= self.max_iters:
            break
        elif self.max_seconds and time.time() - tstart >= self.max_seconds:
            break
        cur_lrmult = self.get_lr_multiplier(timesteps_so_far)

        logger.log("********** Iteration %i ************" % iters_so_far)

        segment = seg_generator.__next__()
        self.add_vtarg_and_adv(segment, self.gamma, self.lam)

        ob, ac, atarg, tdlamret = segment["ob"], segment["ac"], segment["adv"], segment["tdlamret"]
        vpredbefore = segment["vpred"]  # predicted value function before update
        atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate
        d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not self.pi.recurrent)
        optim_batchsize = self.optim_batchsize or ob.shape[0]

        # Update running mean/std for the policy.
        # if hasattr(self.pi, "ob_rms"): self.pi.ob_rms.update(ob)

        # Set old parameter values to new parameter values.
        self.oldpi.load_state_dict(self.pi.state_dict())

        logger.log("Optimizing...")
        logger.log(fmt_row(13, self.loss_names))
        # Here we do several optimization epochs over the data.
        for _ in range(self.optim_epochs):
            losses = []  # list of tuples, each of which gives the loss for a minibatch
            for batch in d.iterate_once(self.optim_batchsize):
                self.optimizer.zero_grad()
                batch['ob'] = rearrange_batch_image(batch['ob'])
                batch = self.convert_batch_tensor(batch)
                total_loss, *newlosses = self.forward(batch["ob"], batch["ac"], batch["atarg"],
                                                      batch["vtarg"], cur_lrmult)
                total_loss.backward()
                self.optimizer.step(_step_size=self.optim_stepsize * cur_lrmult)
                losses.append(torch.stack(newlosses[0], dim=0).view(-1))
            mean_losses = torch.mean(torch.stack(losses, dim=0), dim=0).data.cpu().numpy()
            logger.log(fmt_row(13, mean_losses))

        logger.log("Evaluating losses...")
        losses = []
        for batch in d.iterate_once(self.optim_batchsize):
            batch['ob'] = rearrange_batch_image(batch['ob'])
            batch = self.convert_batch_tensor(batch)
            _, *newlosses = self.forward(batch["ob"], batch["ac"], batch["atarg"],
                                         batch["vtarg"], cur_lrmult)
            losses.append(torch.stack(newlosses[0], dim=0).view(-1))
        mean_losses = torch.mean(torch.stack(losses, dim=0), dim=0).data.cpu().numpy()
        logger.log(fmt_row(13, mean_losses))
        for (lossval, name) in zipsame(mean_losses, self.loss_names):
            logger.record_tabular("loss_" + name, lossval)
        logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))

        lrlocal = (segment["ep_lens"], segment["ep_rets"])  # local values
        lens, rews = map(flatten_lists, zip(*[lrlocal]))
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)
        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1
        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)
        logger.dump_tabular()
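# `explained_variance` as used above follows the standard definition
# 1 - Var[y - ypred] / Var[y]; a minimal sketch in the style of the baselines
# implementation:
def explained_variance(ypred, y):
    """Return 1 - Var[y - ypred] / Var[y]: 1 is a perfect value prediction,
    0 is no better than predicting the mean, negative is worse."""
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary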
def learn(self):
    self.network.soft_update_target()
    episode_rewards = [0.0]
    obs = self.env.reset()
    done = False
    tstart = time.time()
    episodes_trained = [0, False]  # [episode_number, done flag]

    for t in range(self.config.num_timesteps):
        update_eps = tf.constant(self.exploration.value(t))
        if t % self.config.print_freq == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq} to {t} steps: {nseconds}')

        mb_obs, mb_rewards, mb_actions, mb_fps, mb_dones = [], [], [], [], []
        epinfos = []
        for nstep in range(self.config.n_steps):
            actions, fps_ = self.choose_action(tf.constant(obs), update_eps=update_eps)
            fps = []
            if self.config.num_agents > 1:
                # Each agent's fingerprint: the other agents' outputs plus
                # the exploration rate and timestep.
                for a in self.agent_ids:
                    fp = fps_[:a]
                    fp.extend(fps_[a + 1:])
                    fp_a = np.concatenate((fp, [[self.exploration.value(t) * 100, t]]), axis=None)
                    fps.append(fp_a)
            mb_obs.append(obs.copy())
            mb_actions.append(actions)
            mb_fps.append(fps)
            mb_dones.append([float(done) for _ in self.agent_ids])
            obs1, rews, done, info = self.env.step(actions.tolist())
            if self.config.same_reward_for_agents:
                rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
            mb_rewards.append(rews)
            obs = obs1
            maybeepinfo = info.get('episode')
            if maybeepinfo:
                epinfos.append(maybeepinfo)
            episode_rewards[-1] += np.max(rews)
            if done:
                episodes_trained[0] += 1
                episodes_trained[1] = True
                episode_rewards.append(0.0)
                obs = self.env.reset()
        mb_dones.append([float(done) for _ in self.agent_ids])

        # Swap axes so lists have shape (num_agents, num_steps, ...).
        mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1)
        mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
        mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]

        if self.config.gamma > 0.0:
            # Discount/bootstrap off the value function.
            last_values = self.network.last_value(tf.constant(obs1))
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.config.gamma)
                mb_rewards[n] = rewards

        if self.config.replay_buffer is not None:
            self.replay_memory.add((mb_obs, mb_actions, mb_rewards, obs1, mb_masks, mb_fps))

        if t > self.config.learning_starts and t % self.config.train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
            if self.config.prioritized_replay:
                experience = self.replay_memory.sample(self.config.batch_size,
                                                       beta=self.beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, fps, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, obses_tp1, dones, fps = self.replay_memory.sample(self.config.batch_size)
                weights, batch_idxes = np.ones_like(rewards), None
            # Sample shape format is (batch_size, agent_num, n_steps, ...).
            obses_t = obses_t.swapaxes(0, 1)
            actions = actions.swapaxes(0, 1)
            rewards = rewards.swapaxes(0, 1)
            obses_tp1 = obses_tp1.swapaxes(0, 1)
            dones = dones.swapaxes(0, 1)
            fps = fps.swapaxes(0, 1)
            weights = weights.swapaxes(0, 1)

            if self.config.network == 'cnn':
                # Merge the batch and step dimensions: (agent_num, batch_size * n_steps, ...).
                shape = obses_t.shape
                obses_t = np.reshape(obses_t, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = actions.shape
                actions = np.reshape(actions, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = rewards.shape
                rewards = np.reshape(rewards, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = dones.shape
                dones = np.reshape(dones, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = weights.shape
                weights = np.reshape(weights, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = fps.shape
                fps = np.reshape(fps, (shape[0], shape[1] * shape[2], *shape[3:]))

            # Shape format is now (agent_num, batch_size, n_steps, ...).
            obses_t = tf.constant(obses_t)
            actions = tf.constant(actions)
            rewards = tf.constant(rewards)
            dones = tf.constant(dones)
            weights = tf.constant(weights)
            fps = tf.constant(fps)

            loss, td_errors = self.train(obses_t, actions, rewards, dones, weights, fps)
            if t % (self.config.train_freq * 50) == 0:
                print(f't = {t} , loss = {loss}')

        if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0:
            # Update the target network periodically.
            self.network.soft_update_target()

        if t % self.config.playing_test == 0 and t != 0:
            # self.network.save(self.config.save_path)
            self.play_test_games()

        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)
        # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0:
        if episodes_trained[1] and episodes_trained[0] % self.config.print_freq == 0:
            episodes_trained[1] = False
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 past episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
            logger.dump_tabular()
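# `discount_with_dones` follows the A2C-style backward recursion
# R_t = r_t + gamma * R_{t+1} * (1 - done_t); a minimal sketch in the style of
# the baselines implementation:
def discount_with_dones(rewards, dones, gamma):
    discounted, r = [], 0.0
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1. - done)  # reset the return at episode ends
        discounted.append(r)
    return discounted[::-1]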
def learn(env,
          seed=None,
          num_agents=2,
          lr=0.00008,
          total_timesteps=100000,
          buffer_size=2000,
          exploration_fraction=0.2,
          exploration_final_eps=0.01,
          train_freq=1,
          batch_size=16,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=2000,
          gamma=0.99,
          target_network_update_freq=1000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    ----------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the
        names of registered models in baselines.common.models (mlp, cnn, conv_only). If a
        function, should take an observation tensor and return a latent variable tensor,
        which will be mapped to the Q function heads (see build_q_func in
        baselines.deepq.models for details on that)
    seed: int or None
        prng seed. Runs with the same seed "should" give the same results. If None, no
        seeding is used.
    lr: float
        learning rate for the adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of the entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of the random-action probability
    train_freq: int
        update the model every `train_freq` steps
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print training progress; set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored at the
        end of training. If you do not wish to restore the best version at the end of
        training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used
    prioritized_replay_alpha: float
        alpha parameter for the prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for the prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta is annealed from its initial value to 1.0.
        If None, equals total_timesteps.
    prioritized_replay_eps: float
        epsilon added to the TD errors when updating priorities
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder

    Returns
    -------
    act: ActWrapper
        Wrapper over the act function. Adds the ability to save and load it.
        See the header of baselines/deepq/categorical.py for details on the act function.
""" # Create all the functions necessary to train the model set_global_seeds(seed) double_q = True grad_norm_clipping = True shared_weights = True play_test = 1000 nsteps = 16 agent_ids = env.agent_ids() # Create the replay buffer if prioritized_replay: replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) else: replay_buffer = ReplayBuffer(buffer_size) beta_schedule = None # Create the schedule for exploration starting from 1. exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) print(f'agent_ids {agent_ids}') num_actions = env.action_space.n print(f'num_actions {num_actions}') dqn_agent = MAgent(env, agent_ids, nsteps, lr, replay_buffer, shared_weights, double_q, num_actions, gamma, grad_norm_clipping, param_noise) if load_path is not None: load_path = osp.expanduser(load_path) ckpt = tf.train.Checkpoint(model=dqn_agent.q_network) manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None) ckpt.restore(manager.latest_checkpoint) print("Restoring from {}".format(manager.latest_checkpoint)) dqn_agent.update_target() episode_rewards = [0.0 for i in range(101)] saved_mean_reward = None obs_all = env.reset() obs_shape = obs_all reset = True done = False # Start total timer tstart = time.time() for t in range(total_timesteps): if callback is not None: if callback(locals(), globals()): break kwargs = {} if not param_noise: update_eps = tf.constant(exploration.value(t)) update_param_noise_threshold = 0. else: update_eps = tf.constant(0.) # Compute the threshold such that the KL divergence between perturbed and non-perturbed # policy is comparable to eps-greedy exploration with eps = exploration.value(t). # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017 # for detailed explanation. update_param_noise_threshold = -np.log( 1. 
                1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
            kwargs['reset'] = reset
            kwargs['update_param_noise_threshold'] = update_param_noise_threshold
            kwargs['update_param_noise_scale'] = True

        if t % print_freq == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'time spent to perform {t-print_freq} to {t} steps is {nseconds} ')
            print('eps update', exploration.value(t))

        mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []
        epinfos = []
        for _ in range(nsteps):
            # Given observations, take actions and collect fingerprints.
            obs_ = tf.constant(obs_all)
            actions_list, fps_ = dqn_agent.choose_action(obs_, update_eps=update_eps, **kwargs)
            fps = [[] for _ in agent_ids]
            for a in agent_ids:
                fps[a] = np.delete(fps_, a, axis=0)

            # Append the experiences.
            mb_obs.append(obs_all.copy())
            mb_actions.append(actions_list)
            mb_values.append(fps)
            mb_dones.append([float(done) for _ in range(num_agents)])

            # Take actions in the env and observe the results.
            obs1_all, rews, done, info = env.step(actions_list)
            rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
            mb_rewards.append(rews)
            obs_all = obs1_all
            maybeepinfo = info[0].get('episode')
            if maybeepinfo:
                epinfos.append(maybeepinfo)
            episode_rewards[-1] += np.max(rews)
            if done:
                episode_rewards.append(0.0)
                obs_all = env.reset()
                reset = True
        mb_dones.append([float(done) for _ in range(num_agents)])

        mb_obs = np.asarray(mb_obs, dtype=obs_all[0].dtype)
        mb_actions = np.asarray(mb_actions, dtype=actions_list[0].dtype)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
        mb_values = np.asarray(mb_values, dtype=np.float32)
        mb_dones = np.asarray(mb_dones, dtype=np.bool)
        mb_masks = mb_dones[:-1]
        mb_dones = mb_dones[1:]

        if gamma > 0.0:
            # Discount/bootstrap off the value function.
            last_values = dqn_agent.value(tf.constant(obs_all))
            if mb_dones[-1][0] == 0:
                mb_rewards = discount_with_dones(
                    np.concatenate((mb_rewards, [last_values])),
                    np.concatenate((mb_dones, [[float(False) for _ in range(num_agents)]])),
                    gamma)[:-1]
            else:
                mb_rewards = discount_with_dones(mb_rewards, mb_dones, gamma)

        if replay_buffer is not None:
            replay_buffer.add(mb_obs, mb_actions, mb_rewards, obs1_all, mb_masks[:, 0],
                              mb_values, np.tile([exploration.value(t), t], (nsteps, num_agents, 1)))

        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from the replay buffer.
            if prioritized_replay:
                experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, obses_tp1, dones, fps, extra_datas = replay_buffer.sample(batch_size)
                weights, batch_idxes = np.ones_like(rewards), None

            obses_t, obses_tp1 = tf.constant(obses_t), None
            actions, rewards, dones = (tf.constant(actions),
                                       tf.constant(rewards, dtype=tf.float32),
                                       tf.constant(dones))
            weights, fps, extra_datas = (tf.constant(weights), tf.constant(fps),
                                         tf.constant(extra_datas))

            # Merge the first two dimensions of each tensor.
            s = obses_t.shape
            obses_t = tf.reshape(obses_t, (s[0] * s[1], *s[2:]))
            s = actions.shape
            actions = tf.reshape(actions, (s[0] * s[1], *s[2:]))
            s = rewards.shape
            rewards = tf.reshape(rewards, (s[0] * s[1], *s[2:]))
            s = weights.shape
            weights = tf.reshape(weights, (s[0] * s[1], *s[2:]))
            s = fps.shape
            fps = tf.reshape(fps, (s[0] * s[1], *s[2:]))
            s = extra_datas.shape
            extra_datas = tf.reshape(extra_datas, (s[0] * s[1], *s[2:]))
            s = dones.shape
            dones = tf.reshape(dones, (s[0], s[1], *s[2:]))  # dones keep their shape

            td_errors = dqn_agent.nstep_train(obses_t, actions, rewards, obses_tp1, dones,
                                              weights, fps, extra_datas)

        if t > learning_starts and t % target_network_update_freq == 0:
            # Update the target network periodically.
            dqn_agent.update_target()

        if t % play_test == 0 and t != 0:
            play_test_games(dqn_agent)

        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)
        if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            print(f'last 100 episode mean reward {mean_100ep_reward} in {num_episodes} playing')
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.dump_tabular()
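# `LinearSchedule` anneals a value linearly over a fixed number of steps and
# then holds it constant; a minimal sketch matching the usage above, in the
# style of the baselines implementation:
class LinearSchedule:
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        # Fraction of the schedule completed, clipped to [0, 1].
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)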
def interact(self, max_step=50000, max_ep_cycle=2000, render=False, verbose=1, record_ep_inter=None):
    '''
    :param max_step: total number of interaction steps
    :param max_ep_cycle: max steps per episode
    :param verbose: logging verbosity;
        if verbose == 1, log every step
        if verbose == 2, log an episode summary at each episode end
    :param record_ep_inter: record interaction data every `record_ep_inter` episodes
    :return: None
    '''
    # Initialize the records.
    ep_reward = []
    ep_Q_value = []
    ep_loss = []
    while self.step < max_step:
        s = self.env.reset()
        # Reset the episode record.
        ep_r, ep_q, ep_l = 0, 0, 0
        # Reset the RL flags.
        ep_cycle, done = 0, 0
        self.episode += 1
        while done == 0 and ep_cycle < max_ep_cycle:
            self.step += 1
            ep_cycle += 1
            # The interaction part.
            a, info_forward = self.forward(s)
            s_, r, done, info = self.env.step(a)
            sample = {"s": s, "a": a, "s_": s_, "r": r, "tr": done}
            s = s_
            loss = self.backward(sample)
            if render:
                self.env.render()
            # The record part.
            ep_r += r
            ep_q += info_forward[a]
            ep_l += loss
            if verbose == 1 and self.step > self.learning_starts:
                logger.record_tabular("steps", self.step)
                logger.record_tabular("episodes", self.episode)
                logger.record_tabular("loss", loss)
                logger.record_tabular("reward", r)
                logger.record_tabular("Q_value", info_forward[a])  # Q-value of the chosen action
                logger.dump_tabular()
            if record_ep_inter is not None:
                if self.episode % record_ep_inter == 0:
                    kvs = {"s": s, "a": a, "s_": s_, "r": r, "tr": done,
                           "ep": self.episode, "step": self.step, "ep_step": ep_cycle}
                    self.csvwritter.writekvs(kvs)
            if done:
                ep_reward.append(ep_r)
                ep_Q_value.append(ep_q)
                ep_loss.append(ep_l)
                mean_100ep_reward = round(np.mean(ep_reward[-101:-1]), 1)
                if verbose == 2 and self.step > self.learning_starts:
                    logger.record_tabular("steps", self.step)
                    logger.record_tabular("episodes", self.episode)
                    logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                    logger.record_tabular("episode_reward", ep_reward[-1])
                    logger.record_tabular("episode_loss", ep_l)
                    logger.record_tabular("episode_Q_value", ep_q)
                    logger.record_tabular("step_used", ep_cycle)
                    logger.dump_tabular()
def train_once(self, iters_so_far):
    seg = []
    with self.timed("sampling"):
        for i in range(self.nr_traj_seg_gens):
            seg.extend(self.seg_gen[i].__next__())
    self.add_vtarg_and_adv(seg, self.gamma, self.gae_lambda)

    # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
    ob = np.concatenate([s['ob'] for s in seg], axis=0)
    ac = np.concatenate([s['ac'] for s in seg], axis=0)
    atarg = np.concatenate([s['adv'] for s in seg], axis=0)
    tdlamret = np.concatenate([s['tdlamret'] for s in seg], axis=0)
    vpredbefore = np.concatenate([s["vpred"] for s in seg], axis=0)  # predicted value function before update
    atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate

    # if hasattr(self.pi, "ret_rms"):
    #     self.pi.ret_rms.update(tdlamret)
    # if hasattr(self.pi, "ob_rms"):
    #     data_ob = ob[:, :-5].reshape([-1, 5])
    #     data_ob_extract = data_ob[np.where(data_ob[:, 3] == 1)][:, 0:-2]
    #     self.pi.ob_rms.update(data_ob_extract)  # update running mean/std for policy

    args = ob, ac, atarg, tdlamret
    fvpargs = [arr[::5] for arr in args[:-1]]

    def fisher_vector_product(p):
        return self.allmean(self.compute_fvp(p, *fvpargs)) + self.cg_damping * p

    self.assign_old_eq_new()  # set old parameter values to new parameter values
    with self.timed("computegrad"):
        *lossbefore, g = self.compute_lossandgrad(*args)
    lossbefore = self.allmean(np.array(lossbefore))
    g = self.allmean(g)

    if np.allclose(g, 0):
        logger.log("Got zero gradient. not updating")
    else:
        with self.timed("cg"):
            stepdir = cg(fisher_vector_product, g, cg_iters=self.cg_iters)
        assert np.isfinite(stepdir).all()
        shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
        lm = np.sqrt(shs / self.max_kl)
        # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
        fullstep = stepdir / lm
        expectedimprove = g.dot(fullstep)
        surrbefore = lossbefore[0]
        stepsize = 1.0
        thbefore = self.get_flat()
        # Backtracking line search on the step size.
        for _ in range(10):
            thnew = thbefore + fullstep * stepsize
            self.set_from_flat(thnew)
            meanlosses = surr, kl, *_ = self.allmean(np.array(self.compute_losses(*args)))
            improve = surr - surrbefore
            logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
            if not np.isfinite(meanlosses).all():
                logger.log("Got non-finite value of losses -- bad!")
            elif kl > self.max_kl * 1.5:
                logger.log("violated KL constraint. shrinking step.")
            elif improve < 0:
shrinking step.") else: logger.log("Stepsize OK!") break stepsize *= .5 else: logger.log("couldn't compute a good step") self.set_from_flat(thbefore) if self.nworkers > 1 and iters_so_far % 20 == 0: paramsums = MPI.COMM_WORLD.allgather( (thnew.sum(), self.vfadam.getflat().sum())) # list of tuples assert all( np.allclose(ps, paramsums[0]) for ps in paramsums[1:]) for (lossname, lossval) in zip(self.loss_names, meanlosses): logger.record_tabular(lossname, lossval) with self.timed("vf"): for _ in range(self.vf_iters): # for (mbob, mbret) in dataset.iterbatches((seg["ob"], seg["tdlamret"]), for (mbob, mbret) in dataset.iterbatches( (ob, tdlamret), include_final_partial_batch=False, batch_size=64): g = self.allmean(self.compute_vflossandgrad(mbob, mbret)) self.vfadam.update(g, self.vf_stepsize) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) # lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values lrlocal = (seg[0]["ep_lens"], seg[0]["ep_rets"]) # local values listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples lens, rews = map(flatten_lists, zip(*listoflrpairs)) average_action = np.mean(np.square(ac)) logger.record_tabular('AverageReturn', np.mean(rews)) logger.record_tabular('NumSamples', np.sum(lens) * len(seg)) logger.record_tabular('StdReturn', np.std(rews)) logger.record_tabular('MaxReturn', np.max(rews)) logger.record_tabular('MinReturn', np.min(rews)) logger.record_tabular('AverageAction', average_action) return rews, lens
def learn(env, policy_func, discriminator, expert_dataset, timesteps_per_batch, *,
          g_step, d_step,  # timesteps per actor per update
          clip_param, entcoeff,  # clipping parameter epsilon, entropy coeff
          optim_epochs, optim_stepsize, optim_batchsize,  # optimization hypers
          gamma, lam,  # advantage estimation
          max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0,  # time constraint
          callback=None,  # you can do anything in the callback, since it takes locals(), globals()
          adam_epsilon=1e-5, d_stepsize=3e-4,
          schedule='constant',  # annealing for stepsize parameters (epsilon and adam)
          save_per_iter=100, ckpt_dir=None, task="train",
          sample_stochastic=True, load_model_path=None, task_name=None,
          max_sample_traj=1500):
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()

    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space)  # construct network for new policy
    oldpi = policy_func("oldpi", ob_space, ac_space)  # network for old policy
    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # empirical return

    lrmult = tf.placeholder(name='lrmult', dtype=tf.float32,
                            shape=[])  # learning rate multiplier, updated with schedule
    clip_param = clip_param * lrmult  # annealed clipping parameter epsilon

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = U.mean(kloldnew)
    meanent = U.mean(ent)
    pol_entpen = (-entcoeff) * meanent

    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac))  # pnew / pold
    surr1 = ratio * atarg  # surrogate from conservative policy iteration
    surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg
    pol_surr = -U.mean(tf.minimum(surr1, surr2))  # PPO's pessimistic surrogate (L^CLIP)
    vf_loss = U.mean(tf.square(pi.vpred - ret))
    total_loss = pol_surr + pol_entpen + vf_loss
    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
    loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]

    var_list = pi.get_trainable_variables()
    lossandgrad = U.function([ob, ac, atarg, ret, lrmult],
                             losses + [U.flatgrad(total_loss, var_list)])
    d_adam = MpiAdam(discriminator.get_trainable_variables())
    adam = MpiAdam(var_list, epsilon=adam_epsilon)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    assign_old_eq_new = U.function(
        [], [],
        updates=[tf.assign(oldv, newv)
                 for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
    compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)

    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    adam.sync()

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, discriminator, timesteps_per_batch, stochastic=True)
    traj_gen = traj_episode_generator(pi, env, timesteps_per_batch, stochastic=sample_stochastic)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=100)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=100)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=100)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0, max_seconds > 0]) == 1, \
        "Only one time constraint permitted"
constraint permitted" if task == 'sample_trajectory': # not elegant, i know :( sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic) sys.exit() while True: if callback: callback(locals(), globals()) if max_timesteps and timesteps_so_far >= max_timesteps: break elif max_episodes and episodes_so_far >= max_episodes: break elif max_iters and iters_so_far >= max_iters: break elif max_seconds and time.time() - tstart >= max_seconds: break if schedule == 'constant': cur_lrmult = 1.0 elif schedule == 'linear': cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0) else: raise NotImplementedError # Save model if iters_so_far % save_per_iter == 0 and ckpt_dir is not None: U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far) logger.log("********** Iteration %i ************" % iters_so_far) for _ in range(g_step): seg = seg_gen.__next__() add_vtarg_and_adv(seg, gamma, lam) # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets)) ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg[ "tdlamret"] vpredbefore = seg[ "vpred"] # predicted value function before udpate atarg = (atarg - atarg.mean()) / atarg.std( ) # standardized advantage function estimate d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent) optim_batchsize = optim_batchsize or ob.shape[0] if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy assign_old_eq_new( ) # set old parameter values to new parameter values logger.log("Optimizing...") logger.log(fmt_row(13, loss_names)) # Here we do a bunch of optimization epochs over the data for _ in range(optim_epochs): losses = [ ] # list of tuples, each of which gives the loss for a minibatch for batch in d.iterate_once(optim_batchsize): *newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) adam.update(g, optim_stepsize * cur_lrmult) losses.append(newlosses) logger.log(fmt_row(13, np.mean(losses, axis=0))) logger.log("Evaluating losses...") losses = [] for batch in d.iterate_once(optim_batchsize): newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) losses.append(newlosses) meanlosses, _, _ = mpi_moments(losses, axis=0) # ------------------ Update D ------------------ logger.log("Optimizing Discriminator...") logger.log(fmt_row(13, discriminator.loss_name)) ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob)) batch_size = len(ob) // d_step d_losses = [ ] # list of tuples, each of which gives the loss for a minibatch ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob)) batch_size = len(ob) // d_step d_losses = [ ] # list of tuples, each of which gives the loss for a minibatch for ob_batch, ac_batch in dataset.iterbatches( (ob, ac), include_final_partial_batch=False, batch_size=batch_size): ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch)) # update running mean/std for discriminator if hasattr(discriminator, "obs_rms"): discriminator.obs_rms.update( np.concatenate((ob_batch, ob_expert), 0)) *newlosses, g = discriminator.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert) d_adam.update(allmean(g), d_stepsize) d_losses.append(newlosses) logger.log(fmt_row(13, np.mean(d_losses, axis=0))) # ----------------- logger -------------------- logger.log(fmt_row(13, meanlosses)) for (lossval, name) in zipsame(meanlosses, loss_names): logger.record_tabular("loss_" + name, lossval) logger.record_tabular("ev_tdlam_before", 
                              explained_variance(vpredbefore, tdlamret))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rews = map(flatten_lists, zip(*listoflrpairs))
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)
        true_rewbuffer.extend(true_rews)
        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1
        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)
        if MPI.COMM_WORLD.Get_rank() == 0:
            logger.dump_tabular()
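# `add_vtarg_and_adv` computes GAE(lambda) advantages and lambda-returns
# (Schulman et al., 2016); a minimal sketch matching the segment layout used
# above, in the style of the baselines implementation:
def add_vtarg_and_adv(seg, gamma, lam):
    # new[t] = 1 if a new episode starts at t; append 0 plus the bootstrap value.
    new = np.append(seg["new"], 0)
    vpred = np.append(seg["vpred"], seg["nextvpred"])
    T = len(seg["rew"])
    seg["adv"] = gaelam = np.empty(T, 'float32')
    rew = seg["rew"]
    lastgaelam = 0
    for t in reversed(range(T)):
        nonterminal = 1 - new[t + 1]
        delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]
        gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    seg["tdlamret"] = seg["adv"] + seg["vpred"]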
def learn(self):
    episode_rewards = [0.0]
    obs = self.env.reset()
    print(obs.shape)
    done = False
    tstart = time.time()
    episodes_trained = [0, False]  # [episode_number, done flag]

    for t in range(self.config.num_timesteps):
        update_eps = tf.constant(self.exploration.value(t))
        mb_obs, mb_rewards, mb_actions, mb_obs1, mb_dones = [], [], [], [], []
        for n_step in range(self.config.n_steps):
            actions, _ = self.get_actions(tf.constant(obs), update_eps=update_eps)
            if self.config.num_agents == 1:
                obs1, rews, done, _ = self.env.step(actions[0])
            else:
                obs1, rews, done, _ = self.env.step(actions)
            # TODO: fingerprint computation
            mb_obs.append(obs.copy())
            mb_actions.append(actions)
            mb_dones.append([float(done) for _ in self.agent_ids])
            if self.config.same_reward_for_agents:
                rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: same reward for everyone
            mb_obs1.append(obs1.copy())
            mb_rewards.append(rews)
            obs = obs1
            episode_rewards[-1] += np.max(rews)
            if done:
                episodes_trained[0] += 1
                episodes_trained[1] = True
                episode_rewards.append(0.0)
                obs = self.env.reset()
        mb_dones.append([float(done) for _ in self.agent_ids])

        # Swap axes so lists have shape (num_agents, num_steps, ...).
        mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1)
        mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
        mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(0, 1)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]

        if self.config.gamma > 0.0:
            # Discount/bootstrap off each agent's max value.
            for agent_id, (rewards, dones) in enumerate(zip(mb_rewards, mb_dones)):
                value = self.agents[agent_id].max_value(tf.constant(obs1[agent_id]))
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.config.gamma)
                mb_rewards[agent_id] = rewards

        if self.config.replay_buffer is not None:
            self.replay_memory.add(mb_obs, mb_actions, mb_rewards, mb_obs1, mb_masks)

        if t > self.config.learning_starts and t % self.config.train_freq == 0:
            if self.config.prioritized_replay:
                experience = self.replay_memory.sample(self.config.batch_size,
                                                       beta=self.beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, obses_tp1, dones = self.replay_memory.sample(self.config.batch_size)
                weights, batch_idxes = np.ones_like(rewards), None

            # Sample shape format is (batch_size, agent_num, n_steps, ...);
            # swap to (agent_num, batch_size, n_steps, ...).
            obses_t = obses_t.swapaxes(0, 1)
            actions = actions.swapaxes(0, 1)
            rewards = rewards.swapaxes(0, 1)
            obses_tp1 = obses_tp1.swapaxes(0, 1)
            dones = dones.swapaxes(0, 1)
            # print(f'weights.shape {weights.shape}')
            weights = weights.swapaxes(0, 1)  # weights shape is (1, batch_size, n_steps)
            # print(f'weights.shape {weights.shape}')
            if 'rnn' not in self.config.network:
                # Merge the batch and step dimensions: (agent_num, batch_size * n_steps, ...).
                shape = obses_t.shape
                obses_t = np.reshape(obses_t, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = actions.shape
                actions = np.reshape(actions, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = rewards.shape
                rewards = np.reshape(rewards, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = dones.shape
                dones = np.reshape(dones, (shape[0], shape[1] * shape[2], *shape[3:]))
                shape = weights.shape
                weights = np.reshape(weights, (shape[0], shape[1]))

            obses_t = tf.constant(obses_t)
            actions = tf.constant(actions)
            rewards = tf.constant(rewards)
            dones = tf.constant(dones)
            weights = tf.constant(weights)

            loss, td_errors = self.train(obses_t, actions, rewards, dones, weights)

            if self.config.prioritized_replay:
                new_priorities = np.abs(td_errors) + self.config.prioritized_replay_eps
                self.replay_memory.update_priorities(batch_idxes, new_priorities)

            if t % (self.config.train_freq * 50) == 0:
                print(f't = {t} , loss = {loss}')

        if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0:
            # Update the target networks periodically.
            for agent_id in self.agent_ids:
                self.agents[agent_id].soft_update_target()

        if t % self.config.playing_test == 0 and t != 0:
            # self.network.save(self.config.save_path)
            self.play_test_games()

        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)

        if t % (self.config.print_freq * 100) == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'eps {self.exploration.value(t)} -- time {t - self.config.print_freq*100} to {t} steps: {nseconds}')

        # if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0:
        if episodes_trained[1] and episodes_trained[0] % self.config.print_freq == 0:
            episodes_trained[1] = False
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 past episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
            logger.dump_tabular()
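# `soft_update_target` above is a Polyak (soft) target update,
# theta_target <- tau * theta_online + (1 - tau) * theta_target.
# A minimal TF2-style sketch; the attribute names and the tau default are
# assumptions, not the original implementation:
def soft_update_target(self, tau=0.005):
    for w_target, w_online in zip(self.target_q_network.weights, self.q_network.weights):
        w_target.assign(tau * w_online + (1.0 - tau) * w_target)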
def learn(env, policy_func, discriminator, expert_dataset, embedding_z,
          pretrained, pretrained_weight, *,
          g_step, d_step, timesteps_per_batch,  # what to train on
          max_kl, cg_iters,
          gamma, lam,  # advantage estimation
          entcoeff=0.0,
          cg_damping=1e-2,
          vf_stepsize=3e-4,
          d_stepsize=3e-4,
          vf_iters=3,
          max_timesteps=0, max_episodes=0, max_iters=0,  # time constraint
          callback=None,
          save_per_iter=100,
          ckpt_dir=None, log_dir=None,
          load_model_path=None,
          task_name=None):
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)

    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space, reuse=(pretrained_weight is not None))
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(dtype=tf.float32, shape=[None])  # target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # empirical return

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = U.mean(kloldnew)
    meanent = U.mean(ent)
    entbonus = entcoeff * meanent

    vferr = U.mean(tf.square(pi.vpred - ret))

    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = U.mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = pi.get_trainable_variables()
    var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]
    vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]
    d_adam = MpiAdam(discriminator.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([U.sum(g * tangent) for (g, tangent) in zipsame(klgrads, tangents)])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)

    assign_old_eq_new = U.function(
        [], [],
        updates=[tf.assign(oldv, newv)
                 for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], losses + [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret], U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(colorize("done in %.3f seconds" % (time.time() - tstart), color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    writer = U.FileWriter(log_dir)
    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, discriminator, embedding=embedding_z,
                                     timesteps_per_batch=timesteps_per_batch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
true_rewbuffer = deque(maxlen=40)
assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1
g_loss_stats = stats(loss_names)
d_loss_stats = stats(discriminator.loss_name)
ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
# if a pretrained weight is provided
if pretrained_weight is not None:
    U.load_state(pretrained_weight, var_list=pi.get_variables())
# if a model path is provided
if load_model_path is not None:
    U.load_state(load_model_path)
while True:
    if callback: callback(locals(), globals())
    if max_timesteps and timesteps_so_far >= max_timesteps:
        break
    elif max_episodes and episodes_so_far >= max_episodes:
        break
    elif max_iters and iters_so_far >= max_iters:
        break
    # Save model
    if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
        U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far)
    logger.log("********** Iteration %i ************" % iters_so_far)

    def fisher_vector_product(p):
        return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

    # ------------------ Update G ------------------
    logger.log("Optimizing Policy...")
    for _ in range(g_step):
        with timed("sampling"):
            seg = seg_gen.__next__()
        add_vtarg_and_adv(seg, gamma, lam)
        ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
        vpredbefore = seg["vpred"]  # predicted value function before update
        atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate
        if hasattr(pi, "ob_rms"):
            pi.ob_rms.update(ob)  # update running mean/std for policy
        args = seg["ob"], seg["ac"], atarg
        fvpargs = [arr[::5] for arr in args]
        assign_old_eq_new()  # set old parameter values to new parameter values
        with timed("computegrad"):
            *lossbefore, g = compute_lossandgrad(*args)
        lossbefore = allmean(np.array(lossbefore))
        g = allmean(g)
        if np.allclose(g, 0):
            logger.log("Got zero gradient. not updating")
        else:
            with timed("cg"):
                stepdir = cg(fisher_vector_product, g, cg_iters=cg_iters, verbose=rank == 0)
            assert np.isfinite(stepdir).all()
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            lm = np.sqrt(shs / max_kl)
            fullstep = stepdir / lm
            expectedimprove = g.dot(fullstep)
            surrbefore = lossbefore[0]
            stepsize = 1.0
            thbefore = get_flat()
            for _ in range(10):
                thnew = thbefore + fullstep * stepsize
                set_from_flat(thnew)
                meanlosses = surr, kl, *_ = allmean(np.array(compute_losses(*args)))
                improve = surr - surrbefore
                logger.log("Expected: %.3f Actual: %.3f" % (expectedimprove, improve))
                if not np.isfinite(meanlosses).all():
                    logger.log("Got non-finite value of losses -- bad!")
                elif kl > max_kl * 1.5:
                    logger.log("violated KL constraint. shrinking step.")
                elif improve < 0:
                    logger.log("surrogate didn't improve. 
shrinking step.") else: logger.log("Stepsize OK!") break stepsize *= .5 else: logger.log("couldn't compute a good step") set_from_flat(thbefore) if nworkers > 1 and iters_so_far % 20 == 0: paramsums = MPI.COMM_WORLD.allgather( (thnew.sum(), vfadam.getflat().sum())) # list of tuples assert all( np.allclose(ps, paramsums[0]) for ps in paramsums[1:]) with timed("vf"): for _ in range(vf_iters): for (mbob, mbret) in dataset.iterbatches( (seg["ob"], seg["tdlamret"]), include_final_partial_batch=False, batch_size=128): if hasattr(pi, "ob_rms"): pi.ob_rms.update( mbob) # update running mean/std for policy g = allmean(compute_vflossandgrad(mbob, mbret)) vfadam.update(g, vf_stepsize) g_losses = meanlosses for (lossname, lossval) in zip(loss_names, meanlosses): logger.record_tabular(lossname, lossval) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) # ------------------ Update D ------------------ logger.log("Optimizing Discriminator...") logger.log(fmt_row(13, discriminator.loss_name)) ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob)) batch_size = len(ob) // d_step d_losses = [ ] # list of tuples, each of which gives the loss for a minibatch for ob_batch, ac_batch in dataset.iterbatches( (ob, ac), include_final_partial_batch=False, batch_size=batch_size): ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch)) # update running mean/std for discriminator if hasattr(discriminator, "obs_rms"): discriminator.obs_rms.update( np.concatenate((ob_batch, ob_expert), 0)) *newlosses, g = discriminator.lossandgrad(ob_batch, ac_batch, ob_expert, ac_expert) d_adam.update(allmean(g), d_stepsize) d_losses.append(newlosses) logger.log(fmt_row(13, np.mean(d_losses, axis=0))) lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"] ) # local values listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs)) true_rewbuffer.extend(true_rets) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if rank == 0: logger.dump_tabular() g_loss_stats.add_all_summary(writer, g_losses, iters_so_far) d_loss_stats.add_all_summary(writer, np.mean(d_losses, axis=0), iters_so_far) ep_stats.add_all_summary(writer, [ np.mean(true_rewbuffer), np.mean(rewbuffer), np.mean(lenbuffer) ], iters_so_far)
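# The policy step above leans on cg() to approximately solve F x = g for the
# natural-gradient direction. For reference, a minimal conjugate-gradient sketch
# in the style of the Baselines helper (hedged: the actual cg() may differ):
import numpy as np

def cg_sketch(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    """Solve A x = b given only the matrix-vector product f_Ax (here, the
    Fisher-vector product), without ever forming A explicitly."""
    x = np.zeros_like(b)
    r = b.copy()   # residual b - A x (x starts at zero)
    p = b.copy()   # search direction
    rdotr = r.dot(r)
    for _ in range(cg_iters):
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        p = r + (newrdotr / rdotr) * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break
    return x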
def train_fn(args):
    base_dir = args.base_dir
    dirs = init_dir(base_dir)
    init_log(dirs['log'])
    environment = args.environment
    if environment == 'ford':
        config_dir = 'config/config_ford.ini'
    else:
        config_dir = 'config/config_gym.ini'
    copy_file(config_dir, dirs['data'])
    config = configparser.ConfigParser()
    config.read(config_dir)
    # test during training or test after training
    in_test, post_test = init_test_flag(args.test_mode)
    gamma = config.getfloat('MODEL_CONFIG', 'gamma')
    buffer_size = int(config.getfloat('MODEL_CONFIG', 'buffer_size'))
    batch_size = int(config.getfloat('MODEL_CONFIG', 'batch_size'))
    lr_init = config.getfloat('MODEL_CONFIG', 'lr_init')
    reward_norm = config.getfloat('MODEL_CONFIG', 'reward_norm')
    reward_clip = config.getfloat('MODEL_CONFIG', 'reward_clip')
    # training config
    total_step = int(config.getfloat('TRAIN_CONFIG', 'total_step'))
    rendering = int(config.getfloat('TRAIN_CONFIG', 'rendering'))
    learning_starts = int(config.getfloat('TRAIN_CONFIG', 'learning_starts'))
    train_freq = int(config.getfloat('TRAIN_CONFIG', 'train_freq'))
    test_freq = int(config.getfloat('TRAIN_CONFIG', 'test_freq'))
    log_freq = int(config.getfloat('TRAIN_CONFIG', 'log_freq'))
    print_freq = int(config.getfloat('TRAIN_CONFIG', 'print_freq'))
    target_network_update_freq = int(config.getfloat('TRAIN_CONFIG', 'target_network_update_freq'))
    number_update = int(config.getfloat('TRAIN_CONFIG', 'num_update'))
    num_history = int(config.getfloat('TRAIN_CONFIG', 'num_history'))
    seed = config.getint('TRAIN_CONFIG', 'seed')
    eps_init = config.getfloat('MODEL_CONFIG', 'epsilon_init')
    eps_decay = config.get('MODEL_CONFIG', 'epsilon_decay')
    eps_ratio = config.getfloat('MODEL_CONFIG', 'epsilon_ratio')
    eps_min = config.getfloat('MODEL_CONFIG', 'epsilon_min')
    env_seed = config.getint('ENV_CONFIG', 'env_seed')
    if eps_decay == 'constant':
        eps_scheduler = Scheduler(eps_init, decay=eps_decay)
    else:
        eps_scheduler = Scheduler(eps_init, eps_min, total_step * eps_ratio, decay=eps_decay)
    # Initialize environment
    print("Initializing environment")
    if environment == 'ford':
        env = FordEnv(config['ENV_CONFIG'], rendering=rendering, seed=env_seed)
    else:
        env = gym.make("CartPole-v0")
    # use a separate name so the ConfigParser above is not shadowed
    tf_config = tf.ConfigProto(allow_soft_placement=True)
    # tf_config.gpu_options.allow_growth = True
    sess = tf.get_default_session()
    tf.set_random_seed(seed)
    if sess is None:
        sess = make_session(config=tf_config, make_default=True)
    try:
        policy = Q_Policy(env.action_space.n, env.observation_space.shape[0], num_history)
        # Create all the functions necessary to train the model
        train, update_target, debug = policy.build_graph(
            optimizer=tf.train.AdamOptimizer(learning_rate=lr_init),
            gamma=gamma,
            grad_norm_clipping=10)
        replay_buffer = ReplayBuffer(buffer_size)
        obs_buffer = np.zeros(shape=(num_history, env.observation_space.shape[0]))
        obs_buffer_eval = np.zeros(shape=(num_history, env.observation_space.shape[0]))
        # Initialize the parameters and copy them to the target network.
        sess.run(tf.global_variables_initializer())
        # if restore:
        #     policy.load(sess, dirs['model'], checkpoint=None)
        update_target()
        epoch_rewards = [0.0]
        eval_rewards = []
        ob_ls = []
        steps = 0  # counts the steps within one epoch
        obs = env.reset()
        obs_buffer[-1] = obs
        for t in range(total_step):
            # Take action and update exploration to the newest value
            steps += 1
            action = policy.forward(sess, obs_buffer[None], eps_scheduler.get(1), mode='explore')
            new_obs, rew, done, _ = env.step(action)
            # np.roll returns a shifted copy, so assign its result: the oldest
            # observation wraps to the last row and is overwritten by the newest.
            obs_buffer_new = np.roll(obs_buffer, -1, axis=0)
            obs_buffer_new[-1] = new_obs
            if rendering:
                # rendering is not well supported in some environments because of plotting issues
                env.render()
            if reward_norm:
                rew = rew / reward_norm
            if reward_clip:
                rew = np.clip(rew, -reward_clip, reward_clip)
            replay_buffer.add(obs_buffer, action, rew, obs_buffer_new, float(done))
            ob_ls.append([new_obs])
            obs_buffer = obs_buffer_new
            epoch_rewards[-1] += rew
            if t > learning_starts and t % train_freq == 0:
                for _ in range(number_update):
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                    train(obses_t, actions, rewards, obses_tp1, dones, weights)
            # Update target network periodically.
            if t > learning_starts and t % target_network_update_freq == 0:
                update_target()
            if done:
                if print_freq is not None and len(epoch_rewards) % print_freq == 0:
                    mean_10ep_reward = round(np.mean(epoch_rewards[-11:-1]), 1)
                    num_episodes = len(epoch_rewards)
                    logger.record_tabular("steps", t)
                    logger.record_tabular("episodes", num_episodes)
                    logger.record_tabular("mean 10 episode reward", mean_10ep_reward)
                    logger.record_tabular("% time spent exploring", int(100 * eps_scheduler.get(1)))
                    logger.record_tabular("Current date and time: ", datetime.datetime.now())
                    logger.dump_tabular()
                # evaluation
                if in_test and len(epoch_rewards) % test_freq == 0:
                    episode_reward_eval = 0
                    obs_eval = env.reset()
                    done_eval = False
                    # print("Starting evaluation")
                    while not done_eval:
                        obs_buffer_eval[-1] = obs_eval
                        # Take the greedy action
                        action_eval = policy.forward(sess, obs_buffer_eval[None], 1, mode='eval')
                        new_obs_eval, rew_eval, done_eval, _ = env.step(action_eval)
                        obs_eval = new_obs_eval
                        obs_buffer_eval = np.roll(obs_buffer_eval, -1, axis=0)  # shift the history; assign the result
                        if reward_norm:
                            rew_eval = rew_eval / reward_norm
                        episode_reward_eval += rew_eval
                    print("evaluating reward = ", episode_reward_eval)
                    eval_rewards.append(episode_reward_eval)
                    print("Saving model...")
                    policy.save(sess, dirs['model'], len(epoch_rewards))
                if len(epoch_rewards) % log_freq == 0:
                    np.save(dirs['results'] + 'eval_rewards', eval_rewards)
                    np.save(dirs['results'] + 'epoch_rewards', epoch_rewards)
                    np.save(dirs['results'] + 'ob_ls', ob_ls)
                obs_buffer = np.zeros(shape=(num_history, env.observation_space.shape[0]))
                obs_buffer_eval = np.zeros(shape=(num_history, env.observation_space.shape[0]))
                obs = env.reset()
                obs_buffer[-1] = obs
                epoch_rewards.append(0.0)
        env.close()
        plot(dirs['results'])
        if post_test:
            evaluate(args)
    except Exception as e:
        print("Error occurred, closing environment...")
        env.close()
        raise e
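# Hedged sketch of the frame-history update used above. np.roll returns a new
# array rather than shifting in place, so its result must be assigned; the
# helper below makes that explicit (`push_history` is a hypothetical name).
import numpy as np

def push_history(history, new_obs):
    """Drop the oldest row of a (num_history, obs_dim) buffer and append new_obs."""
    history = np.roll(history, -1, axis=0)  # row 0 wraps to the end...
    history[-1] = new_obs                   # ...and is overwritten by the newest obs
    return history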
def run(self):
    policy = Q_Policy(num_actions=self.env.action_space.n,
                      num_obs=self.env.observation_space.shape[0])
    # Create all the functions necessary to train the model
    policy.build_graph(optimizer=tf.train.AdamOptimizer(learning_rate=self.lr_init),
                       gamma=self.gamma,
                       grad_norm_clipping=10)
    replay_buffer = ReplayBuffer(self.buffer_size)
    # Initialize the parameters and copy them to the target network.
    self.sess.run(tf.global_variables_initializer())
    # if restore:
    #     policy.load(sess, dirs['model'], checkpoint=None)
    policy.update_target(self.sess)
    epoch_rewards = [0.0]
    eval_rewards = []
    ob_ls = []
    steps = 0  # counts the steps within one epoch
    obs = self.env.reset()
    for t in range(self.total_step):
        # Take action and update exploration to the newest value
        steps += 1
        action = policy.forward(self.sess, obs[None], self.eps_scheduler.get(1), mode='explore')
        new_obs, rew, done, _ = self.env.step(action)
        if self.rendering:
            self.env.render()
        if self.reward_norm:
            rew = rew / self.reward_norm
        if self.reward_clip:
            rew = np.clip(rew, -self.reward_clip, self.reward_clip)
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        ob_ls.append([new_obs])
        obs = new_obs
        epoch_rewards[-1] += rew
        if t > self.learning_starts and t % self.train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(self.batch_size)
            weights, batch_idxes = np.ones_like(rewards), None
            policy.backward(self.sess, obses_t, actions, obses_tp1, dones, rewards, weights,
                            global_step=t, summary_writer=self.summary_writer)
        # Update target network periodically.
        if t > self.learning_starts and t % self.target_network_update_freq == 0:
            policy.update_target(self.sess)
        if done:
            if self.print_freq is not None and len(epoch_rewards) % self.print_freq == 0:
                mean_100ep_reward = round(np.mean(epoch_rewards[-101:-1]), 1)
                num_episodes = len(epoch_rewards)
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * self.eps_scheduler.get(1)))
                logger.record_tabular("Current date and time: ", datetime.datetime.now())
                logger.dump_tabular()
            # evaluation
            if self.in_test and len(epoch_rewards) % self.test_freq == 0:
                episode_reward_eval = 0
                obs_eval = self.env.reset()
                done_eval = False
                # print("Starting evaluation")
                while not done_eval:
                    # Take the greedy action
                    action_eval = policy.forward(self.sess, obs_eval[None], 1, mode='eval')
                    new_obs_eval, rew_eval, done_eval, _ = self.env.step(action_eval)
                    obs_eval = new_obs_eval
                    if self.reward_norm:
                        rew_eval = rew_eval / self.reward_norm
                    episode_reward_eval += rew_eval
                self._add_summary(episode_reward_eval, t, is_train=False)
                print("evaluating reward = ", episode_reward_eval)
                eval_rewards.append(episode_reward_eval)
                print("Saving model...")
                policy.save(self.sess, self.dirs['model'], len(epoch_rewards))
            if len(epoch_rewards) % self.log_freq == 0:
                np.save(self.dirs['results'] + 'eval_rewards', eval_rewards)
                np.save(self.dirs['results'] + 'epoch_rewards', epoch_rewards)
                np.save(self.dirs['results'] + 'ob_ls', ob_ls)
            obs = self.env.reset()
            self._add_summary(epoch_rewards[-1], global_step=t)
            self.summary_writer.flush()
            epoch_rewards.append(0.0)
    print("Training Done...")
    self.env.close()
    plot(self.dirs['results'])
def learn(self):
    self.soft_update_target()
    episode_rewards = [0.0]
    # Start total timer
    tstart = time.time()
    for t in range(self.config.num_episodes):
        obs = self.env.reset()
        done = False
        update_eps = tf.constant(self.exploration.value(t))
        if t % self.config.print_freq == 0:
            time_1000_step = time.time()
            nseconds = time_1000_step - tstart
            tstart = time_1000_step
            print(f'time spent on episodes {t - self.config.print_freq} to {t}: {nseconds}s')
            print('eps update', self.exploration.value(t))
        mb_obs, mb_rewards, mb_actions, mb_fps, mb_dones = [], [], [], [], []
        epinfos = []
        while not done:
            actions, fps_ = self.choose_action(tf.constant(obs), update_eps=update_eps)
            fps = []
            if self.config.num_agents > 1:
                # build each agent's fingerprint from the other agents' outputs
                # plus the current exploration rate and step
                for a in self.agent_ids:
                    fp = fps_[:a]
                    fp.extend(fps_[a + 1:])
                    fp_a = np.concatenate((fp, [[self.exploration.value(t) * 100, t]]), axis=None)
                    fps.append(fp_a)
            mb_obs.append(obs.copy())
            mb_actions.append(actions)
            mb_fps.append(fps)
            mb_dones.append([float(done) for _ in self.agent_ids])
            obs1, rews, done, info = self.env.step(actions)
            if self.config.same_reward_for_agents:
                rews = [np.max(rews) for _ in range(len(rews))]  # cooperative setting: every agent gets the same (max) reward
            mb_rewards.append(rews)
            obs = obs1
            maybeepinfo = info.get('episode')
            if maybeepinfo:
                epinfos.append(maybeepinfo)
            episode_rewards[-1] += np.max(rews)
        episode_rewards.append(0.0)
        mb_dones.append([float(done) for _ in self.agent_ids])
        # pad short episodes with zeroed steps so every rollout has n_steps entries
        for extra_step in range(self.config.n_steps - len(mb_actions)):
            print('padding rollout with zeroed extra steps')
            mb_obs.insert(0, obs * 0.)
            mb_actions.insert(0, mb_actions[-1] * 0.)
            mb_rewards.insert(0, mb_rewards[-1] * 0.)
            mb_fps.insert(0, mb_fps[-1] * 0.)
            mb_dones.insert(0, mb_dones[-1] * 0.)
        # swap axes to get arrays of shape (num_agents, num_steps, ...)
        mb_obs = np.asarray(mb_obs, dtype=obs[0].dtype).swapaxes(0, 1)
        mb_actions = np.asarray(mb_actions, dtype=actions[0].dtype).swapaxes(0, 1)
        mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
        mb_fps = np.asarray(mb_fps, dtype=np.float32).swapaxes(0, 1)
        mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(0, 1)
        mb_masks = mb_dones[:, :-1]
        mb_dones = mb_dones[:, 1:]
        if self.config.gamma > 0.0:
            # Discount/bootstrap off value fn
            last_values = self.value(tf.constant(obs1))
            for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
                rewards = rewards.tolist()
                dones = dones.tolist()
                if dones[-1] == 0:
                    rewards = discount_with_dones(rewards + [value], dones + [0], self.config.gamma)[:-1]
                else:
                    rewards = discount_with_dones(rewards, dones, self.config.gamma)
                mb_rewards[n] = rewards
        if self.config.replay_buffer is not None:
            self.replay_memory.add_episode((mb_obs, mb_actions, mb_rewards, mb_masks, mb_fps))
        if t > self.config.learning_starts and t % self.config.train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            if self.config.prioritized_replay:
                experience = self.replay_memory.sample(self.config.batch_size,
                                                       beta=self.beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, fps, weights, batch_idxes) = experience
            else:
                obses_t, actions, rewards, dones, fps = self.replay_memory.sample(self.config.batch_size)
                weights, batch_idxes = np.ones_like(rewards), None
            obses_t = tf.constant(obses_t)
            actions = tf.constant(actions)
            rewards = tf.constant(rewards)
            dones = tf.constant(dones)
            weights = tf.constant(weights)
            fps = tf.constant(fps)
            loss, td_errors = self.train(obses_t, actions, rewards, dones, weights, fps)
            if t % (self.config.train_freq * 50) == 0:
                print(f't = {t} , loss = {loss}')
        if t > self.config.learning_starts and t % self.config.target_network_update_freq == 0:
            # Update target network periodically.
            self.soft_update_target()
        if t % self.config.playing_test == 0 and t != 0:
            # self.save(self.config.save_path)
            self.play_test_games()
        mean_100ep_reward = np.mean(episode_rewards[-101:-1])
        num_episodes = len(episode_rewards)
        if done and self.config.print_freq is not None and len(episode_rewards) % self.config.print_freq == 0:
            print(f'mean reward over the last 100 episodes, after {num_episodes} episodes: {mean_100ep_reward}')
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
            logger.record_tabular("% time spent exploring", int(100 * self.exploration.value(t)))
            logger.dump_tabular()
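# For reference, a minimal sketch of what discount_with_dones (used above) computes,
# matching the OpenAI Baselines helper of the same name: a backward pass that
# accumulates discounted returns and resets the running sum at episode boundaries.
def discount_with_dones(rewards, dones, gamma):
    discounted, r = [], 0.
    for reward, done in zip(rewards[::-1], dones[::-1]):
        r = reward + gamma * r * (1. - done)  # reset the return where done == 1
        discounted.append(r)
    return discounted[::-1]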
# Take action and update exploration to the newest value action = act(obs[None], update_eps=exploration.value(t))[0] new_obs, rew, done, _ = env.step(action) # Store transition in the replay buffer. replay_buffer.add(obs, action, rew, new_obs, float(done)) obs = new_obs episode_rewards[-1] += rew if done: obs = env.reset() episode_rewards.append(0) is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200 if is_solved: # Show off the result env.render() else: # Minimize the error in Bellman's equation on a batch sampled from replay buffer. if t > 1000: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32) train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards)) # Update target network periodically. if t % 1000 == 0: update_target() if done and len(episode_rewards) % 200 == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", len(episode_rewards)) logger.record_tabular("mean episode reward", round(np.mean(episode_rewards[-101:-1]), 1)) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular()
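# Hedged sketch of the linear exploration schedule assumed by exploration.value(t)
# above (mirroring Baselines' LinearSchedule): epsilon is interpolated from
# initial_p down to final_p over schedule_timesteps, then held constant.
class LinearScheduleSketch:
    def __init__(self, schedule_timesteps, final_p, initial_p=1.0):
        self.schedule_timesteps = schedule_timesteps
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, t):
        fraction = min(float(t) / self.schedule_timesteps, 1.0)
        return self.initial_p + fraction * (self.final_p - self.initial_p)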
def learn(env, encoder, action_decorder, state_decorder, embedding_shape, *, dataset,
          optimizer, logdir, batch_size, time_steps, adam_epsilon=0.001, lr_rate=1e-4, vae_beta=8):
    lstm_encoder = encoder("lstm_encoder")
    ac_decoder = action_decorder("ac_decoder")
    state_decoder = state_decorder("state_decoder")  # FIXME: something is wrong here
    ac_de_ob = U.get_placeholder_cached(name="ac_de_ob")
    en_ob = U.get_placeholder_cached(name="en_ob")  # for the encoder
    state_de_ob = U.get_placeholder_cached(name="state_de_ob")  # for the action decoder; the state decoder could probably use it too -- should it be renamed to obs?
    ac_de_embedding = U.get_placeholder_cached(name="ac_de_embedding")  # for the action decoder; the state decoder should be able to use it as well
    state_de_embedding = U.get_placeholder_cached(name="state_de_embedding")
    # ac = ac_decoder.pdtype.sample_placeholder([None])
    ob_next = tf.placeholder(name="ob_next", shape=[None, ob_shape], dtype=tf.float32)
    # ob_next_ac = tf.placeholder(name="ob_next_ac", shape=[ob_shape], dtype=tf.float32)
    # obs_out = state_decoder.pdtype.sample_placeholder([None])
    # p(z): standard normal prior
    from common.distributions import make_pdtype
    p_z_pdtype = make_pdtype(embedding_shape)
    p_z_params = U.concatenate([tf.zeros(shape=[embedding_shape], name="mean"),
                                tf.zeros(shape=[embedding_shape], name="logstd")], axis=-1)
    p_z = p_z_pdtype.pdfromflat(p_z_params)
    # TODO: add another reconstruction term for the action
    recon_loss = -tf.reduce_sum(state_decoder.pd.logp(ob_next))
    # kl_loss = lstm_encoder.pd.kl(p_z)[0]  # p(z): standard normal; this may not be right
    # kl_loss = tf.maximum(lstm_encoder.pd.kl(p_z)[0], tf.constant(5.00))  # p(z): standard normal; this may not be right either
    kl_loss = lstm_encoder.pd.kl(p_z)[0]
    vae_loss = tf.reduce_mean(recon_loss + vae_beta * kl_loss)  # vae_loss should be averaged over a batch
    ep_stats = stats(["recon_loss", "kl_loss", "vae_loss"])
    losses = [recon_loss, kl_loss, vae_loss]
    # Idea: train the action with an MSE loss -- step the env with the predicted
    # action to get x(t+1), then apply MSE (or try cross-entropy).
    # var_list
    var_list = []
    en_var_list = lstm_encoder.get_trainable_variables()
    var_list.extend(en_var_list)
    # ac_de_var_list = ac_decoder.get_trainable_variables()
    # var_list.extend(ac_de_var_list)
    state_de_var_list = state_decoder.get_trainable_variables()
    var_list.extend(state_de_var_list)
    # compute_recon_loss = U.function([ob, obs, embedding, obss, embeddingss, ac, obs_out], recon_loss)
    compute_losses = U.function([en_ob, ac_de_ob, state_de_ob, ac_de_embedding, state_de_embedding, ob_next], losses)
    compute_grad = U.function([en_ob, ac_de_ob, state_de_ob, ac_de_embedding, state_de_embedding, ob_next],
                              U.flatgrad(vae_loss, var_list))  # not fully thought through; may be wrong
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    U.initialize()
    adam.sync()
    writer = U.FileWriter(logdir)
    writer.add_graph(tf.get_default_graph())
    # =========================== TRAINING ===================== #
    iters_so_far = 0
    saver = tf.train.Saver(var_list=var_list, max_to_keep=100)
    saver_encoder = tf.train.Saver(var_list=en_var_list, max_to_keep=100)
    # saver_pol = tf.train.Saver(var_list=ac_de_var_list, max_to_keep=100)  # keep the policy parameters around, though they do not seem to be needed
    while iters_so_far < 50:  # consider training for more rounds
        logger.log("********** Iteration %i ************" % iters_so_far)
        # consider adjusting batch_size every round
        recon_loss_buffer = deque(maxlen=100)
        kl_loss_buffer = deque(maxlen=100)
        vae_loss_buffer = deque(maxlen=100)
        for obs_and_next in dataset.get_next_batch(batch_size=time_steps):
            observations = obs_and_next[0].transpose((1, 0))[:-1]
            ob_next = obs_and_next[0].transpose(1, 0)[state_decoder.receptive_field:, :]
            embedding_now = lstm_encoder.get_laten_vector(obs_and_next[0].transpose((1, 0)))
            embeddings = np.array([embedding_now for _ in range(time_steps - 1)])
            embeddings_reshape = embeddings.reshape((time_steps - 1, -1))
            actions = ac_decoder.act(stochastic=True, ob=observations, embedding=embeddings_reshape)
            ob_next_ac = get_ob_next_ac(env, observations[-1], actions[0])  # still needs revision
            # state_outputs = state_decoder.get_outputs(observations.reshape(1, time_steps, -1), embedding_now.reshape((1, 1, -1)))  # the Gaussian mixture has now been added
            # recon_loss = state_decoder.recon_loss(observations.reshape(1, time_steps, -1), embedding_now.reshape((1, 1, -1)))
            recon_loss, kl_loss, vae_loss = compute_losses(
                obs_and_next[0].transpose((1, 0)).reshape(1, time_steps, -1),
                observations.reshape(time_steps - 1, -1),
                observations.reshape(1, time_steps - 1, -1),
                embeddings_reshape,
                embedding_now.reshape((1, 1, -1)),
                ob_next)
            g = compute_grad(
                obs_and_next[0].transpose((1, 0)).reshape(1, time_steps, -1),
                observations.reshape(time_steps - 1, -1),
                observations.reshape(1, time_steps - 1, -1),
                embeddings_reshape,
                embedding_now.reshape((1, 1, -1)),
                ob_next)
            adam.update(g, lr_rate)
            recon_loss_buffer.append(recon_loss)
            kl_loss_buffer.append(kl_loss)
            vae_loss_buffer.append(vae_loss)
        ep_stats.add_all_summary(writer,
                                 [np.mean(recon_loss_buffer), np.mean(kl_loss_buffer), np.mean(vae_loss_buffer)],
                                 iters_so_far)
        logger.record_tabular("recon_loss", recon_loss)
        logger.record_tabular("kl_loss", kl_loss)
        logger.record_tabular("vae_loss", vae_loss)
        logger.dump_tabular()
        if iters_so_far % 10 == 0 and iters_so_far != 0:
            save(saver=saver, sess=tf.get_default_session(), logdir=logdir, step=iters_so_far)
            save(saver=saver_encoder, sess=tf.get_default_session(), logdir="./vae_saver", step=iters_so_far)
            # save(saver=saver_pol, sess=tf.get_default_session(), logdir="pol_saver", step=iters_so_far)
        iters_so_far += 1
        if iters_so_far < 6:
            lr_rate /= 2
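# For reference, a hedged numeric sketch (not the original TF graph) of the
# beta-VAE objective assembled above: reconstruction negative log-likelihood plus
# a beta-weighted KL between a diagonal-Gaussian posterior q(z|x) and a
# standard-normal prior. mu/logstd stand in for the encoder's outputs.
import numpy as np

def beta_vae_loss(recon_logp, mu, logstd, beta):
    """recon_logp: log p(x|z) summed over dims; mu/logstd: posterior parameters."""
    # KL( N(mu, sigma^2) || N(0, I) ) for a diagonal Gaussian, summed over latent dims
    kl = 0.5 * np.sum(np.exp(2 * logstd) + mu ** 2 - 1.0 - 2 * logstd)
    return -recon_logp + beta * kl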
def learn(env, network, seed=None, lr=5e-5, total_timesteps=100000, buffer_size=500000, exploration_fraction=0.1, exploration_final_eps=0.01, train_freq=1, batch_size=32, print_freq=10, checkpoint_freq=100000, checkpoint_path=None, learning_starts=0, gamma=0.99, target_network_update_freq=10000, prioritized_replay=True, prioritized_replay_alpha=0.4, prioritized_replay_beta0=0.6, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-3, param_noise=False, callback=None, load_path=None, load_idx=None, demo_path=None, n_step=10, demo_prioritized_replay_eps=1.0, pre_train_timesteps=750000, epsilon_schedule="constant", **network_kwargs): # Create all the functions necessary to train the model set_global_seeds(seed) q_func = build_q_func(network, **network_kwargs) with tf.device('/GPU:0'): model = DQfD(q_func=q_func, observation_shape=env.observation_space.shape, num_actions=env.action_space.n, lr=lr, grad_norm_clipping=10, gamma=gamma, param_noise=param_noise) # Load model from checkpoint if load_path is not None: load_path = osp.expanduser(load_path) ckpt = tf.train.Checkpoint(model=model) manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None) if load_idx is None: ckpt.restore(manager.latest_checkpoint) print("Restoring from {}".format(manager.latest_checkpoint)) else: ckpt.restore(manager.checkpoints[load_idx]) print("Restoring from {}".format(manager.checkpoints[load_idx])) # Setup demo trajectory assert demo_path is not None with open(demo_path, "rb") as f: trajectories = pickle.load(f) # Create the replay buffer replay_buffer = PrioritizedReplayBuffer(buffer_size, prioritized_replay_alpha) if prioritized_replay_beta_iters is None: prioritized_replay_beta_iters = total_timesteps beta_schedule = LinearSchedule(prioritized_replay_beta_iters, initial_p=prioritized_replay_beta0, final_p=1.0) temp_buffer = deque(maxlen=n_step) is_demo = True for epi in trajectories: for obs, action, rew, new_obs, done in epi: obs, new_obs = np.expand_dims( np.array(obs), axis=0), np.expand_dims(np.array(new_obs), axis=0) if n_step: temp_buffer.append((obs, action, rew, new_obs, done, is_demo)) if len(temp_buffer) == n_step: n_step_sample = get_n_step_sample(temp_buffer, gamma) replay_buffer.demo_len += 1 replay_buffer.add(*n_step_sample) else: replay_buffer.demo_len += 1 replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), float(is_demo)) logger.log("trajectory length:", replay_buffer.demo_len) # Create the schedule for exploration if epsilon_schedule == "constant": exploration = ConstantSchedule(exploration_final_eps) else: # not used exploration = LinearSchedule(schedule_timesteps=int( exploration_fraction * total_timesteps), initial_p=1.0, final_p=exploration_final_eps) model.update_target() # ============================================== pre-training ====================================================== start = time() num_episodes = 0 temp_buffer = deque(maxlen=n_step) for t in tqdm(range(pre_train_timesteps)): # sample and train experience = replay_buffer.sample(batch_size, beta=prioritized_replay_beta0) batch_idxes = experience[-1] if experience[6] is None: # for n_step = 0 obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple( map(tf.constant, experience[:6])) obses_tpn, rewards_n, dones_n = None, None, None weights = tf.constant(experience[-2]) else: obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple( map(tf.constant, experience[:-1])) td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = 
model.train(
            obses_t, actions, rewards, obses_tp1, dones, is_demos, weights, obses_tpn, rewards_n, dones_n)
        # Update priorities
        new_priorities = np.abs(td_errors) + np.abs(n_td_errors) + demo_prioritized_replay_eps
        replay_buffer.update_priorities(batch_idxes, new_priorities)
        # Update target network periodically
        if t > 0 and t % target_network_update_freq == 0:
            model.update_target()
        # Logging
        elapsed_time = timedelta(seconds=time() - start)
        if print_freq is not None and t % 10000 == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", 0)
            logger.record_tabular("max 100 episode reward", 0)
            logger.record_tabular("min 100 episode reward", 0)
            logger.record_tabular("demo sample rate", 1)
            logger.record_tabular("epsilon", 0)
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", True)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()
    # ============================================== exploring =========================================================
    sample_counts = 0
    demo_used_counts = 0
    episode_rewards = deque(maxlen=100)
    this_episode_reward = 0.
    best_score = 0.
    saved_mean_reward = None
    is_demo = False
    obs = env.reset()
    # Always mimic the vectorized env
    obs = np.expand_dims(np.array(obs), axis=0)
    reset = True
    for t in tqdm(range(total_timesteps)):
        if callback is not None:
            if callback(locals(), globals()):
                break
        kwargs = {}
        if not param_noise:
            update_eps = tf.constant(exploration.value(t))
            update_param_noise_threshold = 0.
        else:  # not used
            update_eps = tf.constant(0.)
            update_param_noise_threshold = -np.log(1. - exploration.value(t) +
                                                   exploration.value(t) / float(env.action_space.n))
            kwargs['reset'] = reset
            kwargs['update_param_noise_threshold'] = update_param_noise_threshold
            kwargs['update_param_noise_scale'] = True
        action, epsilon, _, _ = model.step(tf.constant(obs), update_eps=update_eps, **kwargs)
        action = action[0].numpy()
        reset = False
        new_obs, rew, done, _ = env.step(action)
        # Store transition in the replay buffer.
        new_obs = np.expand_dims(np.array(new_obs), axis=0)
        if n_step:
            temp_buffer.append((obs, action, rew, new_obs, done, is_demo))
            if len(temp_buffer) == n_step:
                n_step_sample = get_n_step_sample(temp_buffer, gamma)
                replay_buffer.add(*n_step_sample)
        else:
            replay_buffer.add(obs[0], action, rew, new_obs[0], float(done), 0.)
        obs = new_obs
        # invert the log-scaled score for logging
        this_episode_reward += np.sign(rew) * (np.exp(np.sign(rew) * rew) - 1.)
        if done:
            num_episodes += 1
            obs = env.reset()
            obs = np.expand_dims(np.array(obs), axis=0)
            episode_rewards.append(this_episode_reward)
            reset = True
            if this_episode_reward > best_score:
                best_score = this_episode_reward
                ckpt = tf.train.Checkpoint(model=model)
                manager = tf.train.CheckpointManager(ckpt, './best_model', max_to_keep=1)
                manager.save(t)
                logger.log("saved best model")
            this_episode_reward = 0.0
        if t % train_freq == 0:
            experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
            batch_idxes = experience[-1]
            if experience[6] is None:  # for n_step = 0
                obses_t, actions, rewards, obses_tp1, dones, is_demos = tuple(map(tf.constant, experience[:6]))
                obses_tpn, rewards_n, dones_n = None, None, None
                weights = tf.constant(experience[-2])
            else:
                obses_t, actions, rewards, obses_tp1, dones, is_demos, obses_tpn, rewards_n, dones_n, weights = tuple(
                    map(tf.constant, experience[:-1]))
            td_errors, n_td_errors, loss_dq, loss_n, loss_E, loss_l2, weighted_error = model.train(
                obses_t, actions, rewards, obses_tp1, dones, is_demos, weights, obses_tpn, rewards_n, dones_n)
            new_priorities = np.abs(td_errors) + np.abs(n_td_errors) \
                + demo_prioritized_replay_eps * is_demos + prioritized_replay_eps * (1. - is_demos)
            replay_buffer.update_priorities(batch_idxes, new_priorities)
            # for logging
            sample_counts += batch_size
            demo_used_counts += np.sum(is_demos)
        if t % target_network_update_freq == 0:
            # Update target network periodically.
            model.update_target()
        if t % checkpoint_freq == 0:
            save_path = checkpoint_path
            ckpt = tf.train.Checkpoint(model=model)
            manager = tf.train.CheckpointManager(ckpt, save_path, max_to_keep=10)
            manager.save(t)
            logger.log("saved checkpoint")
        elapsed_time = timedelta(seconds=time() - start)
        if done and num_episodes > 0 and num_episodes % print_freq == 0:
            logger.record_tabular("steps", t)
            logger.record_tabular("episodes", num_episodes)
            logger.record_tabular("mean 100 episode reward", np.mean(episode_rewards))
            logger.record_tabular("max 100 episode reward", np.max(episode_rewards))
            logger.record_tabular("min 100 episode reward", np.min(episode_rewards))
            logger.record_tabular("demo sample rate", demo_used_counts / sample_counts)
            logger.record_tabular("epsilon", epsilon.numpy())
            logger.record_tabular("loss_td", np.mean(loss_dq.numpy()))
            logger.record_tabular("loss_n_td", np.mean(loss_n.numpy()))
            logger.record_tabular("loss_margin", np.mean(loss_E.numpy()))
            logger.record_tabular("loss_l2", np.mean(loss_l2.numpy()))
            logger.record_tabular("losses_all", weighted_error.numpy())
            logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
            logger.record_tabular("pre_train", False)
            logger.record_tabular("elapsed time", elapsed_time)
            logger.dump_tabular()
    return model
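# get_n_step_sample (used above) is not shown; a hedged sketch of what it
# plausibly does: collapse an n-step window into one transition carrying the
# discounted n-step return and the observation n steps ahead. All names below
# are hypothetical, and the exact field layout of the real buffer may differ.
def get_n_step_sample_sketch(window, gamma):
    obs0, action0, rew0, new_obs0, done0, is_demo0 = window[0]
    ret_n, done_n, obs_n = 0., 0., new_obs0
    for i, (_, _, rew, new_obs, done, _) in enumerate(window):
        if done_n:
            break  # stop accumulating once an episode boundary is reached
        ret_n += (gamma ** i) * rew
        done_n = float(done)
        obs_n = new_obs
    # one flattened transition: 1-step fields plus the n-step tail
    return (obs0[0], action0, rew0, new_obs0[0], float(done0), float(is_demo0),
            obs_n[0], ret_n, done_n)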
def train(env, eval_env, agent, render=False, render_eval=False, sanity_run=False, nb_epochs=500, nb_epoch_cycles=20, nb_rollout_steps=100, nb_train_steps=50, param_noise_adaption_interval=50, hist_files=None, start_ckpt=None, demo_files=None): rank = MPI.COMM_WORLD.Get_rank() mpi_size = MPI.COMM_WORLD.Get_size() if rank == 0: logdir = logger.get_dir() else: logdir = None memory = agent.memory batch_size = agent.batch_size with tf_util.single_threaded_session() as sess: # Prepare everything. agent.initialize(sess, start_ckpt=start_ckpt) sess.graph.finalize() agent.reset() dbg_tf_init(sess, agent.dbg_vars) total_nb_train = 0 total_nb_rollout = 0 total_nb_eval = 0 # pre-train demo and critic_step # train_params: (nb_steps, lr_scale) total_nb_train = pretrain_demo(agent, env, demo_files, total_nb_train, train_params=[(100, 1.0)], start_ckpt=start_ckpt) load_history(agent, env, hist_files) # main training obs = env.reset() reset = False episode_step = 0 last_episode_step = 0 for i_epoch in range(nb_epochs): t_epoch_start = time.time() logger.info('\n%s epoch %d starts:' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), i_epoch)) for i_cycle in range(nb_epoch_cycles): logger.info( '\n%s cycles_%d of epoch_%d' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), i_cycle, i_epoch)) # rollout rcd_obs, rcd_action, rcd_r, rcd_new_obs, rcd_done = [], [], [], [], [] if not sanity_run and mpi_size == 1 and last_episode_step != 0: # todo: use mpi_max(last_episode_step) # dynamically set nb_rollout_steps nb_rollout_steps = max(last_episode_step * 4, batch_size) logger.info( '[%d, %d] rollout for %d steps.' % (total_nb_rollout, memory.nb_entries, nb_rollout_steps)) t_rollout_start = time.time() for i_rollout in range(nb_rollout_steps): rollout_log = i_cycle == 0 # 50% param_noise, 40% action_noise action, q = agent.pi(obs, total_nb_rollout, compute_Q=True, rollout_log=rollout_log, apply_param_noise=i_rollout % 10 < 5, apply_action_noise=i_rollout % 10 > 5) assert action.shape == env.action_space.shape new_obs, r, done, reset, info = env.step(action) if rank == 0 and render: env.render() episode_step += 1 total_nb_rollout += 1 if rollout_log: summary_list = [('rollout/%s' % tp, info[tp]) for tp in ['rwd_walk', 'rwd_total']] tp = 'rwd_agent' summary_list += [ ('rollout/%s_x%d' % (tp, info['rf_agent']), info[tp] * info['rf_agent']) ] summary_list += [('rollout/q', q)] if r != 0: summary_list += [('rollout/q_div_r', q / r)] agent.add_list_summary(summary_list, total_nb_rollout) # store at the end of cycle to speed up MPI rollout # agent.store_transition(obs, action, r, new_obs, done) rcd_obs.append(obs) rcd_action.append(action) rcd_r.append(r) rcd_new_obs.append(new_obs) rcd_done.append(done) obs = new_obs if reset: # Episode done. last_episode_step = episode_step episode_step = 0 agent.reset() obs = env.reset() agent.store_multrans(memory, rcd_obs, rcd_action, rcd_r, rcd_new_obs, rcd_done) t_train_start = time.time() steps_per_second = float(nb_rollout_steps) / (t_train_start - t_rollout_start) agent.add_list_summary( [('rollout/steps_per_second', steps_per_second)], total_nb_rollout) # Train. if not sanity_run: # dynamically set nb_train_steps if memory.nb_entries > batch_size * 20: # using 1% of data for training every step? nb_train_steps = max( int(memory.nb_entries * 0.01 / batch_size), 1) else: nb_train_steps = 0 logger.info('[%d] training for %d steps.' % (total_nb_train, nb_train_steps)) for _ in range(nb_train_steps): # Adapt param noise, if necessary. 
if memory.nb_entries >= batch_size and total_nb_train % param_noise_adaption_interval == 0: agent.adapt_param_noise(total_nb_train) agent.train_main(total_nb_train) agent.update_target_net() total_nb_train += 1 if i_epoch == 0 and i_cycle < 5: rollout_duration = t_train_start - t_rollout_start train_duration = time.time() - t_train_start logger.info( 'rollout_time(%d) = %.3fs, train_time(%d) = %.3fs' % (nb_rollout_steps, rollout_duration, nb_train_steps, train_duration)) logger.info( 'rollout_speed=%.3fs/step, train_speed = %.3fs/step' % (np.divide(rollout_duration, nb_rollout_steps), np.divide(train_duration, nb_train_steps))) logger.info('') mpi_size = MPI.COMM_WORLD.Get_size() # Log stats. stats = agent.get_stats(memory) combined_stats = stats.copy() def as_scalar(x): if isinstance(x, np.ndarray): assert x.size == 1 return x[0] elif np.isscalar(x): return x else: raise ValueError('expected scalar, got %s' % x) combined_stats_sums = MPI.COMM_WORLD.allreduce( np.array([as_scalar(x) for x in combined_stats.values()])) combined_stats = { k: v / mpi_size for (k, v) in zip(combined_stats.keys(), combined_stats_sums) } # exclude logging zobs_dbg_%d, zobs_dbg_%d_normalized summary_list = [(key, combined_stats[key]) for key, v in combined_stats.items() if 'dbg' not in key] agent.add_list_summary(summary_list, i_epoch) # only print out train stats for epoch_0 for sanity check if i_epoch > 0: combined_stats = {} # Evaluation and statistics. if eval_env is not None: logger.info('[%d, %d] run evaluation' % (i_epoch, total_nb_eval)) total_nb_eval = eval_episode(eval_env, render_eval, agent, combined_stats, total_nb_eval) logger.info('epoch %d duration: %.2f mins' % (i_epoch, (time.time() - t_epoch_start) / 60)) for key in sorted(combined_stats.keys()): logger.record_tabular(key, combined_stats[key]) logger.dump_tabular() logger.info('') if rank == 0: agent.store_ckpt(os.path.join(logdir, '%s.ckpt' % 'ddpg'), i_epoch)
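# agent.update_target_net() above presumably performs the standard soft
# ("Polyak") target update; a hedged TF1 sketch with a hypothetical tau
# parameter and variable lists, not the original API:
#   theta_target <- tau * theta_online + (1 - tau) * theta_target
import tensorflow as tf

def make_soft_update_ops(online_vars, target_vars, tau=0.001):
    return [tf.assign(t, (1. - tau) * t + tau * o)
            for o, t in zip(online_vars, target_vars)]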
def learn(env,
          num_actions=3,
          lr=5e-4,
          max_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          num_cpu=16):
    torch.set_num_threads(num_cpu)
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
    screen = player_relative
    obs, xy_per_marine = common.init(env, obs)
    group_id = 0
    reset = True
    dqn = DQN(num_actions, lr, cuda)
    print('\nCollecting experience...')
    checkpoint_path = 'models/deepq/checkpoint.pth.tar'
    if os.path.exists(checkpoint_path):
        dqn, saved_mean_reward = load_checkpoint(dqn, cuda, filename=checkpoint_path)
    for t in range(max_timesteps):
        # Take action and update exploration to the newest value
        # custom processing for DefeatZerglingsAndBanelings
        obs, screen, player = common.select_marine(env, obs)
        action = dqn.choose_action(np.array(screen)[None])
        reset = False
        rew = 0
        new_action = None
        obs, new_action = common.marine_action(env, obs, player, action)
        army_count = env._obs[0].observation.player_common.army_count
        try:
            if army_count > 0 and _ATTACK_SCREEN in obs[0].observation["available_actions"]:
                obs = env.step(actions=new_action)
            else:
                new_action = [sc2_actions.FunctionCall(_NO_OP, [])]
                obs = env.step(actions=new_action)
        except Exception:
            pass  # an action can occasionally be rejected by the env; skip it and continue
        player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
        new_screen = player_relative
        rew += obs[0].reward
        done = obs[0].step_type == environment.StepType.LAST
        selected = obs[0].observation["screen"][_SELECTED]
        player_y, player_x = (selected == _PLAYER_FRIENDLY).nonzero()
        if len(player_y) > 0:
            player = [int(player_x.mean()), int(player_y.mean())]
        if len(player) == 2:
            # re-center the screen on the selected marine
            if player[0] > 32:
                new_screen = common.shift(LEFT, player[0] - 32, new_screen)
            elif player[0] < 32:
                new_screen = common.shift(RIGHT, 32 - player[0], new_screen)
            if player[1] > 32:
                new_screen = common.shift(UP, player[1] - 32, new_screen)
            elif player[1] < 32:
                new_screen = common.shift(DOWN, 32 - player[1], new_screen)
        # Store transition in the replay buffer.
        replay_buffer.add(screen, action, rew, new_screen, float(done))
        screen = new_screen
        episode_rewards[-1] += rew
        reward = episode_rewards[-1]
        if done:
            print("Episode Reward : %s" % episode_rewards[-1])
            obs = env.reset()
            player_relative = obs[0].observation["screen"][_PLAYER_RELATIVE]
            screen = player_relative
            group_list = common.init(env, obs)
            # Select all marines first
            # env.step(actions=[sc2_actions.FunctionCall(_SELECT_UNIT, [_SELECT_ALL])])
            episode_rewards.append(0.0)
            reset = True
        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay: experience = replay_buffer.sample( batch_size, beta=beta_schedule.value(t)) (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample( batch_size) weights, batch_idxes = np.ones_like(rewards), None td_errors = dqn.learn(obses_t, actions, rewards, obses_tp1, gamma, batch_size) if prioritized_replay: new_priorities = np.abs(td_errors) + prioritized_replay_eps replay_buffer.update_priorities(batch_idxes, new_priorities) if t > learning_starts and t % target_network_update_freq == 0: # Update target network periodically. dqn.update_target() mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1) num_episodes = len(episode_rewards) if done and print_freq is not None and len( episode_rewards) % print_freq == 0: logger.record_tabular("steps", t) logger.record_tabular("episodes", num_episodes) logger.record_tabular("reward", reward) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular("% time spent exploring", int(100 * exploration.value(t))) logger.dump_tabular() if (checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0): if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward: if print_freq is not None: logger.log( "Saving model due to mean reward increase: {} -> {}".format( saved_mean_reward, mean_100ep_reward)) save_checkpoint({ 'epoch': t + 1, 'state_dict': dqn.save_state_dict(), 'best_accuracy': mean_100ep_reward }, checkpoint_path) saved_mean_reward = mean_100ep_reward
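# dqn.learn above returns per-sample TD errors that feed the priority update; a
# hedged PyTorch sketch of that computation (q_net / target_net are hypothetical
# stand-ins for whatever networks DQN holds internally):
import torch

def td_errors_sketch(q_net, target_net, obs, act, rew, obs1, done, gamma):
    q = q_net(obs).gather(1, act.long().unsqueeze(1)).squeeze(1)  # Q(s, a)
    with torch.no_grad():
        q_next = target_net(obs1).max(dim=1).values                # max_a' Q_target(s', a')
        target = rew + gamma * (1. - done) * q_next                # Bellman target
    return target - q  # train by backpropagating a Huber/MSE loss on this error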
def learn( env, policy_func, *, timesteps=4, timesteps_per_batch, # timesteps per actor per update clip_param, entcoeff, # clipping parameter epsilon, entropy coeff optim_epochs, optim_stepsize, optim_batchsize, # optimization hypers gamma, lam, # advantage estimation max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint callback=None, # you can do anything in the callback, since it takes locals(), globals() adam_epsilon=1e-5, schedule='constant', # annealing for stepsize parameters (epsilon and adam) save_per_iter=100, ckpt_dir=None, task="train", sample_stochastic=True, load_model_path=None, task_name=None, max_sample_traj=1500): # Setup losses and stuff # ---------------------------------------- ob_space = env.observation_space ac_space = env.action_space pi = policy_func("pi", timesteps, ob_space, ac_space) # Construct network for new policy oldpi = policy_func("oldpi", timesteps, ob_space, ac_space) # Network for old policy atarg = tf.placeholder( dtype=tf.float32, shape=[None]) # Target advantage function (if applicable) ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return pi_vpred = tf.placeholder(dtype=tf.float32, shape=[None]) lrmult = tf.placeholder( name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule clip_param = clip_param * lrmult # Annealed cliping parameter epislon ob = U.get_placeholder_cached(name="ob") # ob_now = tf.placeholder(dtype=tf.float32, shape=[optim_batchsize, list(ob_space.shape)[0]]) ac = pi.pdtype.sample_placeholder([None]) kloldnew = oldpi.pd.kl(pi.pd) ent = pi.pd.entropy() meankl = U.mean(kloldnew) meanent = U.mean(ent) pol_entpen = (-entcoeff) * meanent ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold surr1 = ratio * atarg # surrogate from conservative policy iteration surr2 = U.clip(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg # pol_surr = -U.mean(tf.minimum( surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP) vf_loss = U.mean(tf.square(pi.vpred - ret)) # total_loss = pol_surr + pol_entpen + vf_loss total_loss = pol_surr + pol_entpen losses = [pol_surr, pol_entpen, meankl, meanent] loss_names = ["pol_surr", "pol_entpen", "kl", "ent"] var_list = pi.get_trainable_variables() vf_var_list = [ v for v in var_list if v.name.split("/")[1].startswith("vf") ] pol_var_list = [ v for v in var_list if not v.name.split("/")[1].startswith("vf") ] # lossandgrad = U.function([ob, ac, atarg ,ret, lrmult], losses + [U.flatgrad(total_loss, var_list)]) lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, pol_var_list)]) vf_grad = U.function([ob, ac, atarg, ret, lrmult], U.flatgrad(vf_loss, vf_var_list)) # adam = MpiAdam(var_list, epsilon=adam_epsilon) pol_adam = MpiAdam(pol_var_list, epsilon=adam_epsilon) vf_adam = MpiAdam(vf_var_list, epsilon=adam_epsilon) assign_old_eq_new = U.function( [], [], updates=[ tf.assign(oldv, newv) for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables()) ]) compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses) U.initialize() #adam.sync() pol_adam.sync() vf_adam.sync() # Prepare for rollouts # ---------------------------------------- seg_gen = traj_segment_generator(pi, timesteps, env, timesteps_per_batch, stochastic=True) traj_gen = traj_episode_generator(pi, env, timesteps_per_batch, stochastic=sample_stochastic) episodes_so_far = 0 timesteps_so_far = 0 iters_so_far = 0 tstart = time.time() lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths rewbuffer = 
deque(maxlen=100)  # rolling buffer for episode rewards
    EpRewMean_MAX = 2.5e3
    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0, max_seconds > 0]) == 1, \
        "Only one time constraint permitted"
    if task == 'sample_trajectory':
        # not elegant, i know :(
        sample_trajectory(load_model_path, max_sample_traj, traj_gen, task_name, sample_stochastic)
        sys.exit()
    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break
        elif max_seconds and time.time() - tstart >= max_seconds:
            break
        if schedule == 'constant':
            cur_lrmult = 1.0
        elif schedule == 'linear':
            cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
        else:
            raise NotImplementedError
        # Save model
        if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
            U.save_state(os.path.join(ckpt_dir, task_name), counter=iters_so_far)
        logger.log("********** Iteration %i ************" % iters_so_far)
        seg = seg_gen.__next__()
        add_vtarg_and_adv(seg, gamma, lam)
        ob, ac, atarg, vpred, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["vpred"], seg["tdlamret"]
        vpredbefore = seg["vpred"]  # predicted value function before update
        atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate
        d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vpred=vpred, vtarg=tdlamret), shuffle=False)
        # d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vpred=vpred, vtarg=tdlamret), shuffle=not pi.recurrent)
        optim_batchsize = optim_batchsize or ob.shape[0]
        if hasattr(pi, "ob_rms"):
            pi.ob_rms.update(ob)  # update running mean/std for policy
        assign_old_eq_new()  # set old parameter values to new parameter values
        logger.log("Optimizing...")
        logger.log(fmt_row(13, loss_names))
        # Here we do a bunch of optimization epochs over the data
        for _ in range(optim_epochs):
            losses = []  # list of tuples, each of which gives the loss for a minibatch
            pre_obs = [seg["ob_reset"] for jmj in range(timesteps - 1)]
            for batch in d.iterate_once(optim_batchsize):
                # feed ob: prepend timesteps-1 reset observations to batch["ob"], then
                # slice it into sliding windows (see the helper sketch after this function)
                ob_now = np.append(pre_obs, batch['ob']).reshape(optim_batchsize + timesteps - 1,
                                                                 list(ob_space.shape)[0])
                pre_obs = ob_now[-(timesteps - 1):]
                ob_fin = []
                for jmj in range(optim_batchsize):
                    ob_fin.append(ob_now[jmj:jmj + timesteps])
                *newlosses, g = lossandgrad(ob_fin, batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)  # g seems to be all zeros here
                # adam.update(g, optim_stepsize * cur_lrmult)
                pol_adam.update(g, optim_stepsize * cur_lrmult)
                vf_g = vf_grad(ob_fin, batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
                vf_adam.update(vf_g, optim_stepsize * cur_lrmult)
                losses.append(newlosses)
            logger.log(fmt_row(13, np.mean(losses, axis=0)))
        pre_obs = [seg["ob_reset"] for jmj in range(timesteps - 1)]
        for batch in d.iterate_once(optim_batchsize):
            # feed ob: same sliding-window construction as above
            ob_now = np.append(pre_obs, batch['ob']).reshape(optim_batchsize + timesteps - 1,
                                                             list(ob_space.shape)[0])
            pre_obs = ob_now[-(timesteps - 1):]
            ob_fin = []
            for jmj in range(optim_batchsize):
                ob_fin.append(ob_now[jmj:jmj + timesteps])
            *newlosses, g = lossandgrad(ob_fin, batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)  # g seems to be all zeros here
            # adam.update(g, optim_stepsize * cur_lrmult)
            pol_adam.update(g, optim_stepsize * cur_lrmult)
            vf_g
= vf_grad(ob_fin, batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) vf_adam.update(vf_g, optim_stepsize * cur_lrmult) logger.log("Evaluating losses...") losses = [] loss_pre_obs = [seg["ob_reset"] for jmj in range(timesteps - 1)] for batch in d.iterate_once(optim_batchsize): ### feed ob ob_now = np.append(loss_pre_obs, batch['ob']).reshape( optim_batchsize + timesteps - 1, list(ob_space.shape)[0]) loss_pre_obs = ob_now[-(timesteps - 1):] ob_fin = [] for jmj in range(optim_batchsize): ob_fin.append(ob_now[jmj:jmj + timesteps]) newlosses = compute_losses(ob_fin, batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult) losses.append(newlosses) meanlosses, _, _ = mpi_moments(losses, axis=0) logger.log(fmt_row(13, meanlosses)) for (lossval, name) in zipsame(meanlosses, loss_names): logger.record_tabular("loss_" + name, lossval) logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret)) lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples lens, rews = map(flatten_lists, zip(*listoflrpairs)) lenbuffer.extend(lens) rewbuffer.extend(rews) logger.record_tabular("EpLenMean", np.mean(lenbuffer)) logger.record_tabular("EpRewMean", np.mean(rewbuffer)) if (np.mean(rewbuffer) > EpRewMean_MAX): EpRewMean_MAX = np.mean(rewbuffer) print(iters_so_far) print(np.mean(rewbuffer)) logger.record_tabular("EpThisIter", len(lens)) episodes_so_far += len(lens) timesteps_so_far += sum(lens) iters_so_far += 1 logger.record_tabular("EpisodesSoFar", episodes_so_far) logger.record_tabular("TimestepsSoFar", timesteps_so_far) logger.record_tabular("TimeElapsed", time.time() - tstart) if MPI.COMM_WORLD.Get_rank() == 0: logger.dump_tabular()
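# The window-stacking logic repeated in the loops above could live in one helper;
# a hedged sketch, assuming timesteps > 1 (make_ob_windows is a hypothetical name,
# not in the original code):
import numpy as np

def make_ob_windows(pre_obs, ob_batch, timesteps, ob_dim):
    """Prepend the carried-over observations, then slice overlapping windows of
    length `timesteps` so each sample sees its recent history."""
    flat = np.append(pre_obs, ob_batch).reshape(len(ob_batch) + timesteps - 1, ob_dim)
    windows = np.asarray([flat[i:i + timesteps] for i in range(len(ob_batch))])
    return windows, flat[-(timesteps - 1):]  # windows, plus the new carry-over tail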