def learn(args, env, policy_func, dataset, optim_batch_size=128, adam_epsilon=1e-5, optim_stepsize=3e-4):
    # ============================== INIT FROM ARGS ==================================
    max_iters = args.BC_max_iter
    pretrained = args.pretrained
    ckpt_dir = args.checkpoint_dir
    log_dir = args.log_dir
    task_name = args.task_name

    val_per_iter = int(max_iters / 10)
    pi = policy_func(args, "pi", env)  # Construct network for new policy
    oldpi = policy_func(args, "oldpi", env)

    # placeholder
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    stochastic = U.get_placeholder_cached(name="stochastic")
    loss = tf.reduce_mean(tf.square(ac - pi.ac))
    var_list = pi.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, stochastic],
                             [loss] + [U.flatgrad(loss, var_list)])

    if not pretrained:
        writer = U.FileWriter(log_dir)
        ep_stats = stats(["Loss"])
    U.initialize()
    adam.sync()
    logger.log("Pretraining with Behavior Cloning...")
    for iter_so_far in tqdm(range(int(max_iters + 1))):
        ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train')
        loss, g = lossandgrad(ob_expert, ac_expert, True)
        adam.update(g, optim_stepsize)
        if not pretrained:
            ep_stats.add_all_summary(writer, [loss], iter_so_far)
        if iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            loss, g = lossandgrad(ob_expert, ac_expert, False)
            logger.log("Validation:")
            logger.log("Loss: %f" % loss)
            if not pretrained:
                U.save_state(os.path.join(ckpt_dir, task_name),
                             counter=iter_so_far)

    if pretrained:
        savedir_fname = tempfile.TemporaryDirectory().name
        U.save_state(savedir_fname, max_to_keep=args.max_to_keep)
        return savedir_fname
def learn(env, policy_func, dataset, pretrained, optim_batch_size=128, max_iters=1e4,
          adam_epsilon=1e-5, optim_stepsize=3e-4,
          ckpt_dir=None, log_dir=None, task_name=None):
    val_per_iter = int(max_iters / 10)
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space)  # Construct network for new policy

    # placeholder
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    stochastic = U.get_placeholder_cached(name="stochastic")
    loss = tf.reduce_mean(tf.square(ac - pi.ac))  # mean squared difference between expert actions and policy actions
    var_list = pi.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, stochastic],
                             [loss] + [U.flatgrad(loss, var_list)])
    # inputs: observation, action, stochastic flag (bool);
    # outputs: loss (MSE between expert and policy actions) and its gradient

    if not pretrained:
        writer = U.FileWriter(log_dir)
        ep_stats = stats(["Loss"])
    U.initialize()
    adam.sync()
    logger.log("Pretraining with Behavior Cloning...")
    for iter_so_far in tqdm(range(int(max_iters))):
        ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train')
        loss, g = lossandgrad(ob_expert, ac_expert, True)
        adam.update(g, optim_stepsize)
        if not pretrained:
            ep_stats.add_all_summary(writer, [loss], iter_so_far)
        if iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            loss, g = lossandgrad(ob_expert, ac_expert, False)
            logger.log("Validation:")
            logger.log("Loss: %f" % loss)
            if not pretrained:
                U.save_state(os.path.join(ckpt_dir, task_name),
                             counter=iter_so_far)

    if pretrained:
        savedir_fname = tempfile.TemporaryDirectory().name
        U.save_state(savedir_fname, var_list=pi.get_variables())
        return savedir_fname
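# --- Illustrative sketch (not part of this module) ---
# The behavior-cloning objective above is a plain mean-squared error between the
# policy's actions and the expert's actions, and `lossandgrad` returns that loss plus
# its gradient. The NumPy function below mirrors the same computation for a hypothetical
# linear policy ob @ W, purely to document what is being optimized; the names
# `bc_loss_and_grad` and `W` are assumptions, not identifiers from this repository.
import numpy as np

def bc_loss_and_grad(W, ob_expert, ac_expert):
    """MSE between a linear policy's actions (ob @ W) and expert actions, plus dL/dW."""
    ac_pred = ob_expert @ W                       # policy actions for the batch
    diff = ac_pred - ac_expert                    # per-sample action error
    loss = np.mean(np.square(diff))               # mirrors tf.reduce_mean(tf.square(ac - pi.ac))
    grad = 2.0 * ob_expert.T @ diff / diff.size   # gradient of the mean squared error w.r.t. W
    return loss, grad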
def learn(env, policy_func, dataset, pretrained, optim_batch_size=128, max_iters=1e3,
          adam_epsilon=1e-6, optim_stepsize=2e-4,
          ckpt_dir=None, log_dir=None, task_name=None, high_level=False):
    val_per_iter = int(max_iters / 100)
    ob_space = env.observation_space
    ac_space = env.action_space
    start_time = time.time()

    if not high_level:
        pi_low = policy_func("pi_low", ob_space, ac_space.spaces[1])
        # placeholder
        # ob_low = U.get_placeholder_cached(name="ob")
        ob_low = pi_low.ob
        ac_low = pi_low.pdtype.sample_placeholder([None])
        # stochastic_low = U.get_placeholder_cached(name="stochastic")
        stochastic_low = pi_low.stochastic
        loss_low = tf.reduce_mean(tf.square(ac_low - pi_low.ac))
        var_list_low = pi_low.get_trainable_variables()
        adam_low = MpiAdam(var_list_low, epsilon=adam_epsilon)
        lossandgrad_low = U.function([ob_low, ac_low, stochastic_low],
                                     [loss_low] + [U.flatgrad(loss_low, var_list_low)])

        if not pretrained:
            writer = U.FileWriter(log_dir)
            ep_stats_low = stats(["Loss_low"])
        U.initialize()
        adam_low.sync()
        logger.log("Pretraining with Behavior Cloning Low...")
        for iter_so_far in tqdm(range(int(max_iters))):
            ob_expert, ac_expert = dataset.get_next_batch(
                optim_batch_size, 'train', high_level)
            loss, g = lossandgrad_low(ob_expert, ac_expert, True)
            adam_low.update(g, optim_stepsize)
            if not pretrained:
                ep_stats_low.add_all_summary(writer, [loss], iter_so_far)
            if iter_so_far % val_per_iter == 0:
                ob_expert, ac_expert = dataset.get_next_batch(
                    -1, 'val', high_level)
                loss, g = lossandgrad_low(ob_expert, ac_expert, False)
                logger.log("Validation:")
                logger.log("Loss: %f" % loss)
                if not pretrained:
                    U.save_state(os.path.join(ckpt_dir, task_name),
                                 counter=iter_so_far)

        if pretrained:
            savedir_fname = tempfile.TemporaryDirectory().name
            U.save_state(savedir_fname, var_list=pi_low.get_variables())
            return savedir_fname
    else:
        pi_high = policy_func("pi_high", ob_space, ac_space.spaces[0])  # high -> action_label
        # ob_high = U.get_placeholder_cached(name="ob")
        ob_high = pi_high.ob
        ac_high = pi_high.pdtype.sample_placeholder([None, 1])
        onehot_labels = tf.one_hot(indices=tf.cast(ac_high, tf.int32), depth=3)
        # stochastic_high = U.get_placeholder_cached(name="stochastic")
        stochastic_high = pi_high.stochastic
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=pi_high.logits, labels=onehot_labels)
        loss_high = tf.reduce_mean(cross_entropy)
        var_list_high = pi_high.get_trainable_variables()
        adam_high = MpiAdam(var_list_high, epsilon=adam_epsilon)
        lossandgrad_high = U.function([ob_high, ac_high, stochastic_high],
                                      [loss_high] + [U.flatgrad(loss_high, var_list_high)])

        # train high level policy
        if not pretrained:
            writer = U.FileWriter(log_dir)
            # ep_stats_low = stats(["Loss_low"])
            ep_stats_high = stats(["loss_high"])
        U.initialize()
        adam_high.sync()
        logger.log("Pretraining with Behavior Cloning High...")
        for iter_so_far in tqdm(range(int(max_iters))):
            ob_expert, ac_expert = dataset.get_next_batch(
                optim_batch_size, 'train', high_level)
            loss, g = lossandgrad_high(ob_expert, ac_expert, True)
            adam_high.update(g, optim_stepsize)
            if not pretrained:
                ep_stats_high.add_all_summary(writer, [loss], iter_so_far)
            if iter_so_far % val_per_iter == 0:
                ob_expert, ac_expert = dataset.get_next_batch(
                    -1, 'val', high_level)
                loss, g = lossandgrad_high(ob_expert, ac_expert, False)
                logger.log("Validation:")
                logger.log("Loss: %f" % loss)
                if not pretrained:
                    U.save_state(os.path.join(ckpt_dir, task_name),
                                 counter=iter_so_far)

        if pretrained:
            savedir_fname = tempfile.TemporaryDirectory().name
            U.save_state(savedir_fname, var_list=pi_high.get_variables())
            return savedir_fname

    print("--- %s seconds ---" % (time.time() - start_time))
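# --- Illustrative sketch (not part of this module) ---
# The high-level loss above is a softmax cross-entropy over three discrete options
# (depth=3 one-hot labels against pi_high.logits). The NumPy function below mirrors
# that computation for reference; `softmax_cross_entropy` is a hypothetical name and
# not something defined elsewhere in this repository.
import numpy as np

def softmax_cross_entropy(logits, labels):
    """Mean cross-entropy between integer class labels and unnormalized logits."""
    z = logits - logits.max(axis=1, keepdims=True)                  # stabilize the softmax
    log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))    # log softmax
    return -np.mean(log_probs[np.arange(len(labels)), labels])      # pick the label's log-prob per row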
def learn(
        env,
        policy_func,
        discriminator,
        expert_dataset,
        pretrained,
        pretrained_weight,
        *,
        g_step,
        d_step,
        episodes_per_batch,  # what to train on
        dropout_keep_prob,
        sequence_size,  # rnn parameters
        max_kl,
        cg_iters,
        gamma,
        lam,  # advantage estimation
        entcoeff=0.0,
        cg_damping=1e-2,
        vf_stepsize=3e-4,
        d_stepsize=3e-4,
        vf_iters=3,
        max_timesteps=0,
        max_episodes=0,
        max_iters=0,  # time constraint
        callback=None,
        save_per_iter=100,
        ckpt_dir=None,
        log_dir=None,
        load_model_path=None,
        task_name=None):
    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space, ac_space,
                     reuse=(pretrained_weight is not None))
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = U.mean(kloldnew)
    meanent = U.mean(ent)
    entbonus = entcoeff * meanent

    vferr = U.mean(tf.square(pi.vpred - ret))

    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = U.mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list if v.name.split("/")[1].startswith("pol")
    ]
    vf_var_list = [
        v for v in all_var_list if v.name.split("/")[1].startswith("vf")
    ]
    d_adam = MpiAdam(discriminator.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32, shape=[None], name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n(
        [U.sum(g * tangent) for (g, tangent) in zipsame(klgrads, tangents)])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())
        ])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg],
                                     losses + [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret],
                                       U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    writer = U.FileWriter(log_dir)
    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, discriminator,
                                     episodes_per_batch,
                                     stochastic=True,
                                     seq_length=sequence_size)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=40)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(discriminator.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if a pretrained weight is provided
    if pretrained_weight is not None:
        U.load_state(pretrained_weight, var_list=pi.get_variables())
    # if a model path is provided
    if load_model_path is not None:
        U.load_state(load_model_path)

    while True:
        if callback:
            callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break

        # Save model
        if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
            U.save_state(os.path.join(ckpt_dir, task_name),
                         counter=iters_so_far)

        logger.log("********** Iteration %i ************" % iters_so_far)

        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        for _ in range(g_step):
            with timed("sampling"):
                seg = seg_gen.__next__()
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
            vpredbefore = seg["vpred"]  # predicted value function before update
            atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)

            args = seg["ob"], seg["ac"], atarg
            fvpargs = [arr[::5] for arr in args]

            assign_old_eq_new()  # set old parameter values to new parameter values
            with timed("computegrad"):
                *lossbefore, g = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log("Got zero gradient. not updating")
            else:
                with timed("cg"):
                    stepdir = cg(fisher_vector_product,
                                 g,
                                 cg_iters=cg_iters,
                                 verbose=rank == 0)
                assert np.isfinite(stepdir).all()
                shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                lm = np.sqrt(shs / max_kl)
                # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
                fullstep = stepdir / lm
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                for _ in range(10):
                    thnew = thbefore + fullstep * stepsize
                    set_from_flat(thnew)
                    meanlosses = surr, kl, *_ = allmean(
                        np.array(compute_losses(*args)))
                    improve = surr - surrbefore
                    logger.log("Expected: %.3f Actual: %.3f" %
                               (expectedimprove, improve))
                    if not np.isfinite(meanlosses).all():
                        logger.log("Got non-finite value of losses -- bad!")
                    elif kl > max_kl * 1.5:
                        logger.log("violated KL constraint. shrinking step.")
                    elif improve < 0:
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log("Stepsize OK!")
                        break
                    stepsize *= .5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if nworkers > 1 and iters_so_far % 20 == 0:
                    paramsums = MPI.COMM_WORLD.allgather(
                        (thnew.sum(), vfadam.getflat().sum()))  # list of tuples
                    assert all(
                        np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
            with timed("vf"):
                for _ in range(vf_iters):
                    for (mbob, mbret) in dataset.iterbatches(
                            (seg["ob"], seg["tdlamret"]),
                            include_final_partial_batch=False,
                            batch_size=128):
                        if hasattr(pi, "ob_rms"):
                            pi.ob_rms.update(mbob)  # update running mean/std for policy
                        g = allmean(compute_vflossandgrad(mbob, mbret))
                        vfadam.update(g, vf_stepsize)

        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))

        # ------------------ Update D ------------------
        logger.log("Optimizing Discriminator...")
        logger.log(fmt_row(13, discriminator.loss_name))
        traj_gen, traj_len_gen = seg["ep_trajs"], seg["ep_lens"]
        # traj_expert, traj_len_expert = expert_dataset.get_next_traj_batch()
        batch_size = len(traj_gen) // d_step
        d_losses = []  # list of tuples, each of which gives the loss for a minibatch
        for traj_batch, traj_len_batch in dataset.iterbatches(
                (traj_gen, traj_len_gen),
                include_final_partial_batch=False,
                batch_size=batch_size):
            traj_expert, traj_len_expert = expert_dataset.get_next_traj_batch(
                len(traj_batch))
            # update running mean/std for discriminator
            ob_batch, _ = traj2trans(traj_batch, traj_len_batch, ob_space.shape[0])
            ob_expert, _ = traj2trans(traj_expert, traj_len_expert, ob_space.shape[0])
            if hasattr(discriminator, "obs_rms"):
                discriminator.obs_rms.update(
                    np.concatenate((ob_batch, ob_expert), 0))
            *newlosses, g = discriminator.lossandgrad(traj_batch, traj_len_batch,
                                                      traj_expert, traj_len_expert,
                                                      dropout_keep_prob)
            d_adam.update(allmean(g), d_stepsize)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()
            g_loss_stats.add_all_summary(writer, g_losses, iters_so_far)
            d_loss_stats.add_all_summary(writer, np.mean(d_losses, axis=0),
                                         iters_so_far)
            ep_stats.add_all_summary(writer, [
                np.mean(true_rewbuffer),
                np.mean(rewbuffer),
                np.mean(lenbuffer)
            ], iters_so_far)
def learn(
        env,
        policy_func,
        discriminator,
        expert_dataset,
        pretrained,
        pretrained_weight,
        *,
        g_step,
        d_step,
        timesteps_per_batch,  # what to train on
        max_kl,
        cg_iters,
        gamma,
        lam,  # advantage estimation
        entcoeff=0.001,
        cg_damping=1e-2,
        vf_stepsize=3e-4,
        d_stepsize=1.5e-4,
        vf_iters=3,
        max_timesteps=0,
        max_episodes=0,
        max_iters=0,
        max_seconds=0,  # time constraint
        callback=None,
        save_per_iter=100,
        ckpt_dir=None,
        log_dir=None,
        load_model_path=None,
        task_name=None,
        timesteps_per_actorbatch=16,
        clip_param=1e-5,
        adam_epsilon=4e-4,
        optim_epochs=1,
        optim_stepsize=4e-4,
        optim_batchsize=16,
        schedule='linear'):
    nworkers = MPI.COMM_WORLD.Get_size()
    print("##### nworkers: ", nworkers)
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    # ob_space = np.array([5*64*64 + 10*64*64 + 11 + 524])  # env.observation_space
    # ac_space = np.array([1])  # env.action_space
    from gym import spaces
    ob_space = spaces.Box(low=-1000,
                          high=10000,
                          shape=(5 * 64 * 64 + 10 * 64 * 64 + 11 + 524, ))
    ac_space = spaces.Discrete(524)
    pi = policy_func("pi", ob_space, ac_space,
                     reuse=(pretrained_weight is not None))
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    lrmult = tf.placeholder(
        name='lrmult', dtype=tf.float32,
        shape=[])  # learning rate multiplier, updated with schedule
    clip_param = clip_param * lrmult  # Annealed clipping parameter epsilon

    ob = U.get_placeholder_cached(name="ob")
    # ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=(None, ob_space[0]))
    ac = pi.pdtype.sample_placeholder([None])
    # prevac = pi.pdtype.sample_placeholder([None])
    prevac_placeholder = U.get_placeholder_cached(name="last_action_one_hot")

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    # ent = pi.pd.entropy_usual()  # see how it works, the value is the same
    meankl = U.mean(kloldnew)
    meanent = U.mean(ent)
    # entbonus = entcoeff * meanent
    # entcoeff = entcoeff * lrmult + 1e-5
    pol_entpen = (-entcoeff) * meanent

    ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac))  # pnew / pold
    surr1 = ratio * atarg  # surrogate from conservative policy iteration
    surr2 = tf.clip_by_value(ratio, 1.0 - clip_param,
                             1.0 + clip_param) * atarg
    pol_surr = -tf.reduce_mean(tf.minimum(
        surr1, surr2))  # PPO's pessimistic surrogate (L^CLIP)
    vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
    total_loss = pol_surr + pol_entpen + vf_loss
    losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
    loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]

    var_list = pi.get_trainable_variables()
    lossandgrad = U.function([ob, ac, prevac_placeholder, atarg, ret, lrmult],
                             losses + [U.flatgrad(total_loss, var_list)])
    g_adam = MpiAdam(var_list, epsilon=adam_epsilon)

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())
        ])
    compute_losses = U.function(
        [ob, ac, prevac_placeholder, atarg, ret, lrmult], losses)

    # all_var_list = pi.get_trainable_variables()
    # var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("pol")]
    # vf_var_list = [v for v in all_var_list if v.name.split("/")[1].startswith("vf")]
    d_adam = MpiAdam(discriminator.get_trainable_variables())
    # vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    writer = U.FileWriter(log_dir)
    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    g_adam.sync()
    d_adam.sync()
    # vfadam.sync()
    print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, discriminator,
                                     timesteps_per_batch, expert_dataset,
                                     stochastic=True)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=100)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=100)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=100)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(discriminator.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # # if a pretrained weight is provided
    # if pretrained_weight is not None:
    #     U.load_state(pretrained_weight, var_list=pi.get_variables())
    # # if a model path is provided
    # if load_model_path is not None:
    #     U.load_state(load_model_path)

    while True:
        if callback:
            callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break

        if schedule == 'constant':
            cur_lrmult = 1.0
        elif schedule == 'linear':
            cur_lrmult = max(
                1.0 - float(timesteps_so_far) / (max_timesteps + 1e7),
                0.1)  # keep the smallest value at 0.1 instead of 0
        else:
            raise NotImplementedError

        # Save model
        if iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
            U.save_state(os.path.join(ckpt_dir, task_name),
                         counter=iters_so_far)

        logger.log("********** Iteration %i ************" % iters_so_far)

        # def fisher_vector_product(p):
        #     return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        meanlosses = []
        for _ in range(g_step):
            with timed("sampling"):
                seg = seg_gen.__next__()
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, prevac, atarg, tdlamret = seg["ob"], seg["ac"], seg["prevac"], seg["adv"], seg["tdlamret"]
            vpredbefore = seg["vpred"]  # predicted value function before update
            # print("before standardize atarg value: ", atarg)
            if atarg.std() != 0:
                atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate
            else:
                with open("debug.txt", "a+") as f:
                    print("atarg.std() is equal to 0", atarg, file=f)
            # print("atarg value: ", atarg)

            # convert prevac to one hot
            one_hot_prevac = []
            if type(prevac) is np.ndarray:
                depth = prevac.size
                one_hot_prevac = np.zeros((depth, 524))
                one_hot_prevac[np.arange(depth), prevac] = 1
            else:
                one_hot_prevac = np.zeros(524)
                one_hot_prevac[prevac] = 1
                one_hot_prevac = [one_hot_prevac]
            prevac = one_hot_prevac

            d = Dataset(dict(ob=ob, ac=ac, prevac=prevac, atarg=atarg, vtarg=tdlamret),
                        shuffle=not pi.recurrent)
            optim_batchsize = optim_batchsize or ob.shape[0]
            # print("optim_batchsize: ", optim_batchsize)

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)  # update running mean/std for policy

            assign_old_eq_new()  # set old parameter values to new parameter values
            logger.log(fmt_row(13, loss_names))
            for _ in range(optim_epochs):
                losses = []  # list of tuples, each of which gives the loss for a minibatch
                for batch in d.iterate_once(optim_batchsize):
                    *newlosses, g = lossandgrad(batch["ob"], batch["ac"],
                                                batch['prevac'], batch["atarg"],
                                                batch["vtarg"], cur_lrmult)
                    g_adam.update(g, optim_stepsize * cur_lrmult)  # allmean(g)
                    x_newlosses = compute_losses(batch["ob"], batch["ac"],
                                                 batch["prevac"], batch["atarg"],
                                                 batch["vtarg"], cur_lrmult)
                    meanlosses = [x_newlosses]
                    losses.append(x_newlosses)
                logger.log(fmt_row(13, np.mean(losses, axis=0)))
            # meanlosses = losses

            # # logger.log("Evaluating losses...")
            # losses = []
            # for batch in d.iterate_once(optim_batchsize):
            #     newlosses = compute_losses(batch["ob"], batch["ac"], batch["prevac"],
            #                                batch["atarg"], batch["vtarg"], cur_lrmult)
            #     losses.append(newlosses)
            #
            # # meanlosses,_,_ = mpi_moments(losses, axis=0)  # it will be useful for multithreading
            meanlosses = np.mean(losses, axis=0)
            # logger.log(fmt_row(13, meanlosses))

        g_losses = meanlosses
        for (lossval, name) in zipsame(meanlosses, loss_names):
            logger.record_tabular("loss_" + name, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))

        # ------------------ Update D ------------------
        logger.log("Optimizing Discriminator...")
        logger.log(fmt_row(13, discriminator.loss_name))
        global UP_TO_STEP
        ob_expert, ac_expert, prevac_expert = expert_dataset.get_next_batch(
            len(ob), UP_TO_STEP)
        batch_size = len(ob) // d_step
        d_losses = []  # list of tuples, each of which gives the loss for a minibatch
        for ob_batch, ac_batch, prevac_batch in dataset.iterbatches(
                (ob, ac, prevac),
                include_final_partial_batch=False,
                batch_size=batch_size):
            # print("###### len(ob_batch): ", len(ob_batch))
            ob_expert, ac_expert, prevac_expert = expert_dataset.get_next_batch(
                len(ob_batch), UP_TO_STEP)
            # update running mean/std for discriminator
            if hasattr(discriminator, "obs_rms"):
                discriminator.obs_rms.update(
                    np.concatenate((ob_batch, ob_expert), 0))

            depth = len(ac_batch)
            one_hot_ac_batch = np.zeros((depth, 524))
            one_hot_ac_batch[np.arange(depth), ac_batch] = 1

            # depth = len(prevac_batch)
            # one_hot_prevac_batch = np.zeros((depth, 524))
            # one_hot_prevac_batch[np.arange(depth), prevac_batch] = 1

            depth = len(ac_expert)
            one_hot_ac_expert = np.zeros((depth, 524))
            one_hot_ac_expert[np.arange(depth), ac_expert] = 1

            depth = len(prevac_expert)
            one_hot_prevac_expert = np.zeros((depth, 524))
            one_hot_prevac_expert[np.arange(depth), prevac_expert] = 1

            *newlosses, g = discriminator.lossandgrad(ob_batch, one_hot_ac_batch,
                                                      prevac_batch, ob_expert,
                                                      one_hot_ac_expert,
                                                      one_hot_prevac_expert)
            global LAST_EXPERT_ACC, LAST_EXPERT_LOSS
            LAST_EXPERT_ACC = newlosses[5]
            LAST_EXPERT_LOSS = newlosses[1]
            d_adam.update(g, d_stepsize)  # allmean(g)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        # logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far = len(lens)
        timesteps_so_far = sum(lens)
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()
            g_loss_stats.add_all_summary(writer, g_losses, iters_so_far)
            d_loss_stats.add_all_summary(writer, np.mean(d_losses, axis=0),
                                         iters_so_far)
            ep_stats.add_all_summary(writer, [
                np.mean(true_rewbuffer),
                np.mean(rewbuffer),
                np.mean(lenbuffer)
            ], iters_so_far)

        global ITER_SOFAR_GLOBAL
        ITER_SOFAR_GLOBAL = iters_so_far

        # log ac picked
        with open('ac.txt', 'a+') as fh:
            print(ac, file=fh)