def test_learning_rnn(self):
    """Smoke test: one PPO-clip and one PPO-KL update on recurrent nets.

    Builds an LSTM Gaussian policy and an LSTM state-value function,
    samples a short rollout, prepares the trajectory, and runs a single
    training epoch of each PPO variant.
    """
    ob_space = self.env.observation_space
    ac_space = self.env.action_space

    # Recurrent Gaussian policy.
    policy_net = PolNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    policy = GaussianPol(ob_space, ac_space, policy_net, rnn=True)

    # Recurrent state-value function.
    value_net = VNetLSTM(ob_space, h_size=32, cell_size=32)
    value_fn = DeterministicSVfunc(ob_space, value_net, rnn=True)

    sampler = EpiSampler(self.env, policy, num_parallel=1)

    optim_pol = torch.optim.Adam(policy_net.parameters(), 3e-4)
    optim_vf = torch.optim.Adam(value_net.parameters(), 3e-4)

    # Collect a short batch of episodes.
    episodes = sampler.sample(policy, max_steps=400)

    # Assemble and preprocess the trajectory.
    traj = Traj()
    traj.add_epis(episodes)
    traj = ef.compute_vs(traj, value_fn)
    traj = ef.compute_rets(traj, 0.99)
    traj = ef.compute_advs(traj, 0.99, 0.95)
    traj = ef.centerize_advs(traj)
    traj = ef.compute_h_masks(traj)
    traj.register_epis()

    # Clip-objective PPO step.
    result_dict = ppo_clip.train(
        traj=traj, pol=policy, vf=value_fn, clip_param=0.2,
        optim_pol=optim_pol, optim_vf=optim_vf, epoch=1, batch_size=2)
    # KL-penalty PPO step.
    result_dict = ppo_kl.train(
        traj=traj, pol=policy, vf=value_fn, kl_beta=0.1, kl_targ=0.2,
        optim_pol=optim_pol, optim_vf=optim_vf, epoch=1, batch_size=2,
        max_grad_norm=20)

    del sampler
def test_learning_rnn(self):
    """Smoke test: a single TRPO update on recurrent policy/value nets.

    Builds an LSTM Gaussian policy and an LSTM state-value function,
    samples a short rollout, preprocesses the trajectory, and runs one
    TRPO training step.
    """
    ob_space = self.env.observation_space
    ac_space = self.env.action_space

    # Recurrent Gaussian policy.
    policy_net = PolNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    policy = GaussianPol(ob_space, ac_space, policy_net, rnn=True)

    # Recurrent state-value function.
    value_net = VNetLSTM(ob_space, h_size=32, cell_size=32)
    value_fn = DeterministicSVfunc(ob_space, value_net, rnn=True)

    sampler = EpiSampler(self.env, policy, num_parallel=1)

    # TRPO updates the policy via natural gradient, so only the value
    # function gets a torch optimizer; the policy optimizer below is
    # created to mirror the original test but is unused by trpo.train.
    optim_pol = torch.optim.Adam(policy_net.parameters(), 3e-4)
    optim_vf = torch.optim.Adam(value_net.parameters(), 3e-4)

    episodes = sampler.sample(policy, max_steps=400)

    # Assemble and preprocess the trajectory.
    traj = Traj()
    traj.add_epis(episodes)
    traj = ef.compute_vs(traj, value_fn)
    traj = ef.compute_rets(traj, 0.99)
    traj = ef.compute_advs(traj, 0.99, 0.95)
    traj = ef.centerize_advs(traj)
    traj = ef.compute_h_masks(traj)
    traj.register_epis()

    # One TRPO step: (traj, pol, vf, optim_vf, epoch, batch_size).
    result_dict = trpo.train(traj, policy, value_fn, optim_vf, 1, 2)

    del sampler
s_vf_net = VNet(observation_space) if args.sampling_policy == 'teacher': teacher_sampler = EpiSampler( env, t_pol, num_parallel=args.num_parallel, seed=args.seed) student_sampler = EpiSampler( env, s_pol, num_parallel=args.num_parallel, seed=args.seed) optim_pol = torch.optim.Adam(s_pol_net.parameters(), args.pol_lr) total_epi = 0 total_step = 0 max_rew = -1e6 while args.max_epis > total_epi: with measure('sample'): if args.sampling_policy == 'teacher': epis = teacher_sampler.sample( t_pol, max_epis=args.max_epis_per_iter) else: epis = student_sampler.sample( s_pol, max_epis=args.max_epis_per_iter) with measure('train'): traj = Traj()
def test_learning(self):
    """Smoke test: one R2D2-SAC update with twin recurrent Q-functions.

    Builds an LSTM Gaussian policy, two LSTM Q-functions each paired
    with a target network initialized to identical weights, samples a
    short rollout, prepares the prioritized/sequence trajectory data,
    and runs a single r2d2_sac training step.
    """
    ob_space = self.env.observation_space
    ac_space = self.env.action_space

    pol_net = PolNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    pol = GaussianPol(ob_space, ac_space, pol_net, rnn=True)

    # First Q-function and its target (target starts as an exact copy).
    qf_net1 = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    qf1 = DeterministicSAVfunc(ob_space, ac_space, qf_net1, rnn=True)
    targ_qf_net1 = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    targ_qf_net1.load_state_dict(qf_net1.state_dict())
    targ_qf1 = DeterministicSAVfunc(
        ob_space, ac_space, targ_qf_net1, rnn=True)

    # Second Q-function and its target.
    qf_net2 = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    qf2 = DeterministicSAVfunc(ob_space, ac_space, qf_net2, rnn=True)
    targ_qf_net2 = QNetLSTM(ob_space, ac_space, h_size=32, cell_size=32)
    targ_qf_net2.load_state_dict(qf_net2.state_dict())
    targ_qf2 = DeterministicSAVfunc(
        ob_space, ac_space, targ_qf_net2, rnn=True)

    qfs = [qf1, qf2]
    targ_qfs = [targ_qf1, targ_qf2]

    # Learnable entropy temperature (log alpha), shape: scalar.
    log_alpha = nn.Parameter(torch.zeros(()))

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
    optim_qf1 = torch.optim.Adam(qf_net1.parameters(), 3e-4)
    optim_qf2 = torch.optim.Adam(qf_net2.parameters(), 3e-4)
    optim_qfs = [optim_qf1, optim_qf2]
    optim_alpha = torch.optim.Adam([log_alpha], 3e-4)

    epis = sampler.sample(pol, max_steps=32)

    # Assemble trajectory with next-observations, priorities, and
    # sequence priorities for R2D2-style replay.
    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    max_pri = traj.get_max_pri()
    traj = ef.set_all_pris(traj, max_pri)
    traj = ef.compute_seq_pris(traj, 4)
    traj = ef.compute_h_masks(traj)
    # Precompute hidden states for each Q-function and its target.
    # enumerate(zip(...)) replaces the original range(len(...)) indexing
    # over the parallel qfs/targ_qfs lists; the hs_name strings are
    # unchanged so downstream lookups still match.
    for i, (qf, targ_qf) in enumerate(zip(qfs, targ_qfs)):
        traj = ef.compute_hs(
            traj, qf, hs_name='q_hs'+str(i), input_acs=True)
        traj = ef.compute_hs(
            traj, targ_qf, hs_name='targ_q_hs'+str(i), input_acs=True)
    traj.register_epis()

    # One R2D2-SAC training step; positional hyperparameters are kept
    # exactly as in the original test.
    result_dict = r2d2_sac.train(
        traj,
        pol, qfs, targ_qfs, log_alpha,
        optim_pol, optim_qfs, optim_alpha,
        2, 32, 4, 2,
        0.01, 0.99, 2,
    )

    del sampler