def test_learning(self):
    # Policy network and Gaussian policy, plus a target policy initialized
    # from the same weights.
    pol_net = PolNet(self.env.ob_space, self.env.ac_space, h1=32, h2=32)
    pol = GaussianPol(self.env.ob_space, self.env.ac_space, pol_net)
    targ_pol_net = PolNet(self.env.ob_space, self.env.ac_space, 32, 32)
    targ_pol_net.load_state_dict(pol_net.state_dict())
    targ_pol = GaussianPol(
        self.env.ob_space, self.env.ac_space, targ_pol_net)

    # Q-function and its target, likewise initialized from the Q-network's
    # weights.
    qf_net = QNet(self.env.ob_space, self.env.ac_space, h1=32, h2=32)
    qf = DeterministicSAVfunc(self.env.ob_space, self.env.ac_space, qf_net)
    targ_qf_net = QNet(self.env.ob_space, self.env.ac_space, 32, 32)
    targ_qf_net.load_state_dict(qf_net.state_dict())
    targ_qf = DeterministicSAVfunc(
        self.env.ob_space, self.env.ac_space, targ_qf_net)

    sampler = EpiSampler(self.env, pol, num_parallel=1)

    optim_pol = torch.optim.Adam(pol_net.parameters(), 3e-4)
    optim_qf = torch.optim.Adam(qf_net.parameters(), 3e-4)

    # Collect a small batch of episodes and build a trajectory with
    # next-observation fields, as SVG requires.
    epis = sampler.sample(pol, max_steps=32)
    traj = Traj()
    traj.add_epis(epis)
    traj = ef.add_next_obs(traj)
    traj.register_epis()

    result_dict = svg.train(
        traj, pol, targ_pol, qf, targ_qf, optim_pol, optim_qf,
        1, 32, 0.01, 0.9, 1)

    del sampler
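# The lines below are the per-iteration body of the SVG example's training
# loop. They consume `epis`, `on_traj`, and `off_traj` that the full script
# builds beforehand. What follows is a minimal, assumed sketch of that setup,
# reusing only calls already shown in the test above (EpiSampler.sample,
# Traj, add_epis); `args.max_steps_per_iter` is an illustrative name, not
# taken from the original script.
off_traj = Traj()
total_epi = 0
total_step = 0

# One sampling iteration: collect fresh on-policy episodes with the current
# policy and wrap them in a Traj. The excerpt below continues from here,
# preprocessing `on_traj` and merging it into `off_traj`.
epis = sampler.sample(pol, max_steps=args.max_steps_per_iter)
on_traj = Traj()
on_traj.add_epis(epis)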
# Add next-observation fields to the freshly sampled on-policy trajectory,
# then merge it into the off-policy replay trajectory.
on_traj = ef.add_next_obs(on_traj)
on_traj.register_epis()
off_traj.add_traj(on_traj)

total_epi += on_traj.num_epi
step = on_traj.num_step
total_step += step

# Train SVG on the accumulated off-policy data.
result_dict = svg.train(
    off_traj,
    pol, targ_pol, qf, targ_qf,
    optim_pol, optim_qf, step, args.batch_size,
    args.tau, args.gamma, args.sampling,
)

# Log the episode returns for this iteration.
rewards = [np.sum(epi['rews']) for epi in epis]
mean_rew = np.mean(rewards)
logger.record_results(args.log, result_dict, score_file,
                      total_epi, step, total_step,