Example #1
0
    def test_learning_rnn(self):
        """Smoke-test one MPC train/sample cycle with an RNN dynamics model."""
        def rew_func(next_obs,
                     acs,
                     mean_obs=0.,
                     std_obs=1.,
                     mean_acs=0.,
                     std_acs=1.):
            # De-normalize observations/actions before scoring.
            obs = next_obs * std_obs + mean_obs
            actions = acs * std_acs + mean_acs
            # Pendulum-style cost: angle + angular velocity + control effort.
            angle_term = torch.acos(obs[:, 0].clamp(min=-1, max=1)) ** 2
            velocity_term = 0.1 * (obs[:, 2].clamp(min=-8, max=8) ** 2)
            control_term = 0.001 * actions.squeeze(-1) ** 2
            rews = -(angle_term + velocity_term + control_term)
            return rews.squeeze(0)

        # Dynamics model: LSTM net wrapped in a deterministic state model.
        model_net = ModelNetLSTM(self.env.observation_space,
                                 self.env.action_space)
        dyn_model = DeterministicSModel(self.env.observation_space,
                                        self.env.action_space,
                                        model_net,
                                        rnn=True,
                                        data_parallel=False,
                                        parallel_dim=0)

        # MPC policy that plans through the model net with the reward above.
        policy = MPCPol(self.env.observation_space,
                        self.env.action_space,
                        model_net,
                        rew_func,
                        1,
                        1,
                        mean_obs=0.,
                        std_obs=1.,
                        mean_acs=0.,
                        std_acs=1.,
                        rnn=True)
        optimizer = torch.optim.Adam(model_net.parameters(), 1e-3)

        # Collect a single episode with the MPC policy.
        sampler = EpiSampler(self.env, policy, num_parallel=1)
        episodes = sampler.sample(policy, max_epis=1)

        trajectory = Traj()
        trajectory.add_epis(episodes)
        trajectory = ef.add_next_obs(trajectory)
        trajectory = ef.compute_h_masks(trajectory)
        trajectory.register_epis()
        # NOTE(review): adds the trajectory to itself — confirm this is the
        # intended aggregation call.
        trajectory.add_traj(trajectory)

        # One epoch of dynamics-model training on the collected data.
        result_dict = mpc.train_dm(
            trajectory, dyn_model, optimizer, epoch=1, batch_size=1)

        del sampler
Example #2
0
# Sampler that rolls out the MPC policy in the environment to collect episodes.
rl_sampler = EpiSampler(env,
                        mpc_pol,
                        num_parallel=args.num_parallel,
                        seed=args.seed)

# train loop
total_epi = 0
total_step = 0
counter_agg_iters = 0
max_rew = -1e+6  # best reward seen so far — presumably used for checkpointing; TODO confirm against the rest of the loop
while args.max_epis > total_epi:
    with measure('train model'):
        # Fit the dynamics model on the aggregated trajectory; RNN models use
        # their own (smaller) batch size.
        result_dict = mpc.train_dm(traj,
                                   dm,
                                   optim_dm,
                                   epoch=args.epoch_per_iter,
                                   batch_size=args.batch_size
                                   if not args.rnn else args.rnn_batch_size)
    with measure('sample'):
        # Rebuild the MPC policy around the freshly trained net, then sample.
        mpc_pol = MPCPol(ob_space, ac_space, dm.net, rew_func, args.n_samples,
                         args.horizon_of_samples, mean_obs, std_obs, mean_acs,
                         std_acs, args.rnn)
        epis = rl_sampler.sample(mpc_pol, max_epis=args.max_epis_per_iter)

        # Collect the new episodes into a fresh CPU-resident trajectory.
        curr_traj = Traj(traj_device='cpu')
        curr_traj.add_epis(epis)

        curr_traj = ef.add_next_obs(curr_traj)
        curr_traj = ef.compute_h_masks(curr_traj)
        # NOTE(review): this statement (and the loop body) continues past the
        # end of this chunk — the view is truncated here.
        traj = ef.normalize_obs_and_acs(curr_traj,
                                        mean_obs,
Example #3
0
# MPC policy that plans through the current dynamics net.
mpc_pol = MPCPol(
    ob_space, ac_space, dm_net, rew_func,
    args.n_samples, args.horizon_of_samples,
    mean_obs, std_obs, mean_acs, std_acs, args.rnn)

# Adam over the dynamics-net parameters.
optim_dm = torch.optim.Adam(dm_net.parameters(), args.dm_lr)

# Episode sampler that drives the policy in the environment.
rl_sampler = EpiSampler(env,
                        mpc_pol,
                        num_parallel=args.num_parallel,
                        seed=args.seed)

# train loop
total_epi = total_step = counter_agg_iters = 0
max_rew = -1e6  # best episode reward observed so far
while args.max_epis > total_epi:
    with measure('train model'):
        # Fit the dynamics model on the aggregated trajectory.
        result_dict = mpc.train_dm(
            traj, dm, optim_dm, epoch=args.epoch_per_iter, batch_size=args.batch_size)
    with measure('sample'):
        # Rebuild the MPC policy around the freshly trained net before sampling.
        mpc_pol = MPCPol(ob_space, ac_space, dm.net, rew_func,
                         args.n_samples, args.horizon_of_samples,
                         mean_obs, std_obs, mean_acs, std_acs, args.rnn)
        epis = rl_sampler.sample(
            mpc_pol, max_epis=args.max_epis_per_iter)

        # Collect the new episodes into a fresh CPU-resident trajectory.
        curr_traj = Traj(traj_device='cpu')
        curr_traj.add_epis(epis)

        curr_traj = ef.add_next_obs(curr_traj)
        curr_traj = ef.compute_h_masks(curr_traj)
        # Normalize with the fixed statistics. Note `traj` is rebound to the
        # normalized current trajectory here; aggregation with prior data
        # presumably happens past this chunk — TODO confirm (view truncated).
        traj = ef.normalize_obs_and_acs(
            curr_traj, mean_obs, std_obs, mean_acs, std_acs, return_statistic=False)
        curr_traj.register_epis()