Code example #1
def main(config):
    set_seed(config['seed'])

    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = globals()[config['env']]()  # instantiate env
    env = normalize(env)  # apply normalize wrapper to env

    print("env.observation_space.shape: ", env.observation_space.shape)
    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=('multiworld', config['env'], env),
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )
    sampler.set_env(env)

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        learning_rate=config['learning_rate'],
        num_ppo_steps=config['num_promp_steps'],
        clip_eps=config['clip_eps'],
        target_inner_step=config['target_inner_step'],
        init_inner_kl_penalty=config['init_inner_kl_penalty'],
        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
    )

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
    )

    trainer.train()
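
A minimal sketch of a config dict that supplies every key main() above reads. The concrete values and the 'env' class name are illustrative assumptions, not the project's defaults; 'baseline' and 'env' must name classes importable in main()'s module, since they are resolved via globals().

config = {
    'seed': 1,
    'baseline': 'LinearFeatureBaseline',
    'env': 'SawyerReachEnv',               # hypothetical env class name
    'rollouts_per_meta_task': 20,
    'max_path_length': 100,
    'parallel': True,
    'discount': 0.99,
    'gae_lambda': 1.0,
    'normalize_adv': True,
    'hidden_sizes': (64, 64),
    'inner_lr': 0.1,                       # adaptation step size
    'learning_rate': 1e-3,                 # meta-policy gradient step size
    'num_promp_steps': 5,                  # outer ProMP/PPO steps per iteration
    'clip_eps': 0.3,
    'target_inner_step': 1e-2,
    'init_inner_kl_penalty': 5e-4,
    'adaptive_inner_kl_penalty': False,
    'n_itr': 1000,
    'meta_batch_size': 40,
    'num_inner_grad_steps': 1,
}

main(config)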
Code example #2
class TestLinearFeatureBaseline(unittest.TestCase):
    def setUp(self):
        self.random_env = RandomEnv()
        self.random_policy = RandomPolicy(1, 1)
        self.meta_batch_size = 2
        self.batch_size = 10
        self.path_length = 100
        self.linear = LinearFeatureBaseline()
        self.sampler = MetaSampler(self.random_env,
                                   self.random_policy,
                                   self.batch_size,
                                   self.meta_batch_size,
                                   self.path_length,
                                   parallel=True)

    def testFit(self):
        paths = self.sampler.obtain_samples()
        for task in paths.values():
            unfit_error = 0
            for path in task:
                path["returns"] = utils.discount_cumsum(path["rewards"], 0.99)
                unfit_pred = self.linear.predict(path)
                unfit_error += sum([
                    np.square(pred - actual)
                    for pred, actual in zip(unfit_pred, path['returns'])
                ])
            self.linear.fit(task)
            fit_error = 0
            for path in task:
                fit_pred = self.linear.predict(path)
                fit_error += sum([
                    np.square(pred - actual)
                    for pred, actual in zip(fit_pred, path['returns'])
                ])
            self.assertTrue(fit_error < unfit_error)

    def testSerialize(self):
        paths = self.sampler.obtain_samples()
        for task in paths.values():
            for path in task:
                path["returns"] = utils.discount_cumsum(path["rewards"], 0.99)
            self.linear.fit(task)
            fit_error_pre = 0
            for path in task:
                fit_pred = self.linear.predict(path)
                fit_error_pre += sum([
                    np.square(pred - actual)
                    for pred, actual in zip(fit_pred, path['returns'])
                ])
            pkl = pickle.dumps(self.linear)
            self.linear = pickle.loads(pkl)
            fit_error_post = 0
            for path in task:
                fit_pred = self.linear.predict(path)
                fit_error_post += sum([
                    np.square(pred - actual)
                    for pred, actual in zip(fit_pred, path['returns'])
                ])
            self.assertEqual(fit_error_pre, fit_error_post)
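
Both tests above rely on utils.discount_cumsum to turn per-step rewards into discounted reward-to-go, returns[t] = sum_k gamma**k * rewards[t + k]. A minimal sketch of that computation (assumed behavior, not necessarily the project's utils implementation):

import numpy as np

def discount_cumsum(rewards, gamma):
    # Accumulate from the end of the path: returns[t] = rewards[t] + gamma * returns[t + 1]
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns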
Code example #3
    def setUp(self):
        self.random_env = RandomEnv()
        self.random_policy = RandomPolicy(1, 1)
        self.meta_batch_size = 2
        self.batch_size = 10
        self.path_length = 100
        self.linear = LinearFeatureBaseline()
        self.sampler = MetaSampler(self.random_env,
                                   self.random_policy,
                                   self.batch_size,
                                   self.meta_batch_size,
                                   self.path_length,
                                   parallel=True)
Code example #4
def main(config):
    set_seed(config['seed'])

    reward_baseline = LinearTimeBaseline()  # the usual baseline
    return_baseline = LinearFeatureBaseline()  # the additional baseline for DICE

    env = globals()[config['env']]()  # instantiate env
    env = normalize(env)  # apply normalize wrapper to env

    meta_baseline = MetaNNBaseline(
        input_size=env.observation_space.shape[0])  # the meta baseline

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config['rollouts_per_meta_task'],
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = TMAMLMetaSampleProcessor(
        baseline=reward_baseline,
        max_path_length=config['max_path_length'],
        discount=config['discount'],
        normalize_adv=config['normalize_adv'],
        positive_adv=config['positive_adv'],
        return_baseline=return_baseline,
        metabaseline=meta_baseline,
    )

    algo = TMAML(policy=policy,
                 max_path_length=config['max_path_length'],
                 meta_batch_size=config['meta_batch_size'],
                 num_inner_grad_steps=config['num_inner_grad_steps'],
                 inner_lr=config['inner_lr'],
                 learning_rate=config['learning_rate'])

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
    )

    trainer.train()
Code example #5
    def setUp(self):
        self.env = env = MetaPointEnv()

        self.baseline = baseline = LinearFeatureBaseline()

        self.policy = policy = MetaGaussianMLPPolicy(
            name="meta-policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            meta_batch_size=10,
            hidden_sizes=(16, 16),
            learn_std=True,
            hidden_nonlinearity=tf.tanh,
            output_nonlinearity=None,
        )

        self.sampler = MetaSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=2,
            meta_batch_size=10,
            max_path_length=50,
            parallel=False,
        )

        self.sample_processor = MetaSampleProcessor(
            baseline=baseline,
            discount=0.99,
            gae_lambda=1.0,
            normalize_adv=True,
            positive_adv=False,
        )

        self.algo = ProMP(
            policy=policy,
            inner_lr=0.1,
            meta_batch_size=10,
            num_inner_grad_steps=2,
            learning_rate=1e-3,
            num_ppo_steps=5,
            num_minibatches=1,
            clip_eps=0.5,
            target_inner_step=2e-2,
            init_inner_kl_penalty=1e-3,
        )
Code example #6
def main(config):
    # config['seed'] = 4
    experiment.set_name("short meta saving test")
    set_seed(config['seed'])
    experiment.log_parameters(config)
    # experiment.log_parameter("task limit size", 3)

    # experiment.log_metric("seed", config['seed'])
    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = globals()[config['env']]()  # instantiate env
    env = normalize(env)  # apply normalize wrapper to env

    with open('/saved_policies/mjvel.policy', 'rb') as policy_file:
        policy = pickle.load(policy_file)
        print("policy loaded")

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        learning_rate=config['learning_rate'],
        num_ppo_steps=config['num_promp_steps'],
        clip_eps=config['clip_eps'],
        target_inner_step=config['target_inner_step'],
        init_inner_kl_penalty=config['init_inner_kl_penalty'],
        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
    )

    trainer = RLTrainer(algo=algo,
                        policy=policy,
                        env=env,
                        sampler=sampler,
                        sample_processor=sample_processor,
                        n_itr=config['n_itr'],
                        num_inner_grad_steps=config['num_inner_grad_steps'],
                        experiment=experiment)

    trainer.train()
Code example #7
def main(config):
    set_seed(config['seed'])
    sess = tf.Session()

    with sess.as_default() as sess:

        data = joblib.load(load_path + "/params.pkl")
        policy = data['policy']
        env = data['env']
        baseline = data['baseline']

        # config['meta_batch_size'] = env.NUM_EVAL
        # policy.meta_batch_size = env.NUM_EVAL

        sampler = MetaSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=config[
                'rollouts_per_meta_task'],  # Will be modified later
            meta_batch_size=config['meta_batch_size'],
            max_path_length=config['max_path_length'],
            parallel=config['parallel'],
        )

        sample_processor = MetaSampleProcessor(
            baseline=baseline,
            discount=config['discount'],
            gae_lambda=config['gae_lambda'],
            normalize_adv=config['normalize_adv'],
        )

        algo = ProMP(
            policy=policy,
            inner_lr=config['inner_lr'],
            meta_batch_size=config['meta_batch_size'],
            num_inner_grad_steps=config['num_inner_grad_steps'],
            learning_rate=config['learning_rate'],
            num_ppo_steps=config['num_promp_steps'],
            clip_eps=config['clip_eps'],
            target_inner_step=config['target_inner_step'],
            init_inner_kl_penalty=config['init_inner_kl_penalty'],
            adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
        )

        tester = Tester(
            algo=algo,
            policy=policy,
            env=env,
            sampler=sampler,
            sample_processor=sample_processor,
            #n_itr=config['n_itr'],
            eff=config['eff'],
            num_inner_grad_steps=config['num_inner_grad_steps'],
        )

        tester.train()
        sess.close()
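
The params.pkl consumed above is simply a joblib-serialized dict holding the trained 'policy', the 'env', and the 'baseline'. A minimal sketch (hypothetical helper, written under that assumption) of how a training run could produce such a snapshot:

import joblib

def save_snapshot(dump_dir, policy, env, baseline):
    # Write a params.pkl with exactly the keys the joblib.load() call above expects.
    joblib.dump({'policy': policy, 'env': env, 'baseline': baseline},
                dump_dir + "/params.pkl")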
Code example #8
File: norm_vpg.py  Project: MinorJerry/NG_MAML
def main(config):
    set_seed(config['seed'])

    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = globals()[config['env']]()  # instantiate env
    env = normalize(env)  # apply normalize wrapper to env

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = VPGMAML(
        policy=policy,
        learning_rate=config['learning_rate'],
        inner_type=config['inner_type'],
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        exploration=False,
    )

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
    )

    trainer.train()
Code example #9
def main(config):
    set_seed(config['seed'])

    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = globals()[config['env']]()  # instantiate env
    env = normalize(env)  # apply normalize wrapper to env

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        learning_rate=config['learning_rate'],
        num_ppo_steps=config['num_promp_steps'],
        clip_eps=config['clip_eps'],
        target_inner_step=config['target_inner_step'],
        init_inner_kl_penalty=config['init_inner_kl_penalty'],
        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
    )

    gpu_config = tf.ConfigProto()
    gpu_config.gpu_options.allow_growth = True  # pylint: disable=E1101
    sess = tf.Session(config=gpu_config)

    saver = tf.train.Saver(
        keep_checkpoint_every_n_hours=config['keep_checkpoint_every_n_hours'],
        max_to_keep=config['max_checkpoints_to_keep'])

    save_path = os.path.join(args.dump_path, 'model.ckpt')

    if config['restore_path'] is not None:
        logger.log('Restoring parameters from {}'.format(
            config['restore_path']))
        saver.restore(sess, config['restore_path'])
        logger.log('Restored')

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        saver=saver,
        save_path=save_path,
        save_steps=config['save_steps'],
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        sess=sess,
    )

    trainer.train()
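
A later run can point config['restore_path'] at the newest checkpoint written under save_path. A minimal sketch using TensorFlow 1.x's standard checkpoint lookup (the directory name is illustrative):

import tensorflow as tf

def latest_restore_path(dump_dir):
    # Newest checkpoint under dump_dir (e.g. '<dump_dir>/model.ckpt-900'), or None if
    # nothing has been saved yet; suitable as the restore target for saver.restore() above.
    return tf.train.latest_checkpoint(dump_dir)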
Code example #10
def main(config):
    # config['seed'] = 4
    # experiment.set_name("pos task only, size = 15, logging vel")
    set_seed(config['seed'])
    # experiment.log_parameters(config)
    # experiment.log_parameter("task limit size", 3)

    # experiment.log_metric("seed", config['seed'])
    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = globals()[config['env']]()  # instantiate env
    print("env: ", env.sample_tasks)
    TASKSL1 = np.array([0, -0.3])
    env.set_tasks(TASKSL1)
    env = normalize(env)  # apply normalize wrapper to env

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        learning_rate=config['learning_rate'],
        num_ppo_steps=config['num_promp_steps'],
        clip_eps=config['clip_eps'],
        target_inner_step=config['target_inner_step'],
        init_inner_kl_penalty=config['init_inner_kl_penalty'],
        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
    )

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
    )

    trainer.train()
Code example #11
def main(args=None):
    idx = int(time.time())
    args = parse_args(args)

    config = {
        'seed': args.seed,
        'baseline': 'LinearFeatureBaseline',
        'env': 'ReachWorld',  # not used
        'rollouts_per_meta_task': args.rollout_per_meta_task,
        'max_path_length': args.max_path_length,  # 100
        'parallel': not args.seq,
        'discount': args.discount,
        'gae_lambda': args.gae_lambda,
        'normalize_adv': True,
        'hidden_sizes': args.hidden_sizes,
        'inner_lr': args.inner_lr,  # adaptation step size
        'learning_rate': args.learning_rate,  # meta-policy gradient step size
        'num_promp_steps': args.num_promp_steps,  # number of ProMP steps without re-sampling
        'clip_eps': args.clip_eps,  # clipping range
        'target_inner_step': args.target_inner_step,
        'init_inner_kl_penalty': args.init_inner_kl_penalty,
        'adaptive_inner_kl_penalty': args.adaptive_inner_kl_penalty,  # adaptive vs. fixed KL-penalty coefficient
        'n_itr': args.n_itr,  # number of overall training iterations
        'meta_batch_size': args.meta_batch_size,  # number of sampled meta-tasks per iteration
        'num_inner_grad_steps': args.num_inner_grad_steps,  # number of inner / adaptation gradient steps
    }

    # configure logger
    logger.configure(dir=args.dump_path,
                     format_strs=['stdout', 'log', 'csv'],
                     snapshot_mode='last_gap')

    # dump run configuration before starting training
    json.dump(config,
              open(args.dump_path + '/params.json', 'w'),
              cls=ClassEncoder)

    set_seed(config['seed'])

    baseline = globals()[config['baseline']]()  # instantiate baseline

    env = get_env()
    #env = normalize(env) # apply normalize wrapper to env

    if isinstance(env.action_space, gym.spaces.Box):
        action_dim = np.prod(env.action_space.shape)
    elif isinstance(env.action_space, gym.spaces.Discrete):
        action_dim = env.action_space.n
    else:
        raise Exception('unknown action space, cannot get action dim')

    policy = MetaCategoricalMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=action_dim,
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config[
            'rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=config['inner_lr'],
        meta_batch_size=config['meta_batch_size'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
        learning_rate=config['learning_rate'],
        num_ppo_steps=config['num_promp_steps'],
        clip_eps=config['clip_eps'],
        target_inner_step=config['target_inner_step'],
        init_inner_kl_penalty=config['init_inner_kl_penalty'],
        adaptive_inner_kl_penalty=config['adaptive_inner_kl_penalty'],
    )

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=config['n_itr'],
        num_inner_grad_steps=config['num_inner_grad_steps'],
    )

    trainer.train()
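
The args object above is assumed to expose one attribute per config entry. A minimal argparse sketch covering exactly the attributes referenced when building config; the flag defaults are illustrative assumptions, not the project's values:

import argparse

def parse_args(args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--rollout_per_meta_task', type=int, default=20)
    parser.add_argument('--max_path_length', type=int, default=100)
    parser.add_argument('--seq', action='store_true')  # sample sequentially instead of in parallel
    parser.add_argument('--discount', type=float, default=0.99)
    parser.add_argument('--gae_lambda', type=float, default=1.0)
    parser.add_argument('--hidden_sizes', type=int, nargs='+', default=[64, 64])
    parser.add_argument('--inner_lr', type=float, default=0.1)
    parser.add_argument('--learning_rate', type=float, default=1e-3)
    parser.add_argument('--num_promp_steps', type=int, default=5)
    parser.add_argument('--clip_eps', type=float, default=0.3)
    parser.add_argument('--target_inner_step', type=float, default=1e-2)
    parser.add_argument('--init_inner_kl_penalty', type=float, default=5e-4)
    parser.add_argument('--adaptive_inner_kl_penalty', action='store_true')
    parser.add_argument('--n_itr', type=int, default=1000)
    parser.add_argument('--meta_batch_size', type=int, default=40)
    parser.add_argument('--num_inner_grad_steps', type=int, default=1)
    parser.add_argument('--dump_path', type=str, default='./logs')
    return parser.parse_args(args)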
Code example #12
class TestLikelihoodRatio(unittest.TestCase):
    """
    Assert that the likelihood ratio at the first gradient step is approximately one, since pi_old = pi_new.
    """
    def setUp(self):
        self.env = env = MetaPointEnv()

        self.baseline = baseline = LinearFeatureBaseline()

        self.policy = policy = MetaGaussianMLPPolicy(
            name="meta-policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            meta_batch_size=10,
            hidden_sizes=(16, 16),
            learn_std=True,
            hidden_nonlinearity=tf.tanh,
            output_nonlinearity=None,
        )

        self.sampler = MetaSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=2,
            meta_batch_size=10,
            max_path_length=50,
            parallel=False,
        )

        self.sample_processor = MetaSampleProcessor(
            baseline=baseline,
            discount=0.99,
            gae_lambda=1.0,
            normalize_adv=True,
            positive_adv=False,
        )

        self.algo = ProMP(
            policy=policy,
            inner_lr=0.1,
            meta_batch_size=10,
            num_inner_grad_steps=2,
            learning_rate=1e-3,
            num_ppo_steps=5,
            num_minibatches=1,
            clip_eps=0.5,
            target_inner_step=2e-2,
            init_inner_kl_penalty=1e-3,
        )

    def test_likelihood_ratio(self):
        with tf.Session() as sess:

            # initialize uninitialized vars  (only initialize vars that were not loaded)
            uninit_vars = [
                var for var in tf.global_variables()
                if not sess.run(tf.is_variable_initialized(var))
            ]
            sess.run(tf.variables_initializer(uninit_vars))

            self.sampler.update_tasks()
            self.policy.switch_to_pre_update()  # Switch to pre-update policy

            all_samples_data, all_paths = [], []
            for step in range(1):
                """ -------------------- Sampling --------------------------"""
                paths = self.sampler.obtain_samples(log_prefix=str(step))
                all_paths.append(paths)
                """ ----------------- Processing Samples ---------------------"""
                samples_data = self.sample_processor.process_samples(paths,
                                                                     log=False)
                all_samples_data.append(samples_data)
                """ ------------------- Inner Policy Update --------------------"""
                obs_phs, action_phs, adv_phs, dist_info_phs, all_phs = self.algo._make_input_placeholders(
                    '')

                for i in range(self.algo.meta_batch_size):
                    obs = samples_data[i]['observations']
                    actions = samples_data[i]['actions']
                    agent_infos = samples_data[i]['agent_infos']
                    param_vals = self.policy.get_param_values()

                    likelihood_ratio_sym = self.policy.likelihood_ratio_sym(
                        obs_phs[i], action_phs[i], dist_info_phs[i],
                        self.policy.policies_params_phs[i])

                    feed_dict_params = dict(
                        zip(self.policy.policies_params_phs[i].values(),
                            param_vals.values()))

                    feed_dict_dist_infos = dict(
                        zip(dist_info_phs[i].values(), agent_infos.values()))

                    feed_dict = {obs_phs[i]: obs, action_phs[i]: actions}

                    feed_dict.update(feed_dict_params)
                    feed_dict.update(feed_dict_dist_infos)

                    lr = sess.run(likelihood_ratio_sym, feed_dict=feed_dict)

                    self.assertTrue(np.allclose(lr, 1))
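
Why the assertion holds: before any inner update, the "old" and "new" policy parameters are identical, so pi_new(a|s) / pi_old(a|s) = 1 for every sampled action. A minimal NumPy sketch of that ratio for a diagonal Gaussian policy (illustrative names, not the library's API):

import numpy as np

def gaussian_log_prob(actions, means, log_stds):
    # Log-density of a diagonal Gaussian, summed over action dimensions.
    z = (actions - means) / np.exp(log_stds)
    return -0.5 * np.sum(z ** 2 + 2.0 * log_stds + np.log(2.0 * np.pi), axis=-1)

actions = np.random.randn(5, 2)
means, log_stds = np.zeros((5, 2)), np.zeros((5, 2))

# Identical parameters on both sides of the ratio, as in the pre-update test above.
ratio = np.exp(gaussian_log_prob(actions, means, log_stds)
               - gaussian_log_prob(actions, means, log_stds))
assert np.allclose(ratio, 1.0)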
Code example #13
File: visualize_policy.py  Project: Zber5/MMAML-rl
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=params['meta_batch_size'],
        hidden_sizes=params['hidden_sizes'],
        cell_size=params['cell_size'],
        rollouts_per_meta_task=params['rollouts_per_meta_task'],
        max_path_length=params['max_path_length'],
        use_betas=params['use_betas'],
        shift_gammas=params['shift_gammas'],
    )

    sampler = MetaSampler(
        env=env,
        policy=policy,
        # This batch_size is confusing
        rollouts_per_meta_task=params['rollouts_per_meta_task'],
        meta_batch_size=params['meta_batch_size'],
        max_path_length=params['max_path_length'],
        parallel=params['parallel'],
    )

    sample_processor = MetaSampleProcessor(
        baseline=baseline,
        discount=params['discount'],
        gae_lambda=params['gae_lambda'],
        normalize_adv=params['normalize_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=params['inner_lr'],