Example #1
    def setUp(self):
        self.env = env = MetaPointEnv()

        self.baseline = baseline = LinearFeatureBaseline()

        self.policy = policy = MetaGaussianMLPPolicy(
            name="meta-policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            meta_batch_size=10,
            hidden_sizes=(16, 16),
            learn_std=True,
            hidden_nonlinearity=tf.tanh,
            output_nonlinearity=None,
        )

        self.sampler = MAMLSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=2,
            meta_batch_size=10,
            max_path_length=50,
            parallel=False,
        )

        self.sample_processor = MAMLSampleProcessor(
            baseline=baseline,
            discount=0.99,
            gae_lambda=1.0,
            normalize_adv=True,
            positive_adv=False,
        )

        self.algo = PPOMAML(
            policy=policy,
            inner_lr=0.1,
            meta_batch_size=10,
            num_inner_grad_steps=2,
            learning_rate=1e-3,
            num_ppo_steps=5,
            num_minibatches=1,
            clip_eps=0.5,
            clip_outer=True,
            target_outer_step=0,
            target_inner_step=2e-2,
            init_outer_kl_penalty=0,
            init_inner_kl_penalty=1e-3,
            adaptive_outer_kl_penalty=False,
            adaptive_inner_kl_penalty=True,
            anneal_factor=1.0,
        )
Example #2
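    # NOTE: `policy`, `baseline`, and the uppercase constants (BATCH_SIZE,
    # META_BATCH_SIZE, PATH_LENGTH, NUM_INNER_GRAD_STEPS) are presumably defined
    # earlier in the original script; they are not part of this excerpt.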
    env = normalize(AntRandGoalEnv())

    sampler = MAMLSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=BATCH_SIZE,
        meta_batch_size=META_BATCH_SIZE,
        max_path_length=PATH_LENGTH,
        parallel=True,
        envs_per_task=20,
    )

    sample_processor = MAMLSampleProcessor(
        baseline=baseline,
        discount=0.99,
        gae_lambda=1,
        normalize_adv=True,
        positive_adv=False,
    )

    # Doesn't matter which algo
    algo = VPGMAML(
        policy=policy,
        inner_lr=0.1,
        meta_batch_size=META_BATCH_SIZE,
        inner_type='likelihood_ratio',
        num_inner_grad_steps=NUM_INNER_GRAD_STEPS,
    )

    # initialize only vars that were not loaded (assumes an active tf.compat.v1.Session as `sess`)
    uninit_vars = [
        var for var in tf.compat.v1.global_variables()
        if not sess.run(tf.compat.v1.is_variable_initialized(var))
    ]
    sess.run(tf.compat.v1.variables_initializer(uninit_vars))
Example #3
def main(config):
    set_seed(config['seed'])
    tf.compat.v1.disable_eager_execution()
    physical_devices = tf.config.list_physical_devices('GPU')
    for gpu_id in range(len(physical_devices)):
        tf.config.experimental.set_memory_growth(physical_devices[gpu_id],
                                                 True)

    baseline = LinearFeatureBaseline()
    env = normalize(ENV_DICT[config['env']]())

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=config['meta_batch_size'],
        hidden_sizes=config['hidden_sizes'],
    )

    sampler = MAMLSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=config['rollouts_per_meta_task'],  # This batch_size is confusing
        meta_batch_size=config['meta_batch_size'],
        max_path_length=config['max_path_length'],
        parallel=config['parallel'],
    )

    sample_processor = MAMLSampleProcessor(
        baseline=baseline,
        discount=config['discount'],
        gae_lambda=config['gae_lambda'],
        normalize_adv=config['normalize_adv'],
        positive_adv=config['positive_adv'],
    )

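    # NOTE: `args` is assumed to be parsed at module level (e.g. via argparse);
    # only `config` is passed into main() itself.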
    Algo = VPGSGMRL if args.algo == 'sgmrl' else VPGMAML
    algo = Algo(policy=policy,
                inner_type=config['inner_type'],
                meta_batch_size=config['meta_batch_size'],
                num_inner_grad_steps=config['num_inner_grad_steps'],
                inner_lr=config['inner_lr'],
                learning_rate=config['learning_rate'],
                exploration=(args.algo == 'emaml'))

    trainer = Trainer(algo=algo,
                      policy=policy,
                      env=env,
                      sampler=sampler,
                      sample_processor=sample_processor,
                      n_itr=config['n_itr'],
                      num_inner_grad_steps=config['num_inner_grad_steps'])

    tester = Tester(algo=algo,
                    policy=policy,
                    env=env,
                    sampler=sampler,
                    sample_processor=sample_processor,
                    n_itr=50,
                    num_inner_grad_steps=config['num_inner_grad_steps'])

    best_itr = trainer.train(tester)
    print(best_itr)
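
For reference, a minimal config dict covering exactly the keys that main() reads above. Every value is an illustrative placeholder, not a repository default; in particular the 'env' string must match a key of the module's ENV_DICT, and the sketch assumes `args` has already been parsed as noted above.

config = {
    'seed': 1,                          # placeholder
    'env': 'ant-rand-goal',             # hypothetical ENV_DICT key
    'meta_batch_size': 20,
    'hidden_sizes': (64, 64),
    'rollouts_per_meta_task': 2,
    'max_path_length': 100,
    'parallel': False,
    'discount': 0.99,
    'gae_lambda': 1.0,
    'normalize_adv': True,
    'positive_adv': False,
    'inner_type': 'likelihood_ratio',   # as in Example #2
    'num_inner_grad_steps': 1,
    'inner_lr': 0.1,
    'learning_rate': 1e-3,
    'n_itr': 500,                       # placeholder
}

main(config)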
Example #4
def run_experiment(**kwargs):
    exp_dir = os.getcwd() + '/data/' + EXP_NAME
    logger.configure(dir=exp_dir,
                     format_strs=['stdout', 'log', 'csv'],
                     snapshot_mode='last_gap',
                     snapshot_gap=50)
    json.dump(kwargs,
              open(exp_dir + '/params.json', 'w'),
              indent=2,
              sort_keys=True,
              cls=ClassEncoder)

    # Instantiate classes
    set_seed(kwargs['seed'])

    baseline = kwargs['baseline']()

    env = normalize(kwargs['env']())  # Wrappers?

    policy = MetaGaussianMLPPolicy(
        name="meta-policy",
        obs_dim=np.prod(env.observation_space.shape),  # Todo...?
        action_dim=np.prod(env.action_space.shape),
        meta_batch_size=kwargs['meta_batch_size'],
        hidden_sizes=kwargs['hidden_sizes'],
        learn_std=kwargs['learn_std'],
        hidden_nonlinearity=kwargs['hidden_nonlinearity'],
        output_nonlinearity=kwargs['output_nonlinearity'],
    )

    # Load policy here

    sampler = MAMLSampler(
        env=env,
        policy=policy,
        rollouts_per_meta_task=kwargs['rollouts_per_meta_task'],
        meta_batch_size=kwargs['meta_batch_size'],
        max_path_length=kwargs['max_path_length'],
        parallel=kwargs['parallel'],
        envs_per_task=1,
    )

    sample_processor = MAMLSampleProcessor(
        baseline=baseline,
        discount=kwargs['discount'],
        gae_lambda=kwargs['gae_lambda'],
        normalize_adv=kwargs['normalize_adv'],
        positive_adv=kwargs['positive_adv'],
    )

    algo = ProMP(
        policy=policy,
        inner_lr=kwargs['inner_lr'],
        meta_batch_size=kwargs['meta_batch_size'],
        num_inner_grad_steps=kwargs['num_inner_grad_steps'],
        learning_rate=kwargs['learning_rate'],
        num_ppo_steps=kwargs['num_ppo_steps'],
        num_minibatches=kwargs['num_minibatches'],
        clip_eps=kwargs['clip_eps'],
        clip_outer=kwargs['clip_outer'],
        target_outer_step=kwargs['target_outer_step'],
        target_inner_step=kwargs['target_inner_step'],
        init_outer_kl_penalty=kwargs['init_outer_kl_penalty'],
        init_inner_kl_penalty=kwargs['init_inner_kl_penalty'],
        adaptive_outer_kl_penalty=kwargs['adaptive_outer_kl_penalty'],
        adaptive_inner_kl_penalty=kwargs['adaptive_inner_kl_penalty'],
        anneal_factor=kwargs['anneal_factor'],
    )

    trainer = Trainer(
        algo=algo,
        policy=policy,
        env=env,
        sampler=sampler,
        sample_processor=sample_processor,
        n_itr=kwargs['n_itr'],
        num_inner_grad_steps=kwargs['num_inner_grad_steps'],
    )

    trainer.train()
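
A minimal sketch of how run_experiment above might be invoked. The keyword names are exactly those read inside the function; the values are illustrative only (loosely mirroring the PPOMAML settings in Example #5), and the sketch assumes MetaPointEnv, LinearFeatureBaseline, and tf are imported as in the other examples.

run_experiment(
    seed=1,
    baseline=LinearFeatureBaseline,   # class; instantiated inside run_experiment
    env=MetaPointEnv,                 # class; wrapped with normalize() inside
    meta_batch_size=10,
    hidden_sizes=(16, 16),
    learn_std=True,
    hidden_nonlinearity=tf.tanh,
    output_nonlinearity=None,
    rollouts_per_meta_task=2,
    max_path_length=50,
    parallel=False,
    discount=0.99,
    gae_lambda=1.0,
    normalize_adv=True,
    positive_adv=False,
    inner_lr=0.1,
    num_inner_grad_steps=2,
    learning_rate=1e-3,
    num_ppo_steps=5,
    num_minibatches=1,
    clip_eps=0.5,
    clip_outer=True,
    target_outer_step=0,
    target_inner_step=2e-2,
    init_outer_kl_penalty=0,
    init_inner_kl_penalty=1e-3,
    adaptive_outer_kl_penalty=False,
    adaptive_inner_kl_penalty=True,
    anneal_factor=1.0,
    n_itr=300,                        # placeholder
)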
Example #5
class TestLikelihoodRation(unittest.TestCase):
    """
    Assert that the likelihood ratio at the first gradient step is approximately one, since pi_old = pi_new.
    """
    def setUp(self):
        self.env = env = MetaPointEnv()

        self.baseline = baseline = LinearFeatureBaseline()

        self.policy = policy = MetaGaussianMLPPolicy(
            name="meta-policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            meta_batch_size=10,
            hidden_sizes=(16, 16),
            learn_std=True,
            hidden_nonlinearity=tf.tanh,
            output_nonlinearity=None,
        )

        self.sampler = MAMLSampler(
            env=env,
            policy=policy,
            rollouts_per_meta_task=2,
            meta_batch_size=10,
            max_path_length=50,
            parallel=False,
        )

        self.sample_processor = MAMLSampleProcessor(
            baseline=baseline,
            discount=0.99,
            gae_lambda=1.0,
            normalize_adv=True,
            positive_adv=False,
        )

        self.algo = PPOMAML(
            policy=policy,
            inner_lr=0.1,
            meta_batch_size=10,
            num_inner_grad_steps=2,
            learning_rate=1e-3,
            num_ppo_steps=5,
            num_minibatches=1,
            clip_eps=0.5,
            clip_outer=True,
            target_outer_step=0,
            target_inner_step=2e-2,
            init_outer_kl_penalty=0,
            init_inner_kl_penalty=1e-3,
            adaptive_outer_kl_penalty=False,
            adaptive_inner_kl_penalty=True,
            anneal_factor=1.0,
        )

    def test_likelihood_ratio(self):
        with tf.compat.v1.Session() as sess:

            # initialize uninitialized vars  (only initialize vars that were not loaded)
            uninit_vars = [
                var for var in tf.compat.v1.global_variables()
                if not sess.run(tf.compat.v1.is_variable_initialized(var))
            ]
            sess.run(tf.compat.v1.variables_initializer(uninit_vars))

            self.sampler.update_tasks()
            self.policy.switch_to_pre_update()  # Switch to pre-update policy

            all_samples_data, all_paths = [], []
            for step in range(1):
                """ -------------------- Sampling --------------------------"""
                paths = self.sampler.obtain_samples(log_prefix=str(step))
                all_paths.append(paths)
                """ ----------------- Processing Samples ---------------------"""
                samples_data = self.sample_processor.process_samples(paths,
                                                                     log=False)
                all_samples_data.append(samples_data)
                """ ------------------- Inner Policy Update --------------------"""
                obs_phs, action_phs, adv_phs, dist_info_phs, all_phs = self.algo._make_input_placeholders(
                    '')

                for i in range(self.algo.meta_batch_size):
                    obs = samples_data[i]['observations']
                    actions = samples_data[i]['actions']
                    agent_infos = samples_data[i]['agent_infos']
                    param_vals = self.policy.get_param_values()

                    likelihood_ratio_sym = self.policy.likelihood_ratio_sym(
                        obs_phs[i], action_phs[i], dist_info_phs[i],
                        self.policy.policies_params_phs[i])

                    feed_dict_params = dict(
                        zip(self.policy.policies_params_phs[i].values(),
                            param_vals.values()))

                    feed_dict_dist_infos = dict(
                        zip(dist_info_phs[i].values(), agent_infos.values()))

                    feed_dict = {obs_phs[i]: obs, action_phs[i]: actions}

                    feed_dict.update(feed_dict_params)
                    feed_dict.update(feed_dict_dist_infos)

                    lr = sess.run(likelihood_ratio_sym, feed_dict=feed_dict)

                    self.assertTrue(np.allclose(lr, 1))