Example 1
# Note: this snippet relies on module-level imports that are not shown here:
# MPI (mpi4py), mujoco_py, set_global_seeds, make_robotics_env, and a ppo1
# package providing mlp_policy and pposgd_simple whose MlpPolicy accepts the
# sess/placeholders keyword arguments used below.
def train(env_id, num_timesteps, seed):
    """
    Train a PPO1 model on a Robotics environment, for testing purposes

    :param env_id: (str) Environment ID
    :param num_timesteps: (int) The total number of samples
    :param seed: (int) The initial seed for training
    """

    rank = MPI.COMM_WORLD.Get_rank()
    with mujoco_py.ignore_mujoco_warnings():
        workerseed = seed + 10000 * rank
        set_global_seeds(workerseed)
        env = make_robotics_env(env_id, workerseed, rank=rank)

        def policy_fn(name, ob_space, ac_space, sess=None, placeholders=None):
            return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=256, num_hid_layers=3,
                                        sess=sess, placeholders=placeholders)

        pposgd_simple.learn(env, policy_fn,
                            max_timesteps=num_timesteps,
                            timesteps_per_actorbatch=2048,
                            clip_param=0.2, entcoeff=0.0,
                            optim_epochs=5, optim_stepsize=3e-4, optim_batchsize=256,
                            gamma=0.99, lam=0.95, schedule='linear')
        env.close()
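
As a usage sketch for the function above (the environment ID, timestep budget, and seed are illustrative assumptions, not values from the original script):

# Illustrative call: one training job on a Fetch robotics task; under MPI,
# each rank derives its own worker seed from `seed` inside train().
train('FetchReach-v1', num_timesteps=int(1e6), seed=0)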
Example 2

# MPI, mujoco_py, set_global_seeds and make_robotics_env are assumed to be
# imported at module level (from mpi4py, mujoco-py, baselines.common and
# baselines.common.cmd_util respectively).
def train(env_id, num_timesteps, seed):
    from baselines.ppo1 import mlp_policy, pposgd_simple
    import baselines.common.tf_util as U
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()
    mujoco_py.ignore_mujoco_warnings().__enter__()
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)
    env = make_robotics_env(env_id, workerseed, rank=rank)

    def policy_fn(name, ob_space, ac_space):
        return mlp_policy.MlpPolicy(name=name,
                                    ob_space=ob_space,
                                    ac_space=ac_space,
                                    hid_size=256,
                                    num_hid_layers=3)

    pposgd_simple.learn(
        env,
        policy_fn,
        max_timesteps=num_timesteps,
        timesteps_per_actorbatch=2048,
        clip_param=0.2,
        entcoeff=0.0,
        optim_epochs=5,
        optim_stepsize=3e-4,
        optim_batchsize=256,
        gamma=0.99,
        lam=0.95,
        schedule='linear',
    )
    env.close()
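
This second variant differs from the first mainly in that it opens the TensorFlow session and the mujoco_py warning filter explicitly via __enter__() instead of using a with block. A hypothetical command-line entry point for either variant could look like the following sketch (the flag names and defaults are assumptions, not taken from the original run script):

import argparse

def main():
    # Minimal CLI wrapper around train(); the flags mirror its three parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='FetchReach-v1')
    parser.add_argument('--num-timesteps', type=int, default=int(1e6))
    parser.add_argument('--seed', type=int, default=0)
    args = parser.parse_args()
    train(args.env, num_timesteps=args.num_timesteps, seed=args.seed)

if __name__ == '__main__':
    main()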
Example 3

# _MujocoEnvs and _RoboticsEnvs are assumed to be module-level collections of
# supported environment IDs; make_mujoco_env and make_robotics_env come from
# baselines.common.cmd_util.
def train(env_id, num_timesteps, seed, hid_size=64, num_hid_layers=2):
    from baselines.ppo1 import mlp_policy, pposgd_simple
    assert env_id in (_MujocoEnvs + _RoboticsEnvs)
    def policy_fn(name, ob_space, ac_space):
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=hid_size, num_hid_layers=num_hid_layers)
    if env_id in _MujocoEnvs:
        env = make_mujoco_env(env_id, seed)
    elif env_id in _RoboticsEnvs:
        env = make_robotics_env(env_id, seed)
    else:
        raise ValueError('Environment `{0}` is not supported.'.format(env_id))
    # Not putting these params in config as we do not plan on changing them.
    optim_epochs = 10 if env_id in _MujocoEnvs else 5
    optim_batchsize = 64 if env_id in _MujocoEnvs else 256
    pi = pposgd_simple.learn(env, policy_fn,
            max_timesteps=num_timesteps,
            timesteps_per_actorbatch=2048,
            clip_param=0.2, entcoeff=0.0,
            optim_epochs=optim_epochs, optim_stepsize=3e-4,
            optim_batchsize=optim_batchsize,
            gamma=0.99, lam=0.95, schedule='linear',
        )
    env.close()
    return pi
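
Since this last variant refers to module-level environment lists that are not part of the snippet, here is a minimal sketch of how it could be wired up (the list contents and the call below are assumptions for illustration only):

# Hypothetical module-level registries of supported environment IDs.
_MujocoEnvs = ['HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2']
_RoboticsEnvs = ['FetchReach-v1', 'FetchPush-v1']

if __name__ == '__main__':
    # train() returns the trained policy so it can be evaluated or saved later.
    pi = train('Hopper-v2', num_timesteps=int(1e6), seed=0)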