# NOTE: the import block below is reconstructed so the snippet is runnable;
# the Gibson import path and the local mlp_policy/pposgd_sensor modules are
# assumptions based on the layout this example appears to come from.
import logging
import os

import gym
from mpi4py import MPI

from baselines import logger
from baselines.common import set_global_seeds, tf_util as U

from gibson.envs.ant_env import AntGibsonFlagRunEnv  # assumed Gibson import path

import mlp_policy     # local module in the original example directory (assumed)
import pposgd_sensor  # local module in the original example directory (assumed)


def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = U.single_threaded_session()
    sess.__enter__()
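    # Only the rank-0 MPI worker writes full log output; other ranks are silenced.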
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
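    # Offset the base seed by rank so each MPI worker samples different rollouts.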
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'ant_gibson_flagrun.yaml')
    print(config_file)

    env = AntGibsonFlagRunEnv(is_discrete=False, config=config_file)

    def mlp_policy_fn(name, sensor_space, ac_space):
        return mlp_policy.MlpPolicy(name=name, ob_space=sensor_space, ac_space=ac_space,
                                    hid_size=64, num_hid_layers=2)

    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

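    # PPO with the clipped surrogate objective (clip_param=0.2), trained on the
    # sensor observation only; schedule='linear' anneals the optimizer step
    # size (and effective clip range) over max_timesteps.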
    pposgd_sensor.learn(env, mlp_policy_fn,
        max_timesteps=int(num_timesteps * 1.1 * 5),
        timesteps_per_actorbatch=6000,
        clip_param=0.2, entcoeff=0.00,
        optim_epochs=4, optim_stepsize=1e-4, optim_batchsize=64,
        gamma=0.99, lam=0.95,
        schedule='linear',
        save_per_acts=500
    )
    env.close()
Example #2
# This second variant additionally depends on module-level names from its
# original script that are not shown here: `args` (parsed CLI flags),
# `LEARNING_RATE`, a `utils` helper providing make_gpu_session, `osp`
# (os.path), `Monitor`, `AntClimbEnv`, `fuse_policy`, and `pposgd_fuse`.
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'ant_climb.yaml')
    print(config_file)

    env = AntClimbEnv(config=config_file)
    
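    # Wrap with Monitor to record per-episode statistics under the logger
    # directory, one file per MPI rank.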
    env = Monitor(env, logger.get_dir() and
        osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    def mlp_policy_fn(name, sensor_space, ac_space):
        return mlp_policy.MlpPolicy(name=name, ob_space=sensor_space, ac_space=ac_space,
                                    hid_size=64, num_hid_layers=2)

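    # FusePolicy takes both the camera observation (ob_space) and the
    # proprioceptive sensor vector (sensor_space) and fuses them in one network.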
    def fuse_policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space,
                                      ac_space=ac_space, hid_size=64, num_hid_layers=2,
                                      save_per_acts=10000, session=sess)

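    # "SENSOR" mode trains the sensor-only MLP baseline; any other mode trains
    # the fused camera+sensor policy with smaller batches, a nonzero entropy
    # bonus, and optional weight reloading via args.reload_name.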
    if args.mode == "SENSOR":
        pposgd_sensor.learn(env, mlp_policy_fn,
            max_timesteps=int(num_timesteps * 1.1 * 5),
            timesteps_per_actorbatch=6000,
            clip_param=0.2, entcoeff=0.00,
            optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
            gamma=0.99, lam=0.95,
            schedule='linear',
            save_per_acts=100,
            save_name="ant_ppo_mlp"
        )
        env.close()
    else:
        pposgd_fuse.learn(env, fuse_policy_fn,
            max_timesteps=int(num_timesteps * 1.1),
            timesteps_per_actorbatch=2000,
            clip_param=0.2, entcoeff=0.01,
            optim_epochs=4, optim_stepsize=LEARNING_RATE, optim_batchsize=64,
            gamma=0.99, lam=0.95,
            schedule='linear',
            save_per_acts=50,
            save_name="ant_ppo_fuse",
            reload_name=args.reload_name
        )
        env.close()
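
# A minimal, hypothetical entry point for the second variant above. The flag
# names and the LEARNING_RATE value are assumptions reconstructed from how the
# function body uses them; the original script's CLI is not shown.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_timesteps', type=int, default=int(1e6))
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--mode', type=str, default='SENSOR')
    parser.add_argument('--num_gpu', type=int, default=1)
    parser.add_argument('--reload_name', type=str, default=None)
    args = parser.parse_args()  # train() reads these module-level names
    LEARNING_RATE = 1e-4        # assumed default for the fuse branch

    train(num_timesteps=args.num_timesteps, seed=args.seed)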