Code example #1
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()

    # Create a GPU-backed TF session and enter its context.
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()

    # Alternative: sess = U.single_threaded_session()
    # Optionally restore a previously saved graph before training:
    # if args.meta != "":
    #     saver = tf.train.import_meta_graph(args.meta)
    #     saver.restore(sess, tf.train.latest_checkpoint('./'))

    # Only the rank-0 MPI worker writes full logs.
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    # Give each worker a distinct seed so rollouts differ across ranks.
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    print(config_file)

    env = HuskyGibsonFlagRunEnv(config=config_file, gpu_count=args.gpu_count)
    print(env.sensor_space)

    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name,
                                      ob_space=ob_space,
                                      sensor_space=sensor_space,
                                      ac_space=ac_space,
                                      save_per_acts=10000,
                                      hid_size=64,
                                      num_hid_layers=2,
                                      session=sess)

    #env = bench.Monitor(env, logger.get_dir() and
    #                    osp.join(logger.get_dir(), str(rank)))
    #env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_fuse.learn(env,
                      policy_fn,
                      max_timesteps=int(num_timesteps * 1.1),
                      timesteps_per_actorbatch=2048,
                      clip_param=0.2,
                      entcoeff=0.01,
                      optim_epochs=4,
                      optim_stepsize=1e-3,
                      optim_batchsize=64,
                      gamma=0.99,
                      lam=0.95,
                      schedule='linear',
                      save_name=args.save_name,
                      save_per_acts=50,
                      reload_name=args.reload_name)

    env.close()
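
For context, a minimal, hypothetical entry point for train() above. Only the flags actually referenced in the example (num_gpu, gpu_count, meta, save_name, reload_name) are covered, the default values are placeholders, and the module-level imports (MPI, tf, gym, logger, utils, fuse_policy, pposgd_fuse, HuskyGibsonFlagRunEnv) are assumed to already exist in the source module.

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--num_gpu', type=int, default=1)
    parser.add_argument('--gpu_count', type=int, default=0)
    parser.add_argument('--meta', type=str, default="")
    parser.add_argument('--save_name', type=str, default="husky_flagrun")
    parser.add_argument('--reload_name', type=str, default=None)
    parser.add_argument('--num_timesteps', type=int, default=int(1e6))
    parser.add_argument('--seed', type=int, default=0)
    # args must be assigned at module level because train() reads it as a global.
    args = parser.parse_args()

    train(num_timesteps=args.num_timesteps, seed=args.seed)
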
Code example #2
def enjoy(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs',
                               'husky_gibson_flagrun_train_enjoy.yaml')
    env = HuskyGibsonFlagRunEnv(gpu_idx=0, config=config_file)
    # The training budget and per-actor batch size are derived from the env config.
    step = env.config['n_step']
    batch = env.config['n_batch']
    iteration = env.config['n_iter']
    num_timesteps = step * batch * iteration
    tpa = step * batch  # timesteps per actor batch

    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name,
                                      ob_space=ob_space,
                                      sensor_space=sensor_space,
                                      ac_space=ac_space,
                                      hid_size=128,
                                      num_hid_layers=2,
                                      save_per_acts=10,
                                      session=sess)

    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    # NOTE: this hard-coded checkpoint path overrides any value passed on the command line.
    args.reload_name = '/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/PPO_DEPTH_2019-11-30_500_32_100_100.model'

    pposgd_fuse.enjoy(
        env,
        policy_fn,
        max_timesteps=int(num_timesteps * 1.1),
        timesteps_per_actorbatch=tpa,
        clip_param=0.2,
        entcoeff=0.0,
        optim_epochs=4,
        optim_stepsize=3e-3,
        optim_batchsize=64,
        gamma=0.996,
        lam=0.95,
        schedule='linear',
        save_per_acts=25,
        reload_name=args.reload_name,
    )
    env.close()
Code example #3
def enjoy(num_timesteps, seed):

    sess = utils.make_gpu_session(1)
    sess.__enter__()

    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    env = HuskyGibsonFlagRunEnv(is_discrete=True,
                                gpu_count=0,
                                config=config_file)
    #env = bench.Monitor(env, logger.get_dir() and
    #                    osp.join(logger.get_dir(), str(rank)))

    env.seed(workerseed)

    # policy = cnn_policy.CnnPolicy(name='pi', ob_space=env.observation_space, ac_space=env.action_space, save_per_acts=10000, session=sess)
    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name,
                                      ob_space=ob_space,
                                      sensor_space=sensor_space,
                                      ac_space=ac_space,
                                      save_per_acts=10000,
                                      session=sess)

    policy = policy_fn("pi", env.observation_space, env.sensor_space,
                       env.action_space)  # Construct network for new policy

    reload_name = '/home/fei/Development/gibson/examples/train/models/flagrun_RGBD2_50.model'

    if reload_name:
        saver = tf.train.Saver()
        saver.restore(tf.get_default_session(), reload_name)
        print("Loaded model successfully.")

    def execute_policy(env):
        # Run the trained policy deterministically forever, resetting on episode end.
        ob, ob_sensor = env.reset()
        stochastic = False
        while True:
            # with Profiler("agent act"):
            ac, vpred = policy.act(stochastic, ob, ob_sensor)
            ob, rew, new, meta = env.step(ac)
            ob_sensor = meta['sensor']
            if new:
                ob, ob_sensor = env.reset()

    gym.logger.setLevel(logging.WARN)
    #sess.run(init_op)
    execute_policy(env)
    env.close()
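
As a usage note, a purely illustrative bounded variant of the rollout loop above. It assumes the same interfaces as in the example: env.reset() returns (ob, ob_sensor), env.step() returns the observation, reward, done flag, and an info dict whose 'sensor' entry holds the non-visual sensor observation, and policy.act() returns an action and a value estimate. The name execute_policy_bounded and the max_steps parameter are hypothetical.

def execute_policy_bounded(env, policy, max_steps=10000):
    # Same rollout as execute_policy above, but stop after max_steps
    # and report each episode's cumulative reward.
    ob, ob_sensor = env.reset()
    ep_return, stochastic = 0.0, False
    for _ in range(max_steps):
        ac, _vpred = policy.act(stochastic, ob, ob_sensor)
        ob, rew, done, meta = env.step(ac)
        ob_sensor = meta['sensor']
        ep_return += rew
        if done:
            print("episode return: {:.2f}".format(ep_return))
            ep_return = 0.0
            ob, ob_sensor = env.reset()
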
Code example #4
def train(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    # Alternative: sess = U.single_threaded_session()

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * rank
    set_global_seeds(workerseed)
    use_filler = not args.disable_filler

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    print(config_file)

    env = HuskyGibsonFlagRunEnv(config=config_file, gpu_idx=args.gpu_idx)
    step = env.config['n_step']
    batch = env.config['n_batch']
    iteration = env.config['n_iter']
    elm_policy = env.config['elm_active']
    num_timesteps = step * batch * iteration
    tpa = step * batch

    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name,
                                      ob_space=ob_space,
                                      sensor_space=sensor_space,
                                      ac_space=ac_space,
                                      save_per_acts=10000,
                                      hid_size=128,
                                      num_hid_layers=4,
                                      session=sess,
                                      elm_mode=elm_policy)

    #env = bench.Monitor(env, logger.get_dir() and
    #                    osp.join(logger.get_dir(), str(rank)))

    args.reload_name = "/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/FLAG_DEPTH_2020-04-04_500_20_105_180.model"
    print(args.reload_name)

    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_fuse.learn(env,
                      policy_fn,
                      max_timesteps=int(num_timesteps * 1.1),
                      timesteps_per_actorbatch=tpa,
                      clip_param=0.2,
                      entcoeff=0.01,
                      optim_epochs=4,
                      optim_stepsize=1e-3,
                      optim_batchsize=64,
                      gamma=0.99,
                      lam=0.95,
                      schedule='linear',
                      save_name="FLAG_{}_{}_{}_{}_{}".format(
                          args.mode, datetime.date.today(), step, batch,
                          iteration),
                      save_per_acts=15,
                      reload_name=args.reload_name)
    env.close()