Example #1
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
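    # Only the rank-0 worker configures full logging; all other workers log nothing.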
    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
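    # Offset the base seed by the MPI rank so every worker samples differently.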
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'ant_climb.yaml')
    print(config_file)

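    # Build the climbing environment from the YAML config and wrap it in a Monitor so episode stats land in the logger directory.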
    env = AntClimbEnv(config=config_file)
    
    env = Monitor(env, logger.get_dir() and
        osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

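    # Two policy constructors: a plain MLP over the proprioceptive sensor space, and a fused policy that also consumes camera observations.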
    def mlp_policy_fn(name, sensor_space, ac_space):
        return mlp_policy.MlpPolicy(name=name, ob_space=sensor_space, ac_space=ac_space, hid_size=64, num_hid_layers=2)

    def fuse_policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space, hid_size=64, num_hid_layers=2, ac_space=ac_space, save_per_acts=10000, session=sess)

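    # SENSOR mode trains the sensor-only MLP policy; any other mode trains the fused camera+sensor policy, optionally resuming from args.reload_name.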
    if args.mode == "SENSOR":
        pposgd_sensor.learn(env, mlp_policy_fn,
            max_timesteps=int(num_timesteps * 1.1 * 5),
            timesteps_per_actorbatch=6000,
            clip_param=0.2, entcoeff=0.00,
            optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
            gamma=0.99, lam=0.95,
            schedule='linear',
            save_per_acts=100,
            save_name="ant_ppo_mlp"
        )
        env.close()
    else:
        pposgd_fuse.learn(env, fuse_policy_fn,
            max_timesteps=int(num_timesteps * 1.1),
            timesteps_per_actorbatch=2000,
            clip_param=0.2, entcoeff=0.01,
            optim_epochs=4, optim_stepsize=LEARNING_RATE, optim_batchsize=64,
            gamma=0.99, lam=0.95,
            schedule='linear',
            save_per_acts=50,
            save_name="ant_ppo_fuse",
            reload_name=args.reload_name
        )
        env.close()
Example #2
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()

    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()

    # sess = U.single_threaded_session()
    # if args.meta != "":
    #     saver = tf.train.import_meta_graph(args.meta)
    #     saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    print(config_file)

    env = HuskyGibsonFlagRunEnv(config=config_file, gpu_count=args.gpu_count)

    print(env.sensor_space)

    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space, ac_space=ac_space, save_per_acts=10000, hid_size=64, num_hid_layers=2, session=sess)

    #env = bench.Monitor(env, logger.get_dir() and
    #                    osp.join(logger.get_dir(), str(rank)))
    #env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)


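    # Run PPO with the fused image+sensor policy; save_per_acts and save_name control checkpointing, and reload_name resumes from an earlier model if given.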
    pposgd_fuse.learn(env, policy_fn,
                        max_timesteps=int(num_timesteps * 1.1),
                        timesteps_per_actorbatch=2048,
                        clip_param=0.2, entcoeff=0.01,
                        optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
                        gamma=0.99, lam=0.95,
                        schedule='linear',
                        save_name=args.save_name,
                        save_per_acts=50,
                        reload_name=args.reload_name
                        )


    env.close()
Example #3
def train(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
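    # If a meta-graph path was supplied, restore the latest checkpoint from the working directory before training.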
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    # sess = U.single_threaded_session()

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)
    use_filler = not args.disable_filler

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs',
                               'husky_gibson_flagrun_train.yaml')
    print(config_file)

    env = HuskyGibsonFlagRunEnv(config=config_file, gpu_idx=args.gpu_idx)
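    # The training horizon comes from the env config: n_step * n_batch timesteps per actor batch, for n_iter iterations in total.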
    step = env.config['n_step']
    batch = env.config['n_batch']
    iteration = env.config['n_iter']
    elm_policy = env.config['elm_active']
    num_timesteps = step * batch * iteration
    tpa = step * batch

    def policy_fn(name, ob_space, sensor_space, ac_space):
        return fuse_policy.FusePolicy(name=name,
                                      ob_space=ob_space,
                                      sensor_space=sensor_space,
                                      ac_space=ac_space,
                                      save_per_acts=10000,
                                      hid_size=128,
                                      num_hid_layers=4,
                                      session=sess,
                                      elm_mode=elm_policy)

    #env = bench.Monitor(env, logger.get_dir() and
    #                    osp.join(logger.get_dir(), str(rank)))

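    # Note: this hard-coded checkpoint path overrides whatever was passed on the command line.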
    args.reload_name = "/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/FLAG_DEPTH_2020-04-04_500_20_105_180.model"
    print(args.reload_name)

    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_fuse.learn(env,
                      policy_fn,
                      max_timesteps=int(num_timesteps * 1.1),
                      timesteps_per_actorbatch=tpa,
                      clip_param=0.2,
                      entcoeff=0.01,
                      optim_epochs=4,
                      optim_stepsize=1e-3,
                      optim_batchsize=64,
                      gamma=0.99,
                      lam=0.95,
                      schedule='linear',
                      save_name="FLAG_{}_{}_{}_{}_{}".format(
                          args.mode, datetime.date.today(), step, batch,
                          iteration),
                      save_per_acts=15,
                      reload_name=args.reload_name)
    env.close()
Example #4
def train(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()

    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)
    use_filler = not args.disable_filler

    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'config_husky.yaml')
    print(config_file)

    raw_env = HuskyNavigateEnv(gpu_idx=args.gpu_idx, config=config_file)
    step = raw_env.config['n_step']
    episode = raw_env.config['n_episode']
    iteration = raw_env.config['n_iter']
    elm_policy = raw_env.config['elm_active']
    num_timesteps = step * episode * iteration
    tpa = step * episode

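    # Pick the policy constructor from args.mode: SENSOR uses a sensor-only MLP, DEPTH/RGB a fused CNN+MLP, RESNET and ODE their respective fused variants, anything else a plain CNN over images.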
    if args.mode == "SENSOR":  #Blind Mode

        def policy_fn(name, ob_space, ac_space):
            return mlp_policy.MlpPolicy(name=name,
                                        ob_space=ob_space,
                                        ac_space=ac_space,
                                        hid_size=128,
                                        num_hid_layers=4,
                                        elm_mode=elm_policy)
    elif args.mode == "DEPTH" or args.mode == "RGB":  #Fusing sensor space with image space

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return fuse_policy.FusePolicy(name=name,
                                          ob_space=ob_space,
                                          sensor_space=sensor_space,
                                          ac_space=ac_space,
                                          save_per_acts=10000,
                                          hid_size=128,
                                          num_hid_layers=4,
                                          session=sess,
                                          elm_mode=elm_policy)

    elif args.mode == "RESNET":

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return resnet_policy.ResPolicy(name=name,
                                           ob_space=ob_space,
                                           sensor_space=sensor_space,
                                           ac_space=ac_space,
                                           save_per_acts=10000,
                                           hid_size=128,
                                           num_hid_layers=4,
                                           session=sess,
                                           elm_mode=elm_policy)

    elif args.mode == "ODE":

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return ode_policy.ODEPolicy(name=name,
                                        ob_space=ob_space,
                                        sensor_space=sensor_space,
                                        ac_space=ac_space,
                                        save_per_acts=10000,
                                        hid_size=128,
                                        num_hid_layers=4,
                                        session=sess,
                                        elm_mode=elm_policy)

    else:  #Using only image space

        def policy_fn(name, ob_space, ac_space):
            return cnn_policy.CnnPolicy(name=name,
                                        ob_space=ob_space,
                                        ac_space=ac_space,
                                        session=sess,
                                        kind='small')

    env = Monitor(raw_env,
                  logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

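    # As above, the checkpoint path is hard-coded here and overrides the command-line value.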
    args.reload_name = '/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/PPO_ODE_2020-12-05_500_50_137_150.model'
    print(args.reload_name)

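    # Camera-based modes train with pposgd_fuse (image + sensor observations); SENSOR and the plain-CNN case fall back to pposgd_simple with a sensor flag.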
    modes_camera = ["DEPTH", "RGB", "RESNET", "ODE"]
    if args.mode in modes_camera:
        pposgd_fuse.learn(env,
                          policy_fn,
                          max_timesteps=int(num_timesteps * 1.1),
                          timesteps_per_actorbatch=tpa,
                          clip_param=0.2,
                          entcoeff=0.03,
                          vfcoeff=0.01,
                          optim_epochs=4,
                          optim_stepsize=1e-3,
                          optim_batchsize=64,
                          gamma=0.99,
                          lam=0.95,
                          schedule='linear',
                          save_name="PPO_{}_{}_{}_{}_{}".format(
                              args.mode, datetime.date.today(), step, episode,
                              iteration),
                          save_per_acts=15,
                          reload_name=args.reload_name)
    else:
        if args.mode == "SENSOR": sensor = True
        else: sensor = False
        pposgd_simple.learn(env,
                            policy_fn,
                            max_timesteps=int(num_timesteps * 1.1),
                            timesteps_per_actorbatch=tpa,
                            clip_param=0.2,
                            entcoeff=0.03,
                            vfcoeff=0.01,
                            optim_epochs=4,
                            optim_stepsize=1e-3,
                            optim_batchsize=64,
                            gamma=0.996,
                            lam=0.95,
                            schedule='linear',
                            save_name="PPO_{}_{}_{}_{}_{}".format(
                                args.mode, datetime.date.today(), step,
                                episode, iteration),
                            save_per_acts=15,
                            sensor=sensor,
                            reload_name=args.reload_name)
    env.close()