Example 1
# Assumed imports for this snippet (not shown in the original extract):
import numpy as np
from gibson.envs.husky_env import HuskyNavigateEnv

def game_process(conn):
    env = HuskyNavigateEnv(config='gibson_configs/beechwood_c0_rgb_skip50_random_initial_separate.yaml')
    conn.send(0)  # signal to the parent that the environment is ready
    done = False
    while True:
        cmd, arg = conn.recv()
        if cmd=='reset':
            ob = env.reset()
            done = False
            conn.send(ob)
        elif cmd=='action':
            ob, reward, done, info = env.step(arg)
            running = not done  # avoid an unbound 'running' if 'action' arrives before a 'running' query
            # Rotate world-frame linear and angular velocity into the robot's
            # heading frame using the current yaw.
            r, p, yaw = env.robot.body_rpy
            rot_speed = np.array(
                [[np.cos(-yaw), -np.sin(-yaw), 0],
                 [np.sin(-yaw),  np.cos(-yaw), 0],
                 [            0,             0, 1]]
            )
            vx, vy, vz = np.dot(rot_speed, env.robot.robot_body.speed())
            avx, avy, avz = np.dot(rot_speed, env.robot.robot_body.angular_speed())
            vel = [vx, vy, vz, avx, avy, avz]
            conn.send([ob, reward, running, vel])
        elif cmd=='observe':
            eye_pos, eye_quat = env.get_eye_pos_orientation()
            pose = [eye_pos, eye_quat]
            ob = env.render_observations(pose)
            conn.send([ob])
        elif cmd=='running':
            running = not done
            conn.send([running])
        elif cmd=='stop':
            break
    env.close()
    conn.send(0)  # acknowledge shutdown before closing the pipe
    conn.close()
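
A minimal sketch of the parent side of this pipe protocol, assuming the child is launched with multiprocessing.Pipe; the command names and reply shapes follow the branches above, everything else (including the action value) is illustrative:

# Hypothetical driver for game_process (not part of the original example).
from multiprocessing import Process, Pipe

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    proc = Process(target=game_process, args=(child_conn,))
    proc.start()
    parent_conn.recv()                        # wait for the env to finish loading
    parent_conn.send(('reset', None))
    ob = parent_conn.recv()
    parent_conn.send(('action', 0))           # step with one illustrative action index
    ob, reward, running, vel = parent_conn.recv()
    parent_conn.send(('stop', None))
    parent_conn.recv()                        # final acknowledgement before close
    proc.join()
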
Example 2
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    #sess = U.single_threaded_session()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    use_filler = not args.disable_filler
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'husky_navigate_train.yaml')
    print(config_file)

    env = HuskyNavigateEnv(is_discrete=True,
                           gpu_count=args.gpu_count,
                           config=config_file)

    policy = policy_fn("pi", env.observation_space,
                       env.action_space)  # Construct network for new policy

    def execute_policy(env):
        ob, ob_sensor = env.reset()
        stochastic = True
        while True:
            #with Profiler("agent act"):
            ac, vpred = policy.act(stochastic, ob)
            ob, rew, new, meta = env.step(ac)

            if new:
                ob, ob_sensor = env.reset()

    env = bench.Monitor(
        env,
        logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_simple.enjoy(env,
                        policy_fn,
                        max_timesteps=int(num_timesteps * 1.1),
                        timesteps_per_actorbatch=1024,
                        clip_param=0.2,
                        entcoeff=0.01,
                        optim_epochs=4,
                        optim_stepsize=1e-3,
                        optim_batchsize=64,
                        gamma=0.99,
                        lam=0.95,
                        schedule='linear',
                        save_per_acts=50,
                        reload_name=args.reload_name)
    env.close()
def train(num_timesteps, seed):
    rank = MPI.COMM_WORLD.Get_rank()
    #sess = U.single_threaded_session()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    use_filler = not args.disable_filler
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'configs',
                               'husky_navigate_enjoy.yaml')
    print(config_file)

    env = HuskyNavigateEnv(gpu_idx=args.gpu_idx, config=config_file)

    def policy_fn(name, ob_space, ac_space):
        if args.mode == "SENSOR":
            return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=64, num_hid_layers=2)
        else:
            #return fuse_policy.FusePolicy(name=name, ob_space=ob_space, sensor_space=sensor_space, ac_space=ac_space, save_per_acts=10000, session=sess)
            return cnn_policy.CnnPolicy(name=name, ob_space=ob_space, ac_space=ac_space, save_per_acts=10000, session=sess, kind='small')

    env = Monitor(env, logger.get_dir() and
        osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    pposgd_simple.enjoy(env, policy_fn,
        max_timesteps=int(num_timesteps * 1.1),
        timesteps_per_actorbatch=1024,
        clip_param=0.2, entcoeff=0.01,
        optim_epochs=4, optim_stepsize=1e-3, optim_batchsize=64,
        gamma=0.99, lam=0.95,
        schedule='linear',
        save_per_acts=50,
        sensor=args.mode=="SENSOR",
        reload_name=args.reload_name
    )
    env.close()
                    os.mkdir(RGB_PATH + 'rgb')  #create rgb dir
                    os.mkdir(RGB_PATH + 'depth')

                    pos = get_present_pos_with_robotheight((x, y, z_position))
                    orn = (3.14, 0, random.uniform(0, 2 * np.pi))
                    if not setting_initial_point(pos, orn):
                        continue

                    #p.setGravity(0,0,0)#Set gravity in three axis to be 0
                    obj_id = env.robot.robot_ids
                    ground_id, = env.ground_ids
                    enableCollision = 0  # 1: ENABLE, 0: DISABLE
                    #p.setCollisionFilterPair(obj_id[0],ground_id[0],-1,-1,enableCollision)

                    for n in range(NUMBER_OF_ACTION):
                        pos, orn = trajectory_generator(
                            pos, orn, bimap, map_para, RGB_PATH)
                        if pos == 0:  # abandon trajectories that trap the robot
                            shutil.rmtree(PATH + 'data/' + str((x, y)))
                            break
                    else:
                        number_of_trajectory -= 1
                #cv2.imwrite(PATH+'tra_bin.png',bimap)
                cv2.imwrite(PATH + 'bi_map_adj.png', bimap)
                print('model:', folder, 'floor:', floor, 'done!')
        LIMITED_MODEL -= 1

        env.close()

    print('done')
Example 5
def train(seed):
    rank = MPI.COMM_WORLD.Get_rank()
    #sess = U.single_threaded_session()
    sess = utils.make_gpu_session(args.num_gpu)
    sess.__enter__()
    if args.meta != "":
        saver = tf.train.import_meta_graph(args.meta)
        saver.restore(sess, tf.train.latest_checkpoint('./'))

    if rank == 0:
        logger.configure()
    else:
        logger.configure(format_strs=[])
    workerseed = seed + 10000 * MPI.COMM_WORLD.Get_rank()
    set_global_seeds(workerseed)

    use_filler = not args.disable_filler
    config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '..', 'configs', 'config_husky_enjoy.yaml')
    #config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),'..','configs','husky_gibson_flagrun_train.yaml')
    print(config_file)

    env = HuskyNavigateEnv(gpu_idx=args.gpu_idx, config=config_file)
    #env = HuskyGibsonFlagRunEnv(gpu_idx=args.gpu_idx, config = config_file)
    step = env.config['n_step']
    episode = env.config['n_episode']
    iteration = env.config['n_iter']
    target = env.config['target_pos']
    elm_policy = env.config['elm_active']
    num_timesteps = step * episode * iteration
    tpa = step * episode

    if args.mode == "SENSOR":  # Blind Mode

        def policy_fn(name, ob_space, ac_space):
            return mlp_policy.MlpPolicy(name=name,
                                        ob_space=ob_space,
                                        ac_space=ac_space,
                                        hid_size=128,
                                        num_hid_layers=4,
                                        elm_mode=elm_policy)
    elif args.mode == "DEPTH" or args.mode == "RGB":  # Fusing sensor space with image space

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return fuse_policy.FusePolicy(name=name,
                                          ob_space=ob_space,
                                          sensor_space=sensor_space,
                                          ac_space=ac_space,
                                          save_per_acts=10000,
                                          hid_size=128,
                                          num_hid_layers=4,
                                          session=sess,
                                          elm_mode=elm_policy)

    elif args.mode == "RESNET":

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return resnet_policy.ResPolicy(name=name,
                                           ob_space=ob_space,
                                           sensor_space=sensor_space,
                                           ac_space=ac_space,
                                           save_per_acts=10000,
                                           hid_size=128,
                                           num_hid_layers=4,
                                           session=sess,
                                           elm_mode=elm_policy)

    elif args.mode == "ODE":

        def policy_fn(name, ob_space, sensor_space, ac_space):
            return ode_policy.ODEPolicy(name=name,
                                        ob_space=ob_space,
                                        sensor_space=sensor_space,
                                        ac_space=ac_space,
                                        save_per_acts=10000,
                                        hid_size=128,
                                        num_hid_layers=4,
                                        session=sess,
                                        elm_mode=elm_policy)

    else:  # Using only image space

        def policy_fn(name, ob_space, ac_space):
            return cnn_policy.CnnPolicy(name=name,
                                        ob_space=ob_space,
                                        ac_space=ac_space,
                                        session=sess,
                                        kind='small')

    env = Monitor(env,
                  logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
    env.seed(workerseed)
    gym.logger.setLevel(logging.WARN)

    args.reload_name = '/home/berk/PycharmProjects/Gibson_Exercise/gibson/utils/models/PPO_ODE_2020-12-05_500_50_137_150.model'
    #args.reload_name = '/home/berk/PycharmProjects/Original_Gibs/gibson/utils/models/flagrun_RGBD2_150.model'

    print(args.reload_name)

    modes_camera = ["DEPTH", "RGB", "RESNET", "ODE"]
    if args.mode in modes_camera:
        pposgd_fuse.enjoy(env,
                          policy_fn,
                          max_timesteps=int(num_timesteps * 1.1),
                          timesteps_per_actorbatch=tpa,
                          clip_param=0.2,
                          entcoeff=0.03,
                          optim_epochs=4,
                          optim_stepsize=1e-3,
                          optim_batchsize=64,
                          gamma=0.99,
                          lam=0.95,
                          schedule='linear',
                          save_name="PPO_{}_{}_{}_{}_{}".format(
                              args.mode, datetime.date.today(), step, episode,
                              iteration),
                          save_per_acts=10,
                          reload_name=args.reload_name)
    else:
        if args.mode == "SENSOR": sensor = True
        else: sensor = False
        pposgd_simple.enjoy(env,
                            policy_fn,
                            max_timesteps=int(num_timesteps * 1.1),
                            timesteps_per_actorbatch=tpa,
                            clip_param=0.2,
                            entcoeff=0.03,
                            optim_epochs=4,
                            optim_stepsize=1e-3,
                            optim_batchsize=64,
                            gamma=0.996,
                            lam=0.95,
                            schedule='linear',
                            save_name="PPO_{}_{}_{}_{}_{}".format(
                                args.mode, datetime.date.today(), step,
                                episode, iteration),
                            save_per_acts=10,
                            sensor=sensor,
                            reload_name=args.reload_name)

    env.close()
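
The train/enjoy scripts above all read a module-level args object; a hypothetical parser covering the attributes they access (num_gpu, gpu_idx, gpu_count, meta, disable_filler, mode, reload_name) might look like the sketch below, with flag names mirroring those attribute accesses and defaults that are guesses rather than values from the original code:

# Illustrative argument parsing; not part of the original examples.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--num_gpu', type=int, default=1)         # GPUs for make_gpu_session
parser.add_argument('--gpu_idx', type=int, default=0)         # GPU index passed to the env
parser.add_argument('--gpu_count', type=int, default=1)       # used by the first train() example
parser.add_argument('--meta', type=str, default="")           # optional .meta graph to restore
parser.add_argument('--disable_filler', action='store_true')
parser.add_argument('--mode', type=str, default="RGB")        # SENSOR / DEPTH / RGB / RESNET / ODE
parser.add_argument('--reload_name', type=str, default=None)  # checkpoint to reload
args = parser.parse_args()
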