示例#1
0
def test_dataset_splitting(split):
    """Verify two half-splits of the dataset exactly partition the full set."""
    cfg = get_config(CFG_MULTI_TEST).DATASET
    cfg.defrost()
    cfg.SPLIT = split

    if not PointNavDatasetV1.check_config_paths_exist(cfg):
        pytest.skip("Test skipped as dataset files are missing.")

    scenes = PointNavDatasetV1.get_scenes_to_load(config=cfg)
    assert len(scenes) > 0, (
        "Expected dataset contains separate episode file per scene.")

    def episode_keys(dataset):
        # Identify each episode by (scene, id) so the sets can be compared.
        return {(ep.scene_id, ep.episode_id) for ep in dataset.episodes}

    cfg.CONTENT_SCENES = scenes[:PARTIAL_LOAD_SCENES]
    full_episodes = episode_keys(make_dataset(id_dataset=cfg.TYPE, config=cfg))

    half = PARTIAL_LOAD_SCENES // 2
    cfg.CONTENT_SCENES = scenes[:half]
    split1_episodes = episode_keys(
        make_dataset(id_dataset=cfg.TYPE, config=cfg))

    cfg.CONTENT_SCENES = scenes[half:PARTIAL_LOAD_SCENES]
    split2_episodes = episode_keys(
        make_dataset(id_dataset=cfg.TYPE, config=cfg))

    assert full_episodes == split1_episodes | split2_episodes, (
        "Split dataset is not equal to full dataset")
    assert not (split1_episodes & split2_episodes), (
        "Intersection of split datasets is not the empty set")
示例#2
0
文件: gibson.py 项目: srk97/Reinforce
def gibson_env(hparams):
    """Build a single NavRLEnv for the Gibson PointNav task from hparams."""
    config_dir = os.path.join(dirname, 'configs')
    basic_config = cfg_env(config_file=hparams.task_config,
                           config_dir=config_dir)
    scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)

    config_env = cfg_env(config_file=hparams.task_config,
                         config_dir=config_dir)
    config_env.defrost()
    if scenes:
        random.shuffle(scenes)
        config_env.DATASET.POINTNAVV1.CONTENT_SCENES = scenes
    for sensor in hparams.sensors:
        assert sensor in ["RGB_SENSOR", "DEPTH_SENSOR"]
    config_env.SIMULATOR.AGENT_0.SENSORS = hparams.sensors
    config_env.freeze()

    config_baseline = cfg_baseline()
    dataset = PointNavDatasetV1(config_env.DATASET)

    # The simulator must load the scene of the dataset's first episode.
    config_env.defrost()
    config_env.SIMULATOR.SCENE = dataset.episodes[0].scene_id
    config_env.freeze()

    return NavRLEnv(config_env=config_env,
                    config_baseline=config_baseline,
                    dataset=dataset)
def test_multiple_files_scene_path():
    """Check scene ids resolve to existing files when SCENES_DIR is absolute."""
    cfg = get_config(CFG_MULTI_TEST).DATASET
    if not PointNavDatasetV1.check_config_paths_exist(cfg):
        pytest.skip("Test skipped as dataset files are missing.")
    scenes = PointNavDatasetV1.get_scenes_to_load(config=cfg)
    assert len(scenes) > 0, (
        "Expected dataset contains separate episode file per scene.")
    cfg.defrost()
    cfg.CONTENT_SCENES = scenes[:PARTIAL_LOAD_SCENES]
    cfg.SCENES_DIR = os.path.join(os.getcwd(), DEFAULT_SCENE_PATH_PREFIX)
    cfg.freeze()
    partial_dataset = make_dataset(id_dataset=cfg.TYPE, config=cfg)
    assert len(partial_dataset.scene_ids) == PARTIAL_LOAD_SCENES, (
        "Number of loaded scenes doesn't correspond.")
    first_scene = partial_dataset.episodes[0].scene_id
    print(first_scene)
    assert os.path.exists(first_scene), (
        "Scene file {} doesn't exist using absolute path".format(first_scene))
示例#4
0
def test_single_pointnav_dataset():
    """Load the default PointNav dataset and sanity-check its contents."""
    cfg = get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(cfg):
        pytest.skip("Test skipped as dataset files are missing.")
    scenes = PointNavDatasetV1.get_scenes_to_load(config=cfg)
    assert len(scenes) > 0, (
        "Expected dataset contains separate episode file per scene.")
    dataset = PointNavDatasetV1(config=cfg)
    assert len(dataset.episodes) > 0, "The dataset shouldn't be empty."
    assert len(dataset.scene_ids) == 2, (
        "The test dataset scenes number is wrong.")
    check_json_serializaiton(dataset)
示例#5
0
def test_multiple_files_pointnav_dataset(cfg_path: str):
    """Load every scene of a multi-file dataset and round-trip it to JSON."""
    cfg = get_config(cfg_path).DATASET
    if not PointNavDatasetV1.check_config_paths_exist(cfg):
        pytest.skip("Test skipped as dataset files are missing.")
    scenes = PointNavDatasetV1.get_scenes_to_load(config=cfg)
    assert len(scenes) > 0, (
        "Expected dataset contains separate episode file per scene.")
    cfg.defrost()
    cfg.CONTENT_SCENES = scenes
    cfg.freeze()
    partial_dataset = make_dataset(id_dataset=cfg.TYPE, config=cfg)
    check_json_serializaiton(partial_dataset)
示例#6
0
def test_multiple_files_pointnav_dataset():
    """Partially load a multi-file dataset and verify the scene count."""
    cfg = get_config(CFG_MULTI_TEST).DATASET
    if not PointNavDatasetV1.check_config_paths_exist(cfg):
        pytest.skip("Test skipped as dataset files are missing.")
    scenes = PointNavDatasetV1.get_scenes_to_load(config=cfg)
    assert len(scenes) > 0, (
        "Expected dataset contains separate episode file per scene.")
    cfg.defrost()
    cfg.CONTENT_SCENES = scenes[:PARTIAL_LOAD_SCENES]
    cfg.freeze()
    partial_dataset = make_dataset(id_dataset=cfg.TYPE, config=cfg)
    assert len(partial_dataset.scene_ids) == PARTIAL_LOAD_SCENES, (
        "Number of loaded scenes doesn't correspond.")
    check_json_serializaiton(partial_dataset)
示例#7
0
def construct_envs(args):
    """Create a VectorEnv of `args.num_processes` NavRLEnv workers.

    Scenes are split evenly across processes; each worker is assigned a
    GPU id derived from its rank.

    Args:
        args: namespace with task_config, num_processes,
            num_processes_on_first_gpu, num_processes_per_gpu, sim_gpu_id.

    Returns:
        A VectorEnv wrapping one env per process.
    """
    env_configs = []
    baseline_configs = []
    args_list = []

    # TODO check params consistency here
    basic_config = cfg_env(config_paths=[args.task_config])

    print("loading scenes ...")
    scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)

    # Default to 0 so the summary print below cannot raise a NameError when
    # the dataset has no per-scene files (bug in the original: the variable
    # was only bound inside the `if len(scenes) > 0` branch).
    scene_split_size = 0
    if len(scenes) > 0:
        assert len(scenes) >= args.num_processes, (
            "reduce the number of processes as there "
            "aren't enough number of scenes")
        scene_split_size = int(np.floor(len(scenes) / args.num_processes))

    print("using ", args.num_processes, " processes and ", scene_split_size,
          " scenes per process")

    for i in range(args.num_processes):
        config_env = cfg_env(config_paths=[args.task_config])
        config_env.defrost()

        if len(scenes) > 0:
            # Contiguous, equally sized slice of scenes for worker i.
            config_env.DATASET.CONTENT_SCENES = scenes[
                i * scene_split_size:(i + 1) * scene_split_size]

        if i < args.num_processes_on_first_gpu:
            gpu_id = 0
        else:
            gpu_id = int((i - args.num_processes_on_first_gpu) //
                         args.num_processes_per_gpu) + args.sim_gpu_id
#         gpu_id = min(torch.cuda.device_count() - 1, gpu_id)
        # NOTE(review): the computed gpu_id is immediately overridden to 0
        # below — presumably a single-GPU debug hack; confirm before removing.
        gpu_id = 0
        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_id
        config_env.freeze()
        env_configs.append(config_env)

        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)

        args_list.append(args)

    envs = VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            tuple(
                zip(args_list, env_configs, baseline_configs,
                    range(args.num_processes)))),
    )

    # envs = make_env_fn(args_list[0], env_configs[0], config_baseline=baseline_configs[0], rank=42)
    print("returning with environment")

    return envs
示例#8
0
def make_env_fn(config_env, config_baseline, rank):
    """Build a single NavRLEnv seeded with its worker rank."""
    dataset = PointNavDatasetV1(config_env.DATASET)
    first_scene = dataset.episodes[0].scene_id
    # The simulator must load the first episode's scene.
    config_env.defrost()
    config_env.SIMULATOR.SCENE = first_scene
    config_env.freeze()
    env = NavRLEnv(
        config_env=config_env,
        config_baseline=config_baseline,
        dataset=dataset,
    )
    env.seed(rank)
    return env
示例#9
0
def test_demo_notebook():
    """Run the habitat-api demo notebook on the MP3D val split, if present."""
    config = habitat.get_config("configs/tasks/pointnav_mp3d.yaml")
    config.defrost()
    config.DATASET.SPLIT = "val"

    if PointNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.main(["--nbval-lax", "notebooks/habitat-api-demo.ipynb"])
    else:
        pytest.skip(
            "Please download the Matterport3D PointNav val dataset and Matterport3D val scenes"
        )
示例#10
0
def test_demo_notebook():
    """Run the camera-warping demo notebook when test scenes are available."""
    config = habitat.get_config("configs/tasks/pointnav_rgbd.yaml")
    config.defrost()
    config.DATASET.SPLIT = "val"

    if PointNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.main([
            "--nbval-lax",
            "notebooks/relative_camera_views_transform_and_warping_demo.ipynb",
        ])
    else:
        pytest.skip("Please download the habitat test scenes")
示例#11
0
def make_env_fn(args, config_env, rank):
    """Create one seeded Neural_SLAM_Env worker."""
    dataset = PointNavDatasetV1(config_env.DATASET)
    # Point the simulator at the first episode's scene.
    config_env.defrost()
    config_env.SIMULATOR.SCENE = dataset.episodes[0].scene_id
    print("Loading {}".format(config_env.SIMULATOR.SCENE))
    config_env.freeze()

    env = Neural_SLAM_Env(
        args=args,
        rank=rank,
        config_env=config_env,
        dataset=dataset,
    )
    env.seed(rank)
    return env
示例#12
0
def make_env_fn(args, config_env, config_baseline, rank):
    """Create one seeded PointNavEnv worker, logging its config.

    Args:
        args: experiment arguments forwarded to the env.
        config_env: habitat env config (expects SIMULATOR.SCENE already set).
        config_baseline: baseline config forwarded to the env.
        rank: worker index, also used as the RNG seed.

    Returns:
        A seeded PointNavEnv.
    """
    # Fixed banner typo: "condig_env" -> "config_env".
    print("-------------- config_env ---------------")
    print(config_env)
    print("-----------------------------------------")
    dataset = PointNavDatasetV1(config_env.DATASET)
    print("Loading {}".format(config_env.SIMULATOR.SCENE))
    env = PointNavEnv(args=args,
                      rank=rank,
                      config_env=config_env,
                      config_baseline=config_baseline,
                      dataset=dataset)
    env.seed(rank)
    return env
示例#13
0
def make_env_fn(config_env, config_baseline, rank, target_dim, log_dir,
                visdom_name, visdom_log_file, vis_interval, visdom_server,
                visdom_port, swap_building_k_episodes, map_kwargs,
                reward_kwargs, should_record, seed):
    """Build one MidlevelNavRLEnv worker, optionally wrapped in VisdomMonitor.

    For the 'val' split the dataset is loaded directly from the gzipped
    episode file; otherwise PointNavDatasetV1 loads it from the config.
    """
    if config_env.DATASET.SPLIT == 'val':
        # Load episodes straight from the gzipped JSON dataset file.
        datasetfile_path = config_env.DATASET.POINTNAVV1.DATA_PATH.format(
            split=config_env.DATASET.SPLIT)
        dataset = PointNavDatasetV1()
        with gzip.open(datasetfile_path, "rt") as f:
            dataset.from_json(f.read())
    else:
        dataset = PointNavDatasetV1(config_env.DATASET)

    # The simulator must load the scene of the first episode.
    config_env.defrost()
    config_env.SIMULATOR.SCENE = dataset.episodes[0].scene_id
    config_env.freeze()
    env = MidlevelNavRLEnv(config_env=config_env,
                           config_baseline=config_baseline,
                           dataset=dataset,
                           target_dim=target_dim,
                           map_kwargs=map_kwargs,
                           reward_kwargs=reward_kwargs)
    # Rotate through buildings every `swap_building_k_episodes` episodes.
    env.episodes = shuffle_episodes(env, swap_every_k=swap_building_k_episodes)
    env.seed(seed)
    if should_record and visdom_log_file is not None:
        print("SETTING VISDOM MONITOR WITH VIS INTERVAL", vis_interval)
        env = VisdomMonitor(env,
                            directory=os.path.join(log_dir, visdom_name),
                            video_callable=lambda x: x % vis_interval == 0,
                            uid=str(rank),
                            server=visdom_server,
                            port=visdom_port,
                            visdom_log_file=visdom_log_file,
                            visdom_env=visdom_name)

    return env
def test_demo_notebook():
    """Run the habitat-api demo notebook on the MP3D val split, if present."""
    config = habitat.get_config("configs/tasks/pointnav_mp3d.yaml")
    config.defrost()
    config.DATASET.SPLIT = "val"

    if PointNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.main(["--nbval-lax", "notebooks/habitat-api-demo.ipynb"])

        # NB: Force a gc collect run as it can take a little bit for
        # the cleanup to happen after the notebook and we get
        # a double context crash!
        gc.collect()
    else:
        pytest.skip(
            "Please download the Matterport3D PointNav val dataset and Matterport3D val scenes"
        )
def test_demo_notebook():
    """Run the camera-warping demo notebook when test scenes are available."""
    config = habitat.get_config("configs/tasks/pointnav_rgbd.yaml")
    config.defrost()
    config.DATASET.SPLIT = "val"

    if PointNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.main([
            "--nbval-lax",
            "notebooks/relative_camera_views_transform_and_warping_demo.ipynb",
        ])

        # NB: Force a gc collect run as it can take a little bit for
        # the cleanup to happen after the notebook and we get
        # a double context crash!
        gc.collect()
    else:
        pytest.skip("Please download the habitat test scenes")
示例#16
0
def construct_envs(args):
    """Create a habitat.VectorEnv with one worker per process.

    When the dataset provides per-scene files, scenes are shuffled and
    split evenly across `args.num_processes` workers; every worker uses
    the same simulator GPU (`args.sim_gpu_id`) and the sensors listed in
    the comma-separated `args.sensors` string.
    """
    env_configs = []
    baseline_configs = []

    basic_config = cfg_env(config_paths=args.task_config, opts=args.opts)

    scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)

    if len(scenes) > 0:
        random.shuffle(scenes)

        assert len(scenes) >= args.num_processes, (
            "reduce the number of processes as there "
            "aren't enough number of scenes")
        scene_split_size = int(np.floor(len(scenes) / args.num_processes))

    for i in range(args.num_processes):
        config_env = cfg_env(config_paths=args.task_config, opts=args.opts)
        config_env.defrost()

        if len(scenes) > 0:
            # Contiguous slice of the shuffled scene list for worker i.
            config_env.DATASET.POINTNAVV1.CONTENT_SCENES = scenes[
                i * scene_split_size:(i + 1) * scene_split_size]

        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = args.sim_gpu_id

        agent_sensors = args.sensors.strip().split(",")
        for sensor in agent_sensors:
            assert sensor in ["RGB_SENSOR", "DEPTH_SENSOR"]
        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors
        config_env.freeze()
        env_configs.append(config_env)

        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)

        logger.info("config_env: {}".format(config_env))

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            tuple(zip(env_configs, baseline_configs,
                      range(args.num_processes)))),
    )

    return envs
示例#17
0
def main():
    """Build a single habitat env from global CLI `args` and reset it once."""
    basic_config = cfg_env(config_paths=
                           ["env/habitat/habitat_lab/configs/" + args.task_config])
    basic_config.defrost()
    basic_config.DATASET.SPLIT = args.split
    basic_config.freeze()

    scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)
    config_env = cfg_env(config_paths=
                         ["env/habitat/habitat_lab/configs/" + args.task_config])
    config_env.defrost()
    config_env.DATASET.CONTENT_SCENES = scenes

    # Single-process script: always run the simulator on GPU 0.
    gpu_id = 0
    config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_id

    agent_sensors = []
    agent_sensors.append("RGB_SENSOR")
    agent_sensors.append("DEPTH_SENSOR")

    config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors

    config_env.ENVIRONMENT.MAX_EPISODE_STEPS = args.max_episode_length
    config_env.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False

    # Mirror frame size / FOV / camera height from args onto both sensors.
    config_env.SIMULATOR.RGB_SENSOR.WIDTH = args.env_frame_width
    config_env.SIMULATOR.RGB_SENSOR.HEIGHT = args.env_frame_height
    config_env.SIMULATOR.RGB_SENSOR.HFOV = args.hfov
    config_env.SIMULATOR.RGB_SENSOR.POSITION = [0, args.camera_height, 0]

    config_env.SIMULATOR.DEPTH_SENSOR.WIDTH = args.env_frame_width
    config_env.SIMULATOR.DEPTH_SENSOR.HEIGHT = args.env_frame_height
    config_env.SIMULATOR.DEPTH_SENSOR.HFOV = args.hfov
    config_env.SIMULATOR.DEPTH_SENSOR.POSITION = [0, args.camera_height, 0]

    config_env.SIMULATOR.TURN_ANGLE = 10
    config_env.DATASET.SPLIT = args.split

    config_env.freeze()

    # NOTE(review): assumes the env's reset() returns (obs, info) —
    # confirm against the make_env_fn implementation used here.
    env = make_env_fn(args, config_env, 0)
    obs, inf = env.reset()
    print("done")
示例#18
0
def _load_test_data():
    """Build NUM_ENVS (config, dataset) pairs for the test suite.

    Skips the test session when dataset files or scene assets are missing.
    """
    configs, datasets = [], []
    for _ in range(NUM_ENVS):
        config = get_config(CFG_TEST)
        if not PointNavDatasetV1.check_config_paths_exist(config.DATASET):
            pytest.skip("Please download Habitat test data to data folder.")

        dataset = habitat.make_dataset(id_dataset=config.DATASET.TYPE,
                                       config=config.DATASET)
        datasets.append(dataset)

        # Point the simulator at the first episode's scene; verify it exists.
        config.defrost()
        config.SIMULATOR.SCENE = dataset.episodes[0].scene_id
        if not os.path.exists(config.SIMULATOR.SCENE):
            pytest.skip("Please download Habitat test data to data folder.")
        config.freeze()
        configs.append(config)

    return configs, datasets
def test_pointnav_episode_generator():
    """Generate PointNav episodes on the val split and validate them.

    Generates NUM_EPISODES episodes with the default geodesic/euclid ratio
    and NUM_EPISODES more with ratio 0, checks each episode's shortest
    path, and verifies the collection is JSON-serializable.
    """
    config = get_config(CFG_TEST)
    config.defrost()
    config.DATASET.SPLIT = "val"
    config.ENVIRONMENT.MAX_EPISODE_STEPS = 500
    config.freeze()
    if not PointNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.skip("Test skipped as dataset files are missing.")
    with habitat.Env(config) as env:
        # Seed both the env and Python's RNG for reproducible generation.
        env.seed(config.SEED)
        random.seed(config.SEED)
        generator = pointnav_generator.generate_pointnav_episode(
            sim=env.sim,
            shortest_path_success_distance=config.TASK.SUCCESS_DISTANCE,
            shortest_path_max_steps=config.ENVIRONMENT.MAX_EPISODE_STEPS,
        )
        episodes = []
        for i in range(NUM_EPISODES):
            episode = next(generator)
            episodes.append(episode)

        # Second batch: relax the geodesic-to-euclidean distance ratio.
        for episode in pointnav_generator.generate_pointnav_episode(
            sim=env.sim,
            num_episodes=NUM_EPISODES,
            shortest_path_success_distance=config.TASK.SUCCESS_DISTANCE,
            shortest_path_max_steps=config.ENVIRONMENT.MAX_EPISODE_STEPS,
            geodesic_to_euclid_min_ratio=0,
        ):
            episodes.append(episode)

        assert len(episodes) == 2 * NUM_EPISODES
        env.episode_iterator = iter(episodes)

        for episode in episodes:
            check_shortest_path(env, episode)

        dataset = habitat.Dataset()
        dataset.episodes = episodes
        assert (
            dataset.to_json()
        ), "Generated episodes aren't json serializable."
示例#20
0
def make_habitat_vector_env(num_processes=2,
                            target_dim=7,
                            preprocessing_fn=None,
                            log_dir=None,
                            visdom_name='main',
                            visdom_log_file=None,
                            visdom_server='localhost',
                            visdom_port='8097',
                            vis_interval=200,
                            scenes=None,
                            val_scenes=['Greigsville', 'Pablo', 'Mosquito'],
                            num_val_processes=0,
                            swap_building_k_episodes=10,
                            gpu_devices=[0],
                            collate_obs_before_transform=False,
                            map_kwargs={},
                            reward_kwargs={},
                            seed=42):
    """Build a HabitatPreprocessVectorEnv with train and val worker splits.

    The first `num_processes - num_val_processes` workers run the 'train'
    split, the rest the 'val' split. Buildings (scenes) are assigned to
    workers round-robin, as are GPUs from `gpu_devices`.

    Returns:
        A HabitatPreprocessVectorEnv with observation/action spaces set.
    """
    assert map_kwargs[
        'map_building_size'] > 0, 'Map building size must be positive!'
    default_reward_kwargs = {
        'slack_reward': -0.01,
        'success_reward': 10,
        'use_visit_penalty': False,
        'visit_penalty_coef': 0,
        'penalty_eps': 999,
    }
    # Merge defaults into a fresh dict. The original wrote missing keys into
    # `reward_kwargs` in place, mutating the caller's dict and the shared
    # mutable default argument across calls.
    reward_kwargs = {**default_reward_kwargs, **reward_kwargs}

    habitat_path = os.path.dirname(os.path.dirname(habitat.__file__))
    task_config = os.path.join(habitat_path,
                               'configs/tasks/pointnav_gibson_train.yaml')
    basic_config = cfg_env(config_file=task_config)
    basic_config.defrost()
    basic_config.DATASET.POINTNAVV1.DATA_PATH = os.path.join(
        habitat_path, basic_config.DATASET.POINTNAVV1.DATA_PATH)
    basic_config.freeze()

    if scenes is None:
        scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)
        random.shuffle(scenes)

    val_task_config = os.path.join(
        habitat_path, 'configs/tasks/pointnav_gibson_val_mini.yaml')
    val_cfg = cfg_env(config_file=val_task_config)
    val_cfg.defrost()
    val_cfg.DATASET.SPLIT = "val"
    val_cfg.freeze()

    # Keep validation buildings out of the training pool.
    scenes = [s for s in scenes if s not in val_scenes]
    if num_val_processes > 0 and len(val_scenes) % num_val_processes != 0:
        warnings.warn(
            "Please make num_val_processes ({}) evenly divide len(val_scenes) ({}) or some buildings may be overrepresented"
            .format(num_val_processes, len(val_scenes)))

    env_configs = []
    baseline_configs = []
    encoders = []
    target_dims = []
    is_val = []

    # Assign specific buildings to each process (round-robin).
    train_process_scenes = [[]
                            for _ in range(num_processes - num_val_processes)]
    for i, scene in enumerate(scenes):
        train_process_scenes[i % len(train_process_scenes)].append(scene)

    if num_val_processes > 0:
        val_process_scenes = [[] for _ in range(num_val_processes)]
        for i, scene in enumerate(val_scenes):
            val_process_scenes[i % len(val_process_scenes)].append(scene)

    for i in range(num_processes):
        config_env = cfg_env(task_config)
        config_env.defrost()
        config_env.DATASET.POINTNAVV1.DATA_PATH = os.path.join(
            habitat_path, basic_config.DATASET.POINTNAVV1.DATA_PATH)

        if i < num_processes - num_val_processes:
            config_env.DATASET.SPLIT = 'train'
            config_env.DATASET.POINTNAVV1.CONTENT_SCENES = train_process_scenes[
                i]
        else:
            config_env.DATASET.SPLIT = 'val'
            val_i = i - (num_processes - num_val_processes)
            config_env.DATASET.POINTNAVV1.CONTENT_SCENES = val_process_scenes[
                val_i]
        print("Env {}:".format(i),
              config_env.DATASET.POINTNAVV1.CONTENT_SCENES)

        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_devices[
            i % len(gpu_devices)]
        agent_sensors = ["RGB_SENSOR"]
        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors
        config_env.SIMULATOR.SCENE = os.path.join(habitat_path,
                                                  config_env.SIMULATOR.SCENE)

        config_env.freeze()
        env_configs.append(config_env)
        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)
        encoders.append(preprocessing_fn)
        target_dims.append(target_dim)

    # Record video only from the first train worker and the first val worker.
    should_record = [(i == 0 or i == (num_processes - num_val_processes))
                     for i in range(num_processes)]
    envs = HabitatPreprocessVectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            tuple(
                zip(
                    env_configs,
                    baseline_configs,
                    range(num_processes),
                    target_dims,
                    [log_dir for _ in range(num_processes)],
                    [visdom_name for _ in range(num_processes)],
                    [visdom_log_file for _ in range(num_processes)],
                    [vis_interval for _ in range(num_processes)],
                    [visdom_server for _ in range(num_processes)],
                    [visdom_port for _ in range(num_processes)],
                    [swap_building_k_episodes for _ in range(num_processes)],
                    [map_kwargs for _ in range(num_processes)],
                    [reward_kwargs for _ in range(num_processes)],
                    should_record,
                    [seed + i for i in range(num_processes)],
                ))),
        preprocessing_fn=preprocessing_fn,
        collate_obs_before_transform=collate_obs_before_transform)
    envs.observation_space = envs.observation_spaces[0]
    envs.action_space = spaces.Discrete(3)
    envs.reward_range = None
    envs.metadata = None
    envs.is_embodied = True
    return envs
def test_new_actions():
    """Smoke-test the new_actions example against the test dataset."""
    dataset_config = habitat.get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(config=dataset_config):
        pytest.skip("Please download Habitat test data to data folder.")
    new_actions.main()
示例#22
0
def test_register_new_sensors_and_measures():
    """Smoke-test the sensors/measures registration example."""
    dataset_config = habitat.get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(config=dataset_config):
        pytest.skip("Please download Habitat test data to data folder.")
    register_new_sensors_and_measures.main()
def test_visualizations_example():
    """Smoke-test the visualization examples against the test dataset."""
    dataset_config = habitat.get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(config=dataset_config):
        pytest.skip("Please download Habitat test data to data folder.")
    visualization_examples.main()
def test_readme_example():
    """Smoke-test the README example against the test dataset."""
    dataset_config = habitat.get_config().DATASET
    if not PointNavDatasetV1.check_config_paths_exist(config=dataset_config):
        pytest.skip("Please download Habitat test data to data folder.")
    example()
示例#25
0
def construct_envs(args):
    """Create a VectorEnv of `args.num_processes` workers for SLAM training.

    Scenes are split evenly across processes; each worker gets a GPU id
    derived from its rank (the first `args.num_processes_on_first_gpu`
    workers on GPU 0, the rest packed `args.num_processes_per_gpu` per GPU
    starting at `args.sim_gpu_id`), with sensor and episode settings
    copied from `args`.
    """
    env_configs = []
    # baseline_configs = []
    args_list = []

    basic_config = cfg_env(
        config_paths=["env/habitat/habitat_lab/configs/" + args.task_config])
    basic_config.defrost()
    basic_config.DATASET.SPLIT = args.split
    basic_config.freeze()

    scenes = PointNavDatasetV1.get_scenes_to_load(basic_config.DATASET)

    if len(scenes) > 0:
        assert len(scenes) >= args.num_processes, (
            "reduce the number of processes as there "
            "aren't enough number of scenes")
        scene_split_size = int(np.floor(len(scenes) / args.num_processes))

    for i in range(args.num_processes):
        config_env = cfg_env(config_paths=[
            "env/habitat/habitat_lab/configs/" + args.task_config
        ])
        config_env.defrost()

        if len(scenes) > 0:
            # Contiguous, equally sized slice of scenes for worker i.
            config_env.DATASET.CONTENT_SCENES = scenes[i *
                                                       scene_split_size:(i +
                                                                         1) *
                                                       scene_split_size]

        if i < args.num_processes_on_first_gpu:
            gpu_id = 0
        else:
            gpu_id = int((i - args.num_processes_on_first_gpu) //
                         args.num_processes_per_gpu) + args.sim_gpu_id
        # Clamp to the number of CUDA devices actually available.
        gpu_id = min(torch.cuda.device_count() - 1, gpu_id)
        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_id

        agent_sensors = []
        agent_sensors.append("RGB_SENSOR")
        agent_sensors.append("DEPTH_SENSOR")

        config_env.SIMULATOR.AGENT_0.SENSORS = agent_sensors

        config_env.ENVIRONMENT.MAX_EPISODE_STEPS = args.max_episode_length
        config_env.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False

        # Frame size / FOV / camera height taken from args for both sensors.
        config_env.SIMULATOR.RGB_SENSOR.WIDTH = args.env_frame_width
        config_env.SIMULATOR.RGB_SENSOR.HEIGHT = args.env_frame_height
        config_env.SIMULATOR.RGB_SENSOR.HFOV = args.hfov
        config_env.SIMULATOR.RGB_SENSOR.POSITION = [0, args.camera_height, 0]

        config_env.SIMULATOR.DEPTH_SENSOR.WIDTH = args.env_frame_width
        config_env.SIMULATOR.DEPTH_SENSOR.HEIGHT = args.env_frame_height
        config_env.SIMULATOR.DEPTH_SENSOR.HFOV = args.hfov
        config_env.SIMULATOR.DEPTH_SENSOR.POSITION = [0, args.camera_height, 0]

        config_env.SIMULATOR.TURN_ANGLE = 10
        config_env.DATASET.SPLIT = args.split

        config_env.freeze()
        env_configs.append(config_env)

        # Baseline configs are not used
        # config_baseline = cfg_baseline()
        # baseline_configs.append(config_baseline)

        args_list.append(args)

    envs = VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            tuple(zip(args_list, env_configs, range(args.num_processes)))),
    )

    return envs
def make_habitat_vector_env(
    scenario='PointNav',
    num_processes=2,
    target_dim=7,
    preprocessing_fn=None,
    log_dir=None,
    visdom_name='main',
    visdom_log_file=None,
    visdom_server='localhost',
    visdom_port='8097',
    vis_interval=200,
    train_scenes=None,
    val_scenes=None,
    num_val_processes=0,
    swap_building_k_episodes=10,
    gpu_devices=[0],
    map_kwargs={},
    reward_kwargs={},
    seed=42,
    test_mode=False,
    debug_mode=False,
    scenario_kwargs={},
):
    assert map_kwargs[
        'map_building_size'] > 0, 'Map building size must be positive!'
    default_reward_kwargs = {
        'slack_reward': -0.01,
        'success_reward': 10,
        'use_visit_penalty': False,
        'visit_penalty_coef': 0,
        'penalty_eps': 999,
        'sparse': False,
        'dist_coef': 1.0,
    }
    for k, v in default_reward_kwargs.items():
        if k not in reward_kwargs:
            reward_kwargs[k] = v

    habitat_path = os.path.dirname(os.path.dirname(habitat.__file__))
    if scenario == 'PointNav' or scenario == 'Exploration':
        task_config = os.path.join(habitat_path,
                                   'configs/tasks/pointnav_gibson_train.yaml')
        # only difference is that Exploration needs DEPTH_SENSOR but that is added in the Env
        # task_config = os.path.join(habitat_path, 'configs/tasks/exploration_gibson.yaml')

    env_configs = []
    baseline_configs = []
    encoders = []
    target_dims = []
    is_val = []

    # Assign specific episodes to each process
    config_env = cfg_env(task_config)

    # Load dataset
    print('Loading val dataset (partition by episode)...')
    datasetfile_path = config_env.DATASET.POINTNAVV1.DATA_PATH.format(
        split='val')
    dataset = PointNavDatasetV1()
    with gzip.open(datasetfile_path, "rt") as f:
        dataset.from_json(f.read())
    val_datasets = get_splits(dataset, max(num_val_processes, 1))
    #     for d in val_datasets:
    #         d.episodes = [d.episodes[0]]
    print('Loaded.')

    print('Loading train dataset (partition by building)...')
    train_datasets = []
    if num_processes - num_val_processes > 0:
        #         dataset = PointNavDatasetV1(config_env.DATASET)
        train_datasets = [
            None for _ in range(num_processes - num_val_processes)
        ]
    print('Loaded.')

    # Assign specific buildings to each process
    if num_processes > num_val_processes:
        train_process_scenes = [[] for _ in range(num_processes -
                                                  num_val_processes)]
        if train_scenes is None:
            train_scenes = PointNavDatasetV1.get_scenes_to_load(
                config_env.DATASET)
            random.shuffle(train_scenes)

        for i, scene in enumerate(train_scenes):
            train_process_scenes[i % len(train_process_scenes)].append(scene)

        # If n processes > n envs, some processes can use all envs
        for j, process in enumerate(train_process_scenes):
            if len(process) == 0:
                train_process_scenes[j] = list(train_scenes)

    get_scenes = lambda d: list(
        Counter([e.scene_id.split('/')[-1].split(".")[0]
                 for e in d.episodes]).items())
    for i in range(num_processes):
        config_env = cfg_env(task_config)
        config_env.defrost()

        if i < num_processes - num_val_processes:
            config_env.DATASET.SPLIT = 'train'
            #             config_env.DATASET.POINTNAVV1.CONTENT_SCENES = get_scenes(train_datasets[i])
            config_env.DATASET.POINTNAVV1.CONTENT_SCENES = train_process_scenes[
                i]
        else:
            val_i = i - (num_processes - num_val_processes)
            config_env.DATASET.SPLIT = 'val'
            if val_scenes is not None:
                config_env.DATASET.POINTNAVV1.CONTENT_SCENES = val_scenes
            else:
                config_env.DATASET.POINTNAVV1.CONTENT_SCENES = get_scenes(
                    val_datasets[val_i])

        print("Env {}:".format(i),
              config_env.DATASET.POINTNAVV1.CONTENT_SCENES)

        config_env.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = gpu_devices[
            i % len(gpu_devices)]
        config_env.SIMULATOR.SCENE = os.path.join(habitat_path,
                                                  config_env.SIMULATOR.SCENE)
        config_env.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR"]

        # Now define the config for the sensor
        #         config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
        #         config.TASK.AGENT_POSITION_SENSOR.TYPE = "agent_position_sensor"
        #         config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")

        config_env.TASK.MEASUREMENTS.append('COLLISIONS')

        config_env.freeze()
        env_configs.append(config_env)
        config_baseline = cfg_baseline()
        baseline_configs.append(config_baseline)
        encoders.append(preprocessing_fn)
        target_dims.append(target_dim)

    should_record = [(i == 0 or i == (num_processes - num_val_processes))
                     for i in range(num_processes)]
    if debug_mode:
        env = make_env_fn(
            scenario,
            env_configs[0],
            baseline_configs[0],
            0,
            0,
            1,
            target_dim,
            log_dir,
            visdom_name,
            visdom_log_file,
            vis_interval,
            visdom_server,
            visdom_port,
            swap_building_k_episodes,
            map_kwargs,
            reward_kwargs,
            False,
            seed,  # TODO set should_record to True
            test_mode,
            (train_datasets + val_datasets)[0],
            scenario_kwargs)
        envs = PreprocessEnv(env, preprocessing_fn=preprocessing_fn)
    else:
        envs = HabitatPreprocessVectorEnv(
            make_env_fn=make_env_fn,
            env_fn_args=tuple(
                tuple(
                    zip(
                        [scenario for _ in range(num_processes)],
                        env_configs,
                        baseline_configs,
                        range(num_processes),
                        [num_val_processes for _ in range(num_processes)],
                        [num_processes for _ in range(num_processes)],
                        target_dims,
                        [log_dir for _ in range(num_processes)],
                        [visdom_name for _ in range(num_processes)],
                        [visdom_log_file for _ in range(num_processes)],
                        [vis_interval for _ in range(num_processes)],
                        [visdom_server for _ in range(num_processes)],
                        [visdom_port for _ in range(num_processes)],
                        [
                            swap_building_k_episodes
                            for _ in range(num_processes)
                        ],
                        [map_kwargs for _ in range(num_processes)],
                        [reward_kwargs for _ in range(num_processes)],
                        should_record,
                        [seed + i for i in range(num_processes)],
                        [test_mode for _ in range(num_processes)],
                        train_datasets + val_datasets,
                        [scenario_kwargs for _ in range(num_processes)],
                    ))),
            preprocessing_fn=preprocessing_fn,
        )
        envs.observation_space = envs.observation_spaces[0]
    envs.action_space = spaces.Discrete(3)
    envs.reward_range = None
    envs.metadata = None
    envs.is_embodied = True
    return envs
def make_env_fn(scenario, config_env, config_baseline, rank, num_val_processes,
                num_processes, target_dim, log_dir, visdom_name,
                visdom_log_file, vis_interval, visdom_server, visdom_port,
                swap_building_k_episodes, map_kwargs, reward_kwargs,
                should_record, seed, test_mode, dataset, scenario_kwargs):
    """Construct a single Habitat RL environment for one worker process.

    Train-split workers load their own per-scene dataset from
    ``config_env.DATASET``; val-split workers reuse the pre-partitioned
    ``dataset`` passed in by the caller. Depending on ``scenario`` the env
    is a ``MidlevelNavRLEnv`` ('PointNav') or an ``ExplorationRLEnv``
    ('Exploration'); any other value raises ``ValueError``.

    Args:
        scenario: Either 'PointNav' or 'Exploration'.
        config_env: Frozen habitat config for this worker (defrosted and
            re-frozen here to set the initial ``SIMULATOR.SCENE``).
        config_baseline: Baseline config forwarded to the env constructor.
        rank: Worker index, used only as the video-monitor uid.
        should_record: When truthy (and ``visdom_log_file`` is set), wrap
            the env in a ``VisdomMonitor`` that records every
            ``vis_interval``-th episode.
        test_mode: When True, episodes are kept in their original order and
            looped exactly once; otherwise they are shuffled.
        dataset: Pre-loaded val dataset; ignored (replaced) for the train
            split.

    Returns:
        The constructed (possibly monitor-wrapped) environment.

    Raises:
        ValueError: If ``scenario`` is not a recognized scenario name.
    """
    # Train workers build their own dataset restricted to this worker's
    # CONTENT_SCENES; val workers keep the episode-partitioned one passed in.
    if config_env.DATASET.SPLIT == 'train':
        dataset = PointNavDatasetV1(config_env.DATASET)

    # Episode scene ids are stored relative to the habitat repo root;
    # rewrite them to absolute paths so the simulator can find the assets.
    habitat_path = os.path.dirname(os.path.dirname(habitat.__file__))
    for ep in dataset.episodes:
        ep.scene_id = os.path.join(habitat_path, ep.scene_id)

    config_env.defrost()
    config_env.SIMULATOR.SCENE = dataset.episodes[0].scene_id
    config_env.freeze()

    if scenario == 'PointNav':
        # Drop episodes whose start-to-goal geodesic distance exceeds the
        # scenario's cap.
        dataset.episodes = [
            epi for epi in dataset.episodes if epi.info['geodesic_distance'] <
            scenario_kwargs['max_geodesic_dist']
        ]
        env = MidlevelNavRLEnv(config_env=config_env,
                               config_baseline=config_baseline,
                               dataset=dataset,
                               target_dim=target_dim,
                               map_kwargs=map_kwargs,
                               reward_kwargs=reward_kwargs,
                               loop_episodes=not test_mode,
                               scenario_kwargs=scenario_kwargs)
    elif scenario == 'Exploration':
        env = ExplorationRLEnv(config_env=config_env,
                               config_baseline=config_baseline,
                               dataset=dataset,
                               map_kwargs=map_kwargs,
                               reward_kwargs=reward_kwargs,
                               loop_episodes=not test_mode,
                               scenario_kwargs=scenario_kwargs)
    else:
        # Was `assert False, ...`: asserts are stripped under `python -O`,
        # which would let an unknown scenario fall through with `env` unbound.
        raise ValueError(f'do not recognize scenario {scenario}')

    # In test mode keep the deterministic episode order; otherwise shuffle,
    # rotating to a new building every `swap_building_k_episodes` episodes.
    # (The original code's `env.episodes = env.episodes` test-mode branch was
    # a no-op self-assignment and has been removed.)
    if not test_mode:
        env.episodes = shuffle_episodes(env,
                                        swap_every_k=swap_building_k_episodes)

    env.seed(seed)
    if should_record and visdom_log_file is not None:
        print(
            f"Recording videos from env {rank} every {vis_interval} episodes (via visdom)"
        )
        env = VisdomMonitor(env,
                            directory=os.path.join(log_dir, visdom_name),
                            video_callable=lambda x: x % vis_interval == 0,
                            uid=str(rank),
                            server=visdom_server,
                            port=visdom_port,
                            visdom_log_file=visdom_log_file,
                            visdom_env=visdom_name)

    return env
def test_shortest_path_follower_example():
    """Smoke-test the shortest-path-follower example end to end.

    Skipped when the Habitat test dataset is not available on disk.
    """
    dataset_config = habitat.get_config().DATASET
    data_present = PointNavDatasetV1.check_config_paths_exist(
        config=dataset_config)
    if not data_present:
        pytest.skip("Please download Habitat test data to data folder.")
    shortest_path_follower_example.main()