Example #1
0
def test_threaded_vectorized_env():
    """Smoke-test ThreadedVectorEnv: step every env past episode end.

    Runs for twice MAX_EPISODE_STEPS so episodes roll over at least once,
    checking that each step returns one observation per environment.
    """
    configs, datasets = _load_test_data()
    env_count = len(configs)
    env_fn_args = tuple(zip(configs, datasets, range(env_count)))
    # The context manager guarantees the worker threads are torn down
    # even when an assertion below fails.
    with habitat.ThreadedVectorEnv(env_fn_args=env_fn_args) as envs:
        envs.reset()
        max_steps = configs[0].ENVIRONMENT.MAX_EPISODE_STEPS
        for _ in range(2 * max_steps):
            batch = envs.step(
                sample_non_stop_action(envs.action_spaces[0], env_count)
            )
            assert len(batch) == env_count
Example #2
0
def test_threaded_vectorized_env():
    """Smoke-test ThreadedVectorEnv: step every env past episode end.

    Runs for twice MAX_EPISODE_STEPS so episodes roll over at least once,
    checking that each step returns one observation per environment.
    """
    configs, datasets = _load_test_data()
    num_envs = len(configs)
    env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
    # BUGFIX: the original called envs.close() after the loop, so a failing
    # assertion skipped cleanup and leaked worker threads.  The context
    # manager guarantees close() runs on any exit path.
    with habitat.ThreadedVectorEnv(env_fn_args=env_fn_args) as envs:
        envs.reset()
        # Every action id except STOP, so episodes only end by timeout.
        non_stop_actions = [
            act for act in range(envs.action_spaces[0].n)
            if act != SimulatorActions.STOP
        ]

        for _ in range(2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS):
            observations = envs.step(
                np.random.choice(non_stop_actions, num_envs)
            )
            assert len(observations) == num_envs
Example #3
0
def test_threaded_vectorized_env():
    """Smoke-test ThreadedVectorEnv: step every env past episode end.

    Runs for twice MAX_EPISODE_STEPS so episodes roll over at least once,
    checking that each step returns one observation per environment.
    """
    configs, datasets = _load_test_data()
    num_envs = len(configs)
    env_fn_args = tuple(zip(configs, datasets, range(num_envs)))
    # BUGFIX: the original called envs.close() after the loop, so a failing
    # assertion skipped cleanup and leaked worker threads.  The context
    # manager guarantees close() runs on any exit path.
    with habitat.ThreadedVectorEnv(env_fn_args=env_fn_args) as envs:
        envs.reset()
        # Every sim action key except STOP, so episodes only end by timeout.
        non_stop_actions = [
            k for k, v in SIM_ACTION_TO_NAME.items()
            if v != SimulatorActions.STOP.value
        ]

        for _ in range(2 * configs[0].ENVIRONMENT.MAX_EPISODE_STEPS):
            observations = envs.step(
                np.random.choice(non_stop_actions, num_envs)
            )
            assert len(observations) == num_envs
Example #4
0
def make_task_envs(env_types, nav_configs, nav_datasets, shell_args):
    """Build a vector env (threaded or multiprocess) wrapped for training.

    ``nav_datasets`` arrives as a dict of parallel lists; it is transposed
    into one dict per environment before the env constructors are zipped
    together with per-env seeds.
    """
    keys = list(nav_datasets.keys())
    # Transpose dict-of-lists -> list of per-env dicts.
    per_env_datasets = [
        {key: nav_datasets[key][idx] for key in keys}
        for idx in range(len(nav_datasets[keys[0]]))
    ]
    # One distinct seed per environment, offset from the base seed.
    seeds = range(shell_args.seed, shell_args.seed + len(nav_configs))
    env_fn_args: Tuple[Tuple] = tuple(
        zip(env_types, nav_configs, per_env_datasets, seeds)
    )

    if shell_args.use_multithreading:
        vec = habitat.ThreadedVectorEnv(make_env_fn, env_fn_args)
    else:
        vec = habitat.VectorEnv(
            make_env_fn,
            env_fn_args,
            multiprocessing_start_method="forkserver",
        )
    return HabitatVecEnvWrapper(vec)
Example #5
0
def construct_envs(
    config: Config,
    env_class: Union[Type[Env], Type[RLEnv]],
    workers_ignore_signals: bool = False,
) -> VectorEnv:
    r"""Create VectorEnv object with specified config and env class type.

    To allow better performance, the dataset is split into small subsets,
    grouped by scene, one per individual env.

    :param config: configs that contain num_environments as well as
        information necessary to create individual environments.
    :param env_class: class type of the envs to be created.
    :param workers_ignore_signals: Passed to :ref:`habitat.VectorEnv`'s
        constructor.

    :return: VectorEnv object created according to specification.
    """

    num_environments = config.NUM_ENVIRONMENTS
    configs = []
    env_classes = [env_class for _ in range(num_environments)]
    dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE)
    scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
    if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
        scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)

    if num_environments > 1:
        if len(scenes) == 0:
            raise RuntimeError(
                "No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes"
            )

        if len(scenes) < num_environments:
            # Hack to train with just one scene: replicate it so every env
            # gets a copy instead of rejecting the run outright.
            assert len(scenes) == 1
            scenes = [scenes[0] for _ in range(num_environments)]

        random.shuffle(scenes)

    # Round-robin scenes across envs so the split is as even as possible.
    scene_splits: List[List[str]] = [[] for _ in range(num_environments)]
    for idx, scene in enumerate(scenes):
        scene_splits[idx % len(scene_splits)].append(scene)

    assert sum(map(len, scene_splits)) == len(scenes)

    for i in range(num_environments):
        proc_config = config.clone()
        proc_config.defrost()

        task_config = proc_config.TASK_CONFIG
        # Distinct seed per env so workers do not replay identical episodes.
        task_config.SEED = task_config.SEED + i
        if len(scenes) > 0:
            task_config.DATASET.CONTENT_SCENES = scene_splits[i]

        task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (
            config.SIMULATOR_GPU_ID)

        task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS

        proc_config.freeze()
        configs.append(proc_config)

    # Threads keep everything in one process (debugger-friendly) and avoid
    # process-spawn overhead when only a single env is requested.
    vector_env_cls = (
        habitat.ThreadedVectorEnv
        if config.IS_DEBUG_MODE or num_environments == 1
        else habitat.VectorEnv
    )
    return vector_env_cls(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(zip(configs, env_classes)),
        workers_ignore_signals=workers_ignore_signals,
    )