def main():
    # Get the default config node
    config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    with habitat.Env(config=config) as env:
        print(env.reset()["agent_position"])
        print(env.get_metrics()["episode_info"])
        print(env.step("MOVE_FORWARD")["agent_position"])
        print(env.get_metrics()["episode_info"])
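# The example above assumes that a measure class "EpisodeInfoExample" and a
# sensor registered under the custom name "my_supercool_sensor" already exist
# in habitat's registry. A minimal sketch of what those definitions might look
# like (the UUIDs "episode_info" and "agent_position" follow from the keys
# printed above; treat this as an illustration, not the original
# implementation):
from typing import Any

import numpy as np
from gym import spaces

import habitat
from habitat.core.registry import registry
from habitat.core.simulator import SensorTypes


@registry.register_measure
class EpisodeInfoExample(habitat.Measure):
    def __init__(self, sim, config, **kwargs: Any):
        self._config = config
        super().__init__()

    # The UUID is the key under which the metric appears in env.get_metrics()
    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "episode_info"

    def reset_metric(self, *args: Any, episode, **kwargs: Any):
        self._metric = {"value": self._config.VALUE}

    def update_metric(self, *args: Any, episode, action, **kwargs: Any):
        pass


@registry.register_sensor(name="my_supercool_sensor")
class AgentPositionSensor(habitat.Sensor):
    def __init__(self, sim, config, **kwargs: Any):
        self._sim = sim
        super().__init__(config=config)

    # The UUID is the key under which the reading appears in observations
    def _get_uuid(self, *args: Any, **kwargs: Any):
        return "agent_position"

    def _get_sensor_type(self, *args: Any, **kwargs: Any):
        return SensorTypes.POSITION

    def _get_observation_space(self, *args: Any, **kwargs: Any):
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(3,),
            dtype=np.float32,
        )

    def get_observation(self, observations, *args: Any, episode, **kwargs: Any):
        return self._sim.get_agent_state().position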
Example #2
def get_config(training: bool = False,
               top_down_map: bool = False,
               max_steps: Optional[Union[int, float]] = None,
               task: str = 'pointnav',
               train_dataset: str = 'habitat_test',
               train_split: str = 'train',
               eval_dataset: str = 'habitat_test',
               eval_split: str = 'val',
               gpu_id: int = 0,
               image_key: str = 'rgb',
               goal_key: str = 'pointgoal_with_gps_compass',
               depth_key: Optional[str] = None,
               reward_function: RewardFunction = gin.REQUIRED,
               eval_episodes_per_scene: int = 3) -> Dict[str, Any]:
    mode = 'train' if training else 'eval'
    dataset = train_dataset if training else eval_dataset
    split = train_split if training else eval_split
    opts = ['SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID', gpu_id]
    if max_steps:
        opts += ['ENVIRONMENT.MAX_EPISODE_STEPS', int(max_steps)]
    opts += ['DATASET.SPLIT', split]
    if not training:
        opts += ['ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE', False]
        opts += [
            'ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_EPISODES',
            eval_episodes_per_scene
        ]
    if not task.endswith('.yaml'):
        task = f'{get_config_dir()}/habitat/tasks/{task}.yaml'
    if not dataset.endswith('.yaml'):
        dataset = f'{get_config_dir()}/habitat/datasets/{dataset}.yaml'
    task_file = Path(wandb.run.dir) / 'task.yaml'
    dataset_file = Path(wandb.run.dir) / f'{mode}_dataset.yaml'
    copyfile(task, task_file)
    copyfile(dataset, dataset_file)
    wandb.save(str(task_file))
    wandb.save(str(dataset_file))
    if not HabitatSimActions.has_action('TURN_ANGLE'):
        HabitatSimActions.extend_action_space('TURN_ANGLE')
    config = habitat.get_config([task, dataset], opts)
    config.defrost()
    config.TASK.SUCCESS = habitat.Config()
    config.TASK.SUCCESS.TYPE = 'Success'
    config.TASK.SUCCESS.SUCCESS_DISTANCE = config.TASK.SUCCESS_DISTANCE
    config.TASK.MEASUREMENTS.append('SUCCESS')
    config.TASK.ACTIONS.TURN_ANGLE = habitat.Config()
    config.TASK.ACTIONS.TURN_ANGLE.TYPE = 'TurnAngleAction'
    config.TASK.POSSIBLE_ACTIONS = ['STOP', 'MOVE_FORWARD', 'TURN_ANGLE']
    if top_down_map and 'TOP_DOWN_MAP' not in config.TASK.MEASUREMENTS:
        # Top-down map is expensive to compute, so we only enable it when needed.
        config.TASK.MEASUREMENTS.append('TOP_DOWN_MAP')
    config.freeze()
    return {
        'config': config,
        'image_key': image_key,
        'goal_key': goal_key,
        'depth_key': depth_key,
        'reward_function': reward_function
    }
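# Because reward_function defaults to gin.REQUIRED, get_config is presumably
# decorated with @gin.configurable and the reward is injected from a gin file.
# A self-contained sketch of that mechanism (make_trainer and
# SparseSuccessReward are illustrative stand-ins, not names from this codebase):
import gin


@gin.configurable
class SparseSuccessReward:
    # Illustrative reward: +1 when the success metric is set, else 0
    def __call__(self, metrics: dict) -> float:
        return 1.0 if metrics.get("success") else 0.0


@gin.configurable
def make_trainer(reward_function=gin.REQUIRED):
    # gin raises an error at call time if no binding supplies reward_function
    return reward_function


gin.parse_config("""
make_trainer.reward_function = @SparseSuccessReward()
""")

reward_fn = make_trainer()
print(reward_fn({"success": True}))  # 1.0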
Example #3
def add_panoramic_camera(task_config):
    task_config.SIMULATOR.RGB_SENSOR_LEFT = task_config.SIMULATOR.RGB_SENSOR.clone()
    task_config.SIMULATOR.RGB_SENSOR_LEFT.TYPE = "PanoramicPartRGBSensor"
    task_config.SIMULATOR.RGB_SENSOR_LEFT.ORIENTATION = [0, 2 / 3 * np.pi, 0]
    task_config.SIMULATOR.RGB_SENSOR_LEFT.ANGLE = "left"
    task_config.SIMULATOR.RGB_SENSOR_RIGHT = task_config.SIMULATOR.RGB_SENSOR.clone()
    task_config.SIMULATOR.RGB_SENSOR_RIGHT.TYPE = "PanoramicPartRGBSensor"
    task_config.SIMULATOR.RGB_SENSOR_RIGHT.ORIENTATION = [0, -2 / 3 * np.pi, 0]
    task_config.SIMULATOR.RGB_SENSOR_RIGHT.ANGLE = "right"
    task_config.SIMULATOR.AGENT_0.SENSORS = [
        'RGB_SENSOR', 'RGB_SENSOR_LEFT', 'RGB_SENSOR_RIGHT'
    ]

    task_config.SIMULATOR.DEPTH_SENSOR_LEFT = task_config.SIMULATOR.DEPTH_SENSOR.clone()
    task_config.SIMULATOR.DEPTH_SENSOR_LEFT.TYPE = "PanoramicPartDepthSensor"
    task_config.SIMULATOR.DEPTH_SENSOR_LEFT.ORIENTATION = [0, 2 / 3 * np.pi, 0]
    task_config.SIMULATOR.DEPTH_SENSOR_LEFT.ANGLE = "left"
    task_config.SIMULATOR.DEPTH_SENSOR_RIGHT = task_config.SIMULATOR.DEPTH_SENSOR.clone()
    task_config.SIMULATOR.DEPTH_SENSOR_RIGHT.TYPE = "PanoramicPartDepthSensor"
    task_config.SIMULATOR.DEPTH_SENSOR_RIGHT.ORIENTATION = [0, -2 / 3 * np.pi, 0]
    task_config.SIMULATOR.DEPTH_SENSOR_RIGHT.ANGLE = "right"
    task_config.SIMULATOR.AGENT_0.SENSORS += [
        'DEPTH_SENSOR', 'DEPTH_SENSOR_LEFT', 'DEPTH_SENSOR_RIGHT'
    ]

    task_config.TASK.CUSTOM_VISTARGET_SENSOR = habitat.Config()
    task_config.TASK.CUSTOM_VISTARGET_SENSOR.TYPE = 'CustomVisTargetSensor'

    task_config.TASK.PANORAMIC_SENSOR = habitat.Config()
    task_config.TASK.PANORAMIC_SENSOR.TYPE = 'PanoramicRGBSensor'
    task_config.TASK.PANORAMIC_SENSOR.WIDTH = task_config.SIMULATOR.RGB_SENSOR.WIDTH
    task_config.TASK.PANORAMIC_SENSOR.HEIGHT = task_config.SIMULATOR.RGB_SENSOR.HEIGHT
    task_config.TASK.PANORAMIC_DEPTH_SENSOR = task_config.SIMULATOR.DEPTH_SENSOR.clone()
    task_config.TASK.PANORAMIC_DEPTH_SENSOR.TYPE = 'PanoramicDepthSensor'
    task_config.TASK.PANORAMIC_DEPTH_SENSOR.WIDTH = task_config.SIMULATOR.DEPTH_SENSOR.WIDTH
    task_config.TASK.PANORAMIC_DEPTH_SENSOR.HEIGHT = task_config.SIMULATOR.DEPTH_SENSOR.HEIGHT

    if "STOP" not in task_config.TASK.POSSIBLE_ACTIONS:
        task_config.TASK.SUCCESS.TYPE = "Success_woSTOP"
    task_config.TASK.SUCCESS.SUCCESS_DISTANCE = task_config.TASK.SUCCESS_DISTANCE

    return task_config
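# add_panoramic_camera only wires up the config; the PanoramicRGBSensor it
# references has to stitch the three views at runtime. A minimal sketch, under
# the assumption that the left/front/right simulator views appear in the
# observations as "rgb_left", "rgb", and "rgb_right" (the real key names
# depend on the PanoramicPartRGBSensor implementation):
import numpy as np
from gym import spaces

import habitat
from habitat.core.registry import registry
from habitat.core.simulator import SensorTypes


@registry.register_sensor(name="PanoramicRGBSensor")
class PanoramicRGBSensor(habitat.Sensor):
    def __init__(self, sim, config, **kwargs):
        self._sim = sim
        super().__init__(config=config)

    def _get_uuid(self, *args, **kwargs):
        return "panoramic_rgb"

    def _get_sensor_type(self, *args, **kwargs):
        return SensorTypes.COLOR

    def _get_observation_space(self, *args, **kwargs):
        # Three views side by side -> triple the width
        return spaces.Box(
            low=0, high=255,
            shape=(self.config.HEIGHT, self.config.WIDTH * 3, 3),
            dtype=np.uint8,
        )

    def get_observation(self, observations, *args, **kwargs):
        # Assumed observation keys for the three RGB views
        return np.concatenate(
            [observations["rgb_left"], observations["rgb"], observations["rgb_right"]],
            axis=1,
        )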
Example #4
def main():
    config = habitat.get_config(config_paths="/home/rene/catkin_ws/src/habitat-api/configs/tasks/pointnav_rgbd.yaml")

    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO.TYPE = "EpisodeInfo"
    config.TASK.EPISODE_INFO.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")


    # Now define the config for the sensor
    config.TASK.SEMANTIC_SEGMENTATION_MASK_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.SEMANTIC_SEGMENTATION_MASK_SENSOR.TYPE = "semantic_segmentation_mask"
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("SEMANTIC_SEGMENTATION_MASK_SENSOR")

    config.freeze()

    my_env = sim_env(config)
    # start the thread that publishes sensor readings
    my_env.start()

    rospy.Subscriber("/cmd_vel", Twist, callback, (my_env), queue_size=1)
    # define a list capturing how long it took
    # to update agent orientation for past 3 instances
    # TODO modify dt_list to depend on r1
    dt_list = [0.009, 0.009, 0.009]
    while not rospy.is_shutdown():

        start_time = time.time()
        # cv2.imshow("bc_sensor", my_env.observations['bc_sensor'])
        # cv2.waitKey(100)
        # time.sleep(0.1)
        my_env.update_orientation()

        dt_list.insert(0, time.time() - start_time)
        dt_list.pop()
        my_env.set_dt(sum(dt_list) / len(dt_list))
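# The callback registered on /cmd_vel above is not shown; rospy passes the
# extra argument tuple through as the callback's second parameter. A rough
# sketch, assuming sim_env exposes a method for taking velocity commands
# (set_twist here is hypothetical):
from geometry_msgs.msg import Twist


def callback(msg: Twist, my_env):
    # Forward the commanded linear/angular velocities to the simulator wrapper
    my_env.set_twist(msg.linear.x, msg.angular.z)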
Example #5
def config_custom(path="configs/tasks/pointnav_my.yaml"):
    config = habitat.get_config(path)
    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    return config
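# A quick usage sketch for config_custom, mirroring the first example (it
# assumes the custom measure and sensor classes are registered before the env
# is created):
def run_example():
    config = config_custom()
    with habitat.Env(config=config) as env:
        obs = env.reset()
        print(obs["agent_position"])
        print(env.get_metrics()["episode_info"])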
Example #6
def run_exp(exp_config: str,
            run_type: str,
            ckpt_path="",
            run_id=None,
            run_suffix="",
            opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval".
        ckpt_path: If training, ckpt to resume. If evaluating, ckpt to evaluate.
        run_id: If using slurm batch, run id to prefix.
        run_suffix: Optional suffix appended to the variant name.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    # Add tracking of the number of episodes
    config.defrost()

    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The type field is used to look-up the measure in the registry.
    # By default, the things are registered with the class name
    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK_CONFIG.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")
    config.freeze()

    variant_name = os.path.split(exp_config)[1].split('.')[0]
    config.defrost()
    if run_suffix != "" and run_suffix is not None:
        variant_name = f"{variant_name}-{run_suffix}"

    if not osp.exists(config.LOG_FILE):
        os.makedirs(config.LOG_FILE)

    config.TENSORBOARD_DIR = os.path.join(config.TENSORBOARD_DIR, variant_name)
    config.CHECKPOINT_FOLDER = os.path.join(config.CHECKPOINT_FOLDER,
                                            variant_name)
    config.VIDEO_DIR = os.path.join(config.VIDEO_DIR, variant_name)
    config.LOG_FILE = os.path.join(config.LOG_FILE,
                                   f"{variant_name}.log")  # actually a logdir
    if run_type == "eval":
        # config.TRAINER_NAME = "ppo"
        config.NUM_PROCESSES = 6  # nice
    else:
        # Add necessary supervisory signals
        train_sensors = config.RL.AUX_TASKS.required_sensors
        config.SENSORS.extend(
            train_sensors)  # the task cfg sensors are overwritten by this one

    if run_id is None:
        random.seed(config.TASK_CONFIG.SEED)
        np.random.seed(config.TASK_CONFIG.SEED)
        trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
        assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
        trainer = trainer_init(config)

        # If not doing multiple runs (with run_id), default behavior is to overwrite
        if run_type == "train":
            if ckpt_path:  # default is "", so test truthiness rather than None
                ckpt_dir, ckpt_file = os.path.split(ckpt_path)
                ckpt_index = ckpt_file.split('.')[1]
                ckpt = int(ckpt_index)
                start_updates = ckpt * config.CHECKPOINT_INTERVAL + 1
                trainer.train(ckpt_path=ckpt_path,
                              ckpt=ckpt,
                              start_updates=start_updates)
            elif not DO_PRESERVE_RUNS:
                # if os.path.exists(config.TENSORBOARD_DIR):
                #     print("Removing tensorboard directory...")
                #     shutil.rmtree(config.TENSORBOARD_DIR, ignore_errors=True)
                # if os.path.exists(config.CHECKPOINT_FOLDER):
                #     print("Removing checkpoint folder...")
                #     shutil.rmtree(config.CHECKPOINT_FOLDER, ignore_errors=True)
                # if os.path.exists(config.LOG_FILE):
                #     print("Removing log file...")
                #     shutil.rmtree(config.LOG_FILE, ignore_errors=True)
                trainer.train()
            else:
                # if os.path.exists(config.TENSORBOARD_DIR) or os.path.exists(config.CHECKPOINT_FOLDER) \
                #     or os.path.exists(config.LOG_FILE):
                #     print(f"TB dir exists: {os.path.exists(config.TENSORBOARD_DIR)}")
                #     print(f"Ckpt dir exists: {os.path.exists(config.CHECKPOINT_FOLDER)}")
                #     print(f"Log file exists: {os.path.exists(config.LOG_FILE)}")
                #     print("Run artifact exists, please clear manually")
                #     exit(1)
                trainer.train()
        elif run_type == "eval":
            trainer.eval(ckpt_path)
        return

    run_prefix = f'run_{run_id}'
    seed = run_id
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(config.TASK_CONFIG.SEED)

    # Extend the earlier path modifications with the run prefix
    tb_dir = os.path.join(config.TENSORBOARD_DIR, run_prefix)
    ckpt_dir = os.path.join(config.CHECKPOINT_FOLDER, run_prefix)
    log_dir, log_file = os.path.split(config.LOG_FILE)
    log_file_extended = f"{run_prefix}--{log_file}"
    log_file_path = os.path.join(log_dir, log_file_extended)

    config.TASK_CONFIG.SEED = seed
    config.TENSORBOARD_DIR = tb_dir
    config.CHECKPOINT_FOLDER = ckpt_dir
    config.LOG_FILE = log_file_path

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)
    if run_type == "train":
        if not ckpt_path:
            # if DO_PRESERVE_RUNS and (os.path.exists(tb_dir) or os.path.exists(ckpt_dir) or os.path.exists(log_file_extended)):
            #     print(f"TB dir exists: {os.path.exists(tb_dir)}")
            #     print(f"Ckpt dir exists: {os.path.exists(ckpt_dir)}")
            #     print(f"Log file exists: {os.path.exists(log_file_extended)}")
            #     print("Run artifact exists, please clear manually")
            #     exit(1)
            # else:
            #     shutil.rmtree(tb_dir, ignore_errors=True)
            #     shutil.rmtree(ckpt_dir, ignore_errors=True)
            #     if os.path.exists(log_file_extended):
            #         os.remove(log_file_extended)
            trainer.train()
        else:  # Resume training from checkpoint
            # Parse the checkpoint #, calculate num updates, update the config
            ckpt_dir, ckpt_file = os.path.split(ckpt_path)
            ckpt_index = ckpt_file.split('.')[1]
            true_path = os.path.join(ckpt_dir, run_prefix,
                                     f"{run_prefix}.{ckpt_index}.pth")
            ckpt = int(ckpt_index)
            start_updates = ckpt * config.CHECKPOINT_INTERVAL + 1
            trainer.train(ckpt_path=true_path,
                          ckpt=ckpt,
                          start_updates=start_updates)
    else:
        ckpt_dir, ckpt_file = os.path.split(ckpt_path)
        ckpt_index = ckpt_file.split('.')[1]
        true_path = os.path.join(ckpt_dir, run_prefix,
                                 f"{run_prefix}.{ckpt_index}.pth")
        trainer.eval(true_path)
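# run_exp is presumably driven by a small CLI wrapper; a sketch of what that
# entry point could look like (the argument names are assumptions):
import argparse


def cli_main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp-config", type=str, required=True)
    parser.add_argument("--run-type", choices=["train", "eval"], required=True)
    parser.add_argument("--ckpt-path", type=str, default="")
    parser.add_argument("--run-id", type=int, default=None)
    parser.add_argument("--run-suffix", type=str, default="")
    parser.add_argument("opts", nargs=argparse.REMAINDER)
    args = parser.parse_args()
    run_exp(args.exp_config, args.run_type, args.ckpt_path,
            args.run_id, args.run_suffix, args.opts)


if __name__ == "__main__":
    cli_main()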
Example #7
def test_noise_models_rgbd():
    DEMO_MODE = False
    N_STEPS = 100

    config = get_config()
    config.defrost()
    config.SIMULATOR.SCENE = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    valid_start_position = [-1.3731, 0.08431, 8.60692]

    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]
    test_episode = NavigationEpisode(
        episode_id="0",
        scene_id=config.SIMULATOR.SCENE,
        start_position=valid_start_position,
        start_rotation=start_rotation,
        goals=[NavigationGoal(position=goal_position)],
    )

    print(f"{test_episode}")
    with habitat.Env(config=config, dataset=None) as env:

        env.episode_iterator = iter([test_episode])
        no_noise_obs = [env.reset()]
        no_noise_states = [env.sim.get_agent_state()]

        actions = [
            sample_non_stop_action(env.action_space) for _ in range(N_STEPS)
        ]
        for action in actions:
            no_noise_obs.append(env.step(action))
            no_noise_states.append(env.sim.get_agent_state())
        env.close()

        config.defrost()

        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL = "GaussianNoiseModel"
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.INTENSITY_CONSTANT = 0.5
        config.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL = "RedwoodDepthNoiseModel"

        config.SIMULATOR.ACTION_SPACE_CONFIG = "pyrobotnoisy"
        config.SIMULATOR.NOISE_MODEL = habitat.Config()
        config.SIMULATOR.NOISE_MODEL.ROBOT = "LoCoBot"
        config.SIMULATOR.NOISE_MODEL.CONTROLLER = "Proportional"
        config.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER = 0.5

        config.freeze()

        env = habitat.Env(config=config, dataset=None)

        env.episode_iterator = iter([test_episode])

        obs = env.reset()
        # np.float was removed from recent NumPy releases; use np.float64
        assert np.linalg.norm(
            obs["rgb"].astype(np.float64) -
            no_noise_obs[0]["rgb"].astype(np.float64)
        ) > 1.5e-2 * np.linalg.norm(
            no_noise_obs[0]["rgb"].astype(np.float64)
        ), "No RGB noise detected."

        assert np.linalg.norm(
            obs["depth"].astype(np.float64) -
            no_noise_obs[0]["depth"].astype(np.float64)
        ) > 1.5e-2 * np.linalg.norm(
            no_noise_obs[0]["depth"].astype(np.float64)
        ), "No Depth noise detected."

        images = []
        state = env.sim.get_agent_state()
        angle_diffs = []
        pos_diffs = []
        for action in actions:
            prev_state = state
            obs = env.step(action)
            state = env.sim.get_agent_state()
            position_change = np.linalg.norm(np.array(state.position) -
                                             np.array(prev_state.position),
                                             ord=2)

            if action["action"][:5] == "TURN_":
                angle_diff = abs(
                    angle_between_quaternions(state.rotation,
                                              prev_state.rotation) -
                    np.deg2rad(config.SIMULATOR.TURN_ANGLE))
                angle_diffs.append(angle_diff)
            else:
                pos_diffs.append(
                    abs(position_change - config.SIMULATOR.FORWARD_STEP_SIZE))

            if DEMO_MODE:
                images.append(observations_to_image(obs, {}))

        if DEMO_MODE:
            images_to_video(images, "data/video/test_noise", "test_noise")

        assert (np.mean(angle_diffs) >
                0.025), "No turn action actuation noise detected."
        assert (np.mean(pos_diffs) >
                0.025), "No forward action actuation noise detected."
Example #8
    def _get_observation_space(self, *args, **kwargs):
        # Reconstructed from the truncated snippet: an unbounded 3D position
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(3,),
            dtype=np.float32,
        )

    # This is called whenever reset is called or an action is taken
    def get_observation(self, observations, *args, episode, **kwargs):
        return self._sim.get_agent_state().position


# %%
if __name__ == "__main__":
    config = habitat.get_config(
        config_paths="./configs/test/habitat_all_sensors_test.yaml"
    )

    config.defrost()
    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "agent_position_sensor"
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    try:
        env.close()
    except NameError:
        pass
    env = habitat.Env(config=config)

    # %%
    obs = env.reset()
Example #9
def example():
    config = habitat.get_config("configs/tasks/pointnav_my.yaml")
    config.defrost()

    # Add things to the config to for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The type field is used to look-up the measure in the registry.
    # By default, the things are registered with the class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    if len(sys.argv) > 1:
        max_depth = int(sys.argv[1])
    else:
        max_depth = 3
    env = habitat.Env(config=config)
    with torch.no_grad():
        agent = MBTSAgent(max_depth=max_depth)

    print("Environment creation successful")
    observations = env.reset()

    cv2.imshow("",
               observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Agent stepping around inside environment.")
    print("==========================================")
    print("Search tree depth : {}".format(max_depth))
    #     print(env.observation_space)
    count_steps = 0
    view_flag = False
    action = None
    while not env.episode_over:
        keystroke = cv2.waitKey(0)

        if keystroke == ord(RESET):
            observations = env.reset()
            count_steps = 0
            view_flag = False
            action = None
            cv2.imshow("", observations["depth"])
            continue
        if keystroke == ord(FORWARD_KEY):
            action = 1  #habitat.SimulatorActions.MOVE_FORWARD
            print("action: FORWARD")
            view_flag = False
        elif keystroke == ord(LEFT_KEY):
            action = 2  #habitat.SimulatorActions.TURN_LEFT
            print("action: LEFT")
            view_flag = False
        elif keystroke == ord(RIGHT_KEY):
            action = 3  #habitat.SimulatorActions.TURN_RIGHT
            print("action: RIGHT")
            view_flag = False
        elif keystroke == ord(FINISH):
            action = 0  #habitat.SimulatorActions.STOP
            print("action: FINISH")
            view_flag = False
        elif keystroke == ord(AGENT):
            state_depth = observations['depth'].swapaxes(0, 2).swapaxes(1, 2)
            #             action = agent.greedy_policy(state_depth)
            action, q = agent.predict(state_depth)
            view_flag = False
            print("Agent action: {}".format(ACTION_DICT_REVERSE[action]))


#             print(q)
        elif keystroke == ord(VIEW_FORWARD_KEY):
            agent.show_future(0)
            continue
        elif keystroke == ord(VIEW_LEFT_KEY):
            agent.show_future(1)
            continue
        elif keystroke == ord(VIEW_RIGHT_KEY):
            agent.show_future(2)
            continue
        elif keystroke == ord(VIEW_FINISH):
            continue
        elif keystroke == ord(VIEW_AGENT):
            agent.show_future()
            continue
        else:
            print("INVALID KEY")
            continue

        observations = env.step(action)
        count_steps += 1
        print("x: {}, y: {}".format(observations["agent_position"][0],
                                    observations["agent_position"][2]))
        cv2.imshow(
            "",
            observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Episode finished after {} steps.".format(count_steps))

    if action == 0:
        pos_x, pos_y = observations['agent_position'][0], observations[
            'agent_position'][2]
        goal_x, goal_y = 2, 12
        dist = math.sqrt((pos_x - goal_x)**2 + (pos_y - goal_y)**2)
        if dist < 2:
            print("you successfully navigated to destination point")
        else:
            print(dist)
            print("your navigation was unsuccessful")
Example #10
def habitat_objectnav_make_env_fn(cfg,
                                  rank,
                                  log_dir,
                                  visdom_name='main',
                                  visdom_log_file=None,
                                  vis_interval=200,
                                  visdom_server='localhost',
                                  visdom_port='8097'):

    habitat_cfg = get_config(cfg.task.habitat.config_file)

    training_scenes = [
        'PX4nDJXEHrG', '5q7pvUzZiYa', 'S9hNv5qa7GM', 'ac26ZMwG7aT',
        '29hnd4uzFmX', '82sE5b5pLXE', 'p5wJjkQkbXX', 'B6ByNegPMKs',
        '17DRP5sb8fy', 'pRbA3pwrgk9', 'gZ6f7yhEvPG', 'HxpKQynjfin',
        'ZMojNkEp431', '5LpN3gDmAk7', 'dhjEzFoUFzH', 'vyrNrziPKCB',
        'sKLMLpTHeUy', '759xd9YjKW5', 'sT4fr6TAbpF', '1pXnuDYAj8r',
        'E9uDoFAP3SH', 'GdvgFV5R1Z5', 'rPc6DW4iMge', 'D7N2EKCX4Sj',
        'uNb9QFRL6hY', 'VVfe2KiqLaN', 'Vvot9Ly1tCj', 's8pcmisQ38h',
        'EDJbREhghzL', 'YmJkqBEsHnH', 'XcA2TqTSSAj', '7y3sRwLe3Va',
        'e9zR4mvMWw7', 'JeFG25nYj2p', 'VLzqgDo317F', 'kEZ7cmS4wCh',
        'r1Q1Z4BcV1o', 'qoiz87JEwZ2', '1LXtFkjw3qL', 'VFuaQ6m2Qom',
        'b8cTxDM8gDG', 'ur6pFq6Qu1A', 'V2XKFyX4ASd', 'Uxmj2M2itWa',
        'Pm6F8kyY3z2', 'PuKPg4mmafe', '8WUmhLawc2A', 'ULsKaCPVFJR',
        'r47D5H71a5s', 'jh4fc5c5qoQ', 'JF19kD82Mey', 'D7G3Y4RVNrH',
        'cV4RVeZvu5T', 'mJXqzFtmKg4', 'i5noydFURQK', 'aayBHfsNo7d'
    ]

    length = len(training_scenes) // cfg.training.num_envs

    habitat_cfg.defrost()
    if rank < cfg.training.num_envs:
        habitat_cfg.DATASET.SPLIT = 'train'
    else:
        habitat_cfg.DATASET.SPLIT = 'val'

    cfg.defrost()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR = habitat.Config()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.TYPE = 'CustomObjectSensor'
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.GOAL_SPEC = "OBJECT_IMG"
    habitat_cfg.DATASET.CONTENT_SCENES = training_scenes[rank * length:(rank + 1) * length]
    habitat_cfg.freeze()
    cfg.task.habitat.TASK_CONFIG = habitat_cfg
    cfg.freeze()

    def filter_fn(episode):
        # Keep only episodes whose geodesic distance is at most 3 m
        return episode.info['geodesic_distance'] <= 3.0

    dataset = make_dataset(habitat_cfg.DATASET.TYPE,
                           config=habitat_cfg.DATASET,
                           **{'filter_fn': filter_fn})
    env = ObjectNavENV(cfg, dataset=dataset)
    env.seed(rank)
    env = VisdomMonitor(env,
                        directory=os.path.join(log_dir, visdom_name),
                        video_callable=lambda x: x % vis_interval == 0,
                        uid=str(rank),
                        server=visdom_server,
                        port=visdom_port,
                        visdom_log_file=visdom_log_file,
                        visdom_env=visdom_name)

    return env
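# A sketch of how this factory might be used to build a vectorized set of
# environments, one per rank (the argument plumbing is an assumption):
def build_envs(cfg, log_dir):
    num_envs = cfg.training.num_envs
    return habitat.VectorEnv(
        make_env_fn=habitat_objectnav_make_env_fn,
        env_fn_args=tuple((cfg, rank, log_dir) for rank in range(num_envs)),
    )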
Example #11
    from envs.habitat_objectnav_env import ObjectNavENV
    from habitat.config.default import get_config as get_habitat_config
    from configs.default_cfg import get_config
    import habitat
    from habitat import make_dataset
    import numpy as np
    import envs.habitat_utils
    import time
    import psutil

    pid = os.getpid()
    current_process = psutil.Process(pid)

    habitat_cfg = get_habitat_config('configs/objectnav_mp3d.yaml')
    habitat_cfg.defrost()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR = habitat.Config()
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.TYPE = 'CustomObjectSensor'
    habitat_cfg.TASK.CUSTOM_OBJECT_GOAL_SENSOR.GOAL_SPEC = "OBJECT_IMG"
    habitat_cfg.freeze()
    cfg = get_config()
    cfg.defrost()
    cfg.task.habitat.TASK_CONFIG = habitat_cfg
    cfg.freeze()

    training_scenes = [
        'PX4nDJXEHrG', '5q7pvUzZiYa', 'S9hNv5qa7GM', 'ac26ZMwG7aT',
        '29hnd4uzFmX', '82sE5b5pLXE', 'p5wJjkQkbXX', 'B6ByNegPMKs',
        '17DRP5sb8fy', 'pRbA3pwrgk9', 'gZ6f7yhEvPG', 'HxpKQynjfin',
        'ZMojNkEp431', '5LpN3gDmAk7', 'dhjEzFoUFzH', 'vyrNrziPKCB',
        'sKLMLpTHeUy', '759xd9YjKW5', 'sT4fr6TAbpF', '1pXnuDYAj8r',
        'E9uDoFAP3SH', 'GdvgFV5R1Z5', 'rPc6DW4iMge', 'D7N2EKCX4Sj',
Example #12
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--task-config", type=str, default="configs/tasks/pointnav.yaml")
    # argparse's type=bool treats any non-empty string as True,
    # so parse boolean flags explicitly
    def str2bool(s):
        return str(s).lower() in ("1", "true", "yes")
    parser.add_argument("--publish-odom", type=str2bool, default=True)
    parser.add_argument("--create-map", type=str2bool, default=False)
    parser.add_argument("--save-observations", type=str2bool, default=False)
    parser.add_argument("--preset-trajectory", type=str2bool, default=False)
    args = parser.parse_args()
    # Now define the config for the sensor
    config = habitat.get_config(args.task_config)
    config.defrost()
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "position_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()
    max_depth = config.SIMULATOR.DEPTH_SENSOR.MAX_DEPTH
    print(args.create_map)

    agent = KeyboardAgent(args.save_observations)
    env = habitat.Env(config=config)
    if args.create_map:
        print('create map')
        top_down_map = maps.get_topdown_map(env.sim, map_resolution=(5000, 5000))
        print(top_down_map.min(), top_down_map.mean(), top_down_map.max())
        recolor_map = np.array([[0, 0, 0], [128, 128, 128], [255, 255, 255]], dtype=np.uint8)
        range_x = np.where(np.any(top_down_map, axis=1))[0]
        range_y = np.where(np.any(top_down_map, axis=0))[0]
        padding = int(np.ceil(top_down_map.shape[0] / 125))
        range_x = (
            max(range_x[0] - padding, 0),
            min(range_x[-1] + padding + 1, top_down_map.shape[0]),
        )
        range_y = (
            max(range_y[0] - padding, 0),
            min(range_y[-1] + padding + 1, top_down_map.shape[1]),
        )
        top_down_map = top_down_map[
            range_x[0] : range_x[1], range_y[0] : range_y[1]
        ]
        top_down_map = recolor_map[top_down_map]
        imsave('top_down_map.png', top_down_map)
    observations = env.reset()
    if not args.preset_trajectory:
        while not keyboard.is_pressed('q'):
            action = agent.act(observations)
            observations = env.step(action)
    else:
        with open('actions.txt', 'r') as fin:
            actions = [int(x) for x in fin.readlines()]
        for action in actions:
            agent.act(observations)
            observations = env.step(action)
    if args.save_observations:
        with h5py.File('observations.hdf5', 'w') as f:
            f.create_dataset("points", data=np.array(agent.points))
            f.create_dataset("true_positions", data=np.array(agent.true_positions))
            f.create_dataset("true_rotations", data=np.array(agent.true_rotations))
            f.create_dataset("predicted_positions", data=np.array(agent.predicted_positions))
            f.create_dataset("predicted_rotations", data=np.array(agent.predicted_rotations))
            f.create_dataset("rgb", data=np.array(agent.rgbs))
            f.create_dataset("depth", data=np.array(agent.depths))
            f.create_dataset("true_timestamps", data=np.array(agent.true_timestamps))
            f.create_dataset("predicted_timestamps", data=np.array(agent.predicted_timestamps))
        print(np.array(agent.points).shape, np.array(agent.rgbs).shape)
        with open('actions.txt', 'w') as fout:
            for action in agent.actions:
                print(action, file=fout)
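# To consume the observations.hdf5 written above, the datasets can be read
# back with h5py; a minimal sketch:
import h5py

with h5py.File('observations.hdf5', 'r') as f:
    rgb = f['rgb'][:]
    depth = f['depth'][:]
    true_positions = f['true_positions'][:]
print(rgb.shape, depth.shape, true_positions.shape)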
Example #13
def example():
    config = habitat.get_config("configs/tasks/pointnav_my.yaml")
    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    env = habitat.Env(config=config)
    agent = DQN_agent(env, hyperparams)
    agent.load_model_best()
    #     agent.load_model()
    print("Environment creation successful")
    observations = env.reset()
    #     print(observations)
    print("Destination, distance: {:3f}, theta(radians): {:.2f}".format(
        observations["pointgoal_with_gps_compass"][0],
        observations["pointgoal_with_gps_compass"][1]))
    cv2.imshow("",
               observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Agent stepping around inside environment.")
    print(env.observation_space)
    count_steps = 0

    while not env.episode_over:
        keystroke = cv2.waitKey(0)

        if keystroke == ord(FORWARD_KEY):
            action = 1  #habitat.SimulatorActions.MOVE_FORWARD
            print("action: FORWARD")
        elif keystroke == ord(LEFT_KEY):
            action = 2  #habitat.SimulatorActions.TURN_LEFT
            print("action: LEFT")
        elif keystroke == ord(RIGHT_KEY):
            action = 3  #habitat.SimulatorActions.TURN_RIGHT
            print("action: RIGHT")
        elif keystroke == ord(FINISH):
            action = 0  #habitat.SimulatorActions.STOP
            print("action: FINISH")
        elif keystroke == ord(AGENT):
            state_depth = observations['depth'].swapaxes(0, 2).swapaxes(1, 2)
            #             action = agent.greedy_policy(state_depth)
            action, q = agent.eval_model.predict(state_depth)
            print(q)
        else:
            print("INVALID KEY")
            continue

        observations = env.step(action)
        reward, done = agent.reward_func(observations, action)
        print(reward, done)
        count_steps += 1
        #         env = habitat.Env(
        #             config=config
        #         )
        #         observations = env.reset()
        # x:1.9, y: 0.17
        #         print("Destination, distance: {:3f}, theta(radians): {:.2f}".format(
        #             observations["pointgoal_with_gps_compass"][0], observations["pointgoal_with_gps_compass"][1]))
        print("x: {}, y: {}".format(observations["agent_position"][0],
                                    observations["agent_position"][2]))
        cv2.imshow(
            "",
            observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Episode finished after {} steps.".format(count_steps))

    if action == 0 and observations["pointgoal_with_gps_compass"][0] < 0.2:
        print("you successfully navigated to destination point")
    else:
        print("your navigation was unsuccessful")
Example #14
def example():
    config = habitat.get_config("configs/tasks/pointnav_my.yaml")
    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    env = habitat.Env(config=config)
    with torch.no_grad():
        agent = AE_DQN_agent(env, hyperparams)
        #         agent.load_model_best_ae()
        #         agent.load_model_best_dqn()
        agent.load_model_best_ae()
        agent.load_model_best_dqn()
        agent.eval_mode()
    # agent.load_model()
    print("Environment creation successful")
    observations = env.reset()
    #     print(observations)
    print("Destination, distance: {:3f}, theta(radians): {:.2f}".format(
        observations["pointgoal_with_gps_compass"][0],
        observations["pointgoal_with_gps_compass"][1]))
    cv2.imshow("",
               observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Agent stepping around inside environment.")
    print(env.observation_space)
    count_steps = 0
    view_flag = False
    while not env.episode_over:
        keystroke = cv2.waitKey(0)

        if keystroke == ord(FORWARD_KEY):
            action = 1  #habitat.SimulatorActions.MOVE_FORWARD
            print("action: FORWARD")
            view_flag = False
        elif keystroke == ord(LEFT_KEY):
            action = 2  #habitat.SimulatorActions.TURN_LEFT
            print("action: LEFT")
            view_flag = False
        elif keystroke == ord(RIGHT_KEY):
            action = 3  #habitat.SimulatorActions.TURN_RIGHT
            print("action: RIGHT")
            view_flag = False
        elif keystroke == ord(FINISH):
            action = 0  #habitat.SimulatorActions.STOP
            print("action: FINISH")
            view_flag = False
        elif keystroke == ord(AGENT):
            state_depth = observations['depth'].swapaxes(0, 2).swapaxes(1, 2)
            #             action = agent.greedy_policy(state_depth)
            action, q = agent.eval_model.predict(state_depth)
            view_flag = False
            print(q)
        elif keystroke == ord(VIEW_FORWARD_KEY):
            action = np.array([0, 1, 0, 0])
            if view_flag:
                state_depth = view_obs.swapaxes(0, 2).swapaxes(1, 2)
            else:
                state_depth = observations['depth'].swapaxes(0,
                                                             2).swapaxes(1, 2)
                view_flag = True
        elif keystroke == ord(VIEW_LEFT_KEY):
            action = np.array([0, 0, 1, 0])
            if view_flag:
                state_depth = view_obs.swapaxes(0, 2).swapaxes(1, 2)
            else:
                state_depth = observations['depth'].swapaxes(0,
                                                             2).swapaxes(1, 2)
                view_flag = True
        elif keystroke == ord(VIEW_RIGHT_KEY):
            action = np.array([0, 0, 0, 1])
            if view_flag:
                state_depth = view_obs.swapaxes(0, 2).swapaxes(1, 2)
            else:
                state_depth = observations['depth'].swapaxes(0,
                                                             2).swapaxes(1, 2)
                view_flag = True
        elif keystroke == ord(VIEW_FINISH):
            action = np.array([1, 0, 0, 0])
            if view_flag:
                state_depth = view_obs.swapaxes(0, 2).swapaxes(1, 2)
            else:
                state_depth = observations['depth'].swapaxes(0,
                                                             2).swapaxes(1, 2)
                view_flag = True

        elif keystroke == ord(VIEW_AGENT):
            if view_flag:
                state_depth = view_obs.swapaxes(0, 2).swapaxes(1, 2)
            else:
                state_depth = observations['depth'].swapaxes(0,
                                                             2).swapaxes(1, 2)
                view_flag = True
            action, q = agent.eval_model.predict(state_depth)
            print(action)
            if action == 0:
                print("predict finish")
                continue
            action_vector = np.zeros(4)
            action_vector[action] = 1
            action = action_vector

        else:
            print("INVALID KEY")
            continue

        if view_flag:
            with torch.no_grad():

                #                 print(state_depth)
                #                 print(state_depth.shape)
                #                 state_depth = observations['depth'].swapaxes(0, 2).swapaxes(1, 2)
                action = FloatTensor(action)
                view_obs = agent.ae_model.predict(state_depth, action)[0]
                view_obs = view_obs.detach().cpu().numpy().swapaxes(
                    0, 2).swapaxes(0, 1)
                #                 print(view_obs.shape)

                view_obs = np.clip(view_obs, 0, 1)
                # print(view_obs)
            cv2.imshow("view", view_obs)
            continue

        observations = env.step(action)
        #         reward, done = agent.reward_func(observations, action)
        #         print(reward, done)
        count_steps += 1
        #         env = habitat.Env(
        #             config=config
        #         )
        #         observations = env.reset()
        # x:1.9, y: 0.17
        #         print("Destination, distance: {:3f}, theta(radians): {:.2f}".format(
        #             observations["pointgoal_with_gps_compass"][0], observations["pointgoal_with_gps_compass"][1]))
        print("x: {}, y: {}".format(observations["agent_position"][0],
                                    observations["agent_position"][2]))
        cv2.imshow(
            "",
            observations["depth"])  #transform_rgb_bgr(observations["rgb"]))

    print("Episode finished after {} steps.".format(count_steps))

    if action == 0:
        pos_x, pos_y = observations['agent_position'][0], observations[
            'agent_position'][2]
        goal_x, goal_y = 2, 12
        dist = math.sqrt((pos_x - goal_x)**2 + (pos_y - goal_y)**2)
        if dist < 2:
            print("you successfully navigated to destination point")
        else:
            print(dist)
            print("your navigation was unsuccessful")
Example #15
def construct_envs(config: Config, env_class: Type[Union[Env,
                                                         RLEnv]]) -> VectorEnv:
    r"""Create VectorEnv object with specified config and env class type.
    To allow better performance, the dataset is split into smaller ones,
    one for each individual env, grouped by scenes.

    Args:
        config: configs that contain num_processes as well as information
        necessary to create individual environments.
        env_class: class type of the envs to be created.

    Returns:
        VectorEnv object created according to specification.
    """

    num_processes = config.NUM_PROCESSES
    configs = []
    env_classes = [env_class for _ in range(num_processes)]
    dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE,
                           **{'filter_fn': filter_fn})
    scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES
    if "*" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:
        scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)

    if num_processes > 1:
        if len(scenes) == 0:
            raise RuntimeError(
                "No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes"
            )

        if len(scenes) < num_processes:
            raise RuntimeError("reduce the number of processes as there "
                               "aren't enough number of scenes")

        random.shuffle(scenes)

    scene_splits = [[] for _ in range(num_processes)]
    for idx, scene in enumerate(scenes):
        scene_splits[idx % len(scene_splits)].append(scene)
    print('Total processes: %d' % num_processes)
    for i, s in enumerate(scene_splits):
        print('proc %d :' % i, s)
    assert sum(map(len, scene_splits)) == len(scenes)

    for i in range(num_processes):
        proc_config = config.clone()
        proc_config.defrost()

        task_config = proc_config.TASK_CONFIG
        if len(scenes) > 0:
            task_config.DATASET.CONTENT_SCENES = scene_splits[i]

        task_config.TASK.CUSTOM_OBJECT_GOAL_SENSOR = habitat.Config()
        task_config.TASK.CUSTOM_OBJECT_GOAL_SENSOR.TYPE = 'CustomObjectSensor'
        task_config.TASK.CUSTOM_OBJECT_GOAL_SENSOR.GOAL_SPEC = "OBJECT_IMG"

        task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (
            config.SIMULATOR_GPU_ID)

        task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS

        proc_config.freeze()
        configs.append(proc_config)

    envs = habitat.VectorEnv(
        make_env_fn=make_env_fn,
        env_fn_args=tuple(
            zip(configs, env_classes, range(num_processes),
                [{'filter_fn': filter_fn}] * num_processes)))
    return envs
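# construct_envs zips four positional arguments per process, so the
# make_env_fn it relies on presumably looks roughly like this (a sketch; the
# real implementation may differ):
def make_env_fn(config, env_class, rank, kwargs):
    dataset = make_dataset(config.TASK_CONFIG.DATASET.TYPE,
                           config=config.TASK_CONFIG.DATASET, **kwargs)
    env = env_class(config=config, dataset=dataset)
    env.seed(rank)
    return env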