Example #1
def main():
    habitat.SimulatorActions.extend_action_space("NOISY_LEFT")
    habitat.SimulatorActions.extend_action_space("NOISY_RIGHT")
    habitat.SimulatorActions.extend_action_space("NOISY_FORWARD")

    config = habitat.get_config(
        config_paths="../habitat-api/configs/tasks/pointnav.yaml")
    config.defrost()
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoNoisyMove"
    config.DATASET.DATA_PATH = "../habitat-api/" + config.DATASET.DATA_PATH
    config.SIMULATOR.SCENE = '../habitat-api/' + config.SIMULATOR.SCENE
    config.freeze()

    env = habitat.Env(config=config)
    env.reset()
    env.step(habitat.SimulatorActions.NOISY_LEFT)
    env.step(habitat.SimulatorActions.NOISY_RIGHT)
    env.step(habitat.SimulatorActions.NOISY_FORWARD)
    env.close()

    config.defrost()
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoisyMove"
    config.freeze()

    env = habitat.Env(config=config)
    env.reset()
    env.step(habitat.SimulatorActions.NOISY_LEFT)
    env.step(habitat.SimulatorActions.NOISY_RIGHT)
    env.step(habitat.SimulatorActions.NOISY_FORWARD)
    env.close()
Example #2
def main():
    HabitatSimActions.extend_action_space("STRAFE_LEFT")
    HabitatSimActions.extend_action_space("STRAFE_RIGHT")

    config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
    config.defrost()

    config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + [
        "STRAFE_LEFT",
        "STRAFE_RIGHT",
    ]
    config.TASK.ACTIONS.STRAFE_LEFT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_LEFT.TYPE = "StrafeLeft"
    config.TASK.ACTIONS.STRAFE_RIGHT = habitat.config.Config()
    config.TASK.ACTIONS.STRAFE_RIGHT.TYPE = "StrafeRight"
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoNoiseStrafe"
    config.freeze()

    with habitat.Env(config=config) as env:
        env.reset()
        env.step("STRAFE_LEFT")
        env.step("STRAFE_RIGHT")

    config.defrost()
    config.SIMULATOR.ACTION_SPACE_CONFIG = "NoiseStrafe"
    config.freeze()

    with habitat.Env(config=config) as env:
        env.reset()
        env.step("STRAFE_LEFT")
        env.step("STRAFE_RIGHT")
Example #3
def smoke_test_sensor(config, N_STEPS=100):
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    valid_start_position = [-1.3731, 0.08431, 8.60692]

    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # the starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction
    start_rotation = [0, 0, 0, 1]
    test_episode = NavigationEpisode(
        episode_id="0",
        scene_id=config.SIMULATOR.SCENE,
        start_position=valid_start_position,
        start_rotation=start_rotation,
        goals=[NavigationGoal(position=goal_position)],
    )

    with habitat.Env(config=config, dataset=None) as env:

        env.episode_iterator = iter([test_episode])
        no_noise_obs = env.reset()
        assert no_noise_obs is not None

        actions = [
            sample_non_stop_action(env.action_space) for _ in range(N_STEPS)
        ]
        for action in actions:
            assert env.step(action) is not None
Example #4
def main():
    # Get the default config node
    config = habitat.get_config(config_paths="configs/tasks/pointnav.yaml")
    config.defrost()

    # Add things to the config for the measure
    config.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The TYPE field is used to look up the measure in the registry.
    # By default, classes are registered under their class name
    config.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")

    # Now define the config for the sensor
    config.TASK.AGENT_POSITION_SENSOR = habitat.Config()
    # Use the custom name
    config.TASK.AGENT_POSITION_SENSOR.TYPE = "my_supercool_sensor"
    config.TASK.AGENT_POSITION_SENSOR.ANSWER_TO_LIFE = 42
    # Add the sensor to the list of sensors in use
    config.TASK.SENSORS.append("AGENT_POSITION_SENSOR")
    config.freeze()

    with habitat.Env(config=config) as env:
        print(env.reset()["agent_position"])
        print(env.get_metrics()["episode_info"])
        print(env.step("MOVE_FORWARD")["agent_position"])
        print(env.get_metrics()["episode_info"])
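The config in Example #4 refers to an EpisodeInfoExample measure and to a sensor registered under the name my_supercool_sensor that returns an agent_position observation; both must be defined before habitat.Env is constructed. A rough sketch of what those definitions might look like, assuming habitat-lab's registry and the habitat.Measure / habitat.Sensor base classes (the reset/update logic here is illustrative, not the original implementation):

import numpy as np
from gym import spaces

import habitat
from habitat.core.registry import registry


@registry.register_measure
class EpisodeInfoExample(habitat.Measure):
    def __init__(self, sim, config, **kwargs):
        self._config = config
        super().__init__()

    def _get_uuid(self, *args, **kwargs):
        # Key under which the metric appears in env.get_metrics()
        return "episode_info"

    def reset_metric(self, *args, episode, **kwargs):
        self._metric = {"value": self._config.VALUE}

    def update_metric(self, *args, episode, action, **kwargs):
        self._metric["value"] += 1


@registry.register_sensor(name="my_supercool_sensor")
class AgentPositionSensor(habitat.Sensor):
    def __init__(self, sim, config, **kwargs):
        super().__init__(config=config)
        self._sim = sim

    def _get_uuid(self, *args, **kwargs):
        # Key under which the observation is returned
        return "agent_position"

    def _get_sensor_type(self, *args, **kwargs):
        return habitat.SensorTypes.POSITION

    def _get_observation_space(self, *args, **kwargs):
        return spaces.Box(
            low=np.finfo(np.float32).min,
            high=np.finfo(np.float32).max,
            shape=(3,),
            dtype=np.float32,
        )

    def get_observation(self, observations, *args, episode, **kwargs):
        return self._sim.get_agent_state().position

With these registered, env.reset() returns the agent's position under the "agent_position" key and env.get_metrics() carries an "episode_info" entry, which is what the example prints.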
Example #5
    def __init__(self, par, seq_len, config_file):
        self.config_file = '{}/{}'.format(par.habitat_root, config_file)

        self.seq_len = seq_len
        self.dets_nClasses = par.dets_nClasses

        config = get_config(self.config_file)
        config.defrost()
        config.SIMULATOR.DEPTH_SENSOR.NORMALIZE_DEPTH = False
        config.freeze()
        self.hfov = float(config.SIMULATOR.DEPTH_SENSOR.HFOV) * np.pi / 180.
        #self.intr = np.zeros((4), dtype=np.float32)
        #self.intr[0] = 1./np.tan(self.hfov/2.)*(config.SIMULATOR.DEPTH_SENSOR.WIDTH/2.) # fx
        #self.intr[1] = 1./np.tan(self.hfov/2.)*(config.SIMULATOR.DEPTH_SENSOR.HEIGHT/2.) # fy
        #self.intr[2] = config.SIMULATOR.DEPTH_SENSOR.WIDTH/2. # cx
        #self.intr[3] = config.SIMULATOR.DEPTH_SENSOR.HEIGHT/2. # cy
        dataset = make_dataset(id_dataset=config.DATASET.TYPE,
                               config=config.DATASET)
        self.env = habitat.Env(config=config, dataset=dataset)

        self.orig_res = (config.SIMULATOR.DEPTH_SENSOR.HEIGHT,
                         config.SIMULATOR.DEPTH_SENSOR.WIDTH)
        self.cropSize = par.crop_size
        self.normalize = True
        self.pixFormat = 'NCHW'
Example #6
    def __init__(self, par, seq_len, config_file, action_list):
        self.config_file = '{}/{}'.format(par.habitat_root, config_file)

        self.seq_len = seq_len
        self.actions = action_list
        self.dets_nClasses = par.dets_nClasses

        config = get_config(self.config_file)
        config.defrost()
        config.SIMULATOR.DEPTH_SENSOR.NORMALIZE_DEPTH = False
        config.TASK.MEASUREMENTS.append('COLLISIONS')
        config.freeze()
        self.hfov = float(config.SIMULATOR.DEPTH_SENSOR.HFOV) * np.pi / 180.
        dataset = make_dataset(id_dataset=config.DATASET.TYPE,
                               config=config.DATASET)
        self.env = habitat.Env(config=config, dataset=dataset)

        self.cropSize = par.crop_size
        self.cropSizeObsv = par.crop_size_obsv
        self.orig_res = (config.SIMULATOR.DEPTH_SENSOR.HEIGHT,
                         config.SIMULATOR.DEPTH_SENSOR.WIDTH)
        self.normalize = True
        self.pixFormat = 'NCHW'

        self.dg = myDistanceToGoal(self.env.sim, config)
Example #7
def test_action_space_shortest_path():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    env = habitat.Env(config=config, dataset=None)

    # action space shortest path
    source_position = env.sim.sample_navigable_point()
    angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
    angle = np.radians(np.random.choice(angles))
    source_rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
    source = AgentState(source_position, source_rotation)

    reachable_targets = []
    unreachable_targets = []
    while len(reachable_targets) < 5:
        position = env.sim.sample_navigable_point()
        angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
        angle = np.radians(np.random.choice(angles))
        rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
        if env.sim.geodesic_distance(source_position, position) != np.inf:
            reachable_targets.append(AgentState(position, rotation))

    while len(unreachable_targets) < 3:
        position = env.sim.sample_navigable_point()
        angles = [x for x in range(-180, 180, config.SIMULATOR.TURN_ANGLE)]
        angle = np.radians(np.random.choice(angles))
        rotation = [0, np.sin(angle / 2), 0, np.cos(angle / 2)]
        if env.sim.geodesic_distance(source_position, position) == np.inf:
            unreachable_targets.append(AgentState(position, rotation))

    targets = reachable_targets
    shortest_path1 = env.sim.action_space_shortest_path(source, targets)
    assert shortest_path1 != []

    targets = unreachable_targets
    shortest_path2 = env.sim.action_space_shortest_path(source, targets)
    assert shortest_path2 == []

    targets = reachable_targets + unreachable_targets
    shortest_path3 = env.sim.action_space_shortest_path(source, targets)

    # shortest_path1 should be identical to shortest_path3
    assert len(shortest_path1) == len(shortest_path3)
    for i in range(len(shortest_path1)):
        assert np.allclose(shortest_path1[i].position,
                           shortest_path3[i].position)
        assert np.allclose(shortest_path1[i].rotation,
                           shortest_path3[i].rotation)
        assert shortest_path1[i].action == shortest_path3[i].action

    targets = unreachable_targets + [source]
    shortest_path4 = env.sim.action_space_shortest_path(source, targets)
    assert len(shortest_path4) == 1
    assert np.allclose(shortest_path4[0].position, source.position)
    assert np.allclose(shortest_path4[0].rotation, source.rotation)
    assert shortest_path4[0].action is None

    env.close()
Example #8
def test_pointgoal_with_gps_compass_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = [
        "POINTGOAL_WITH_GPS_COMPASS_SENSOR",
        "COMPASS_SENSOR",
        "GPS_SENSOR",
        "POINTGOAL_SENSOR",
    ]
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.POINTGOAL_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.GPS_SENSOR.DIMENSIONALITY = 3

    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:
        # start position is checked for validity for the specific test scene
        valid_start_position = [-1.3731, 0.08431, 8.60692]
        expected_pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, expected_pointgoal)

        # the starting quaternion is rotated 180 degrees about the z-axis, which
        # corresponds to the simulator using negative z as the forward direction
        start_rotation = [0, 0, 0, 1]

        env.episode_iterator = iter(
            [
                NavigationEpisode(
                    episode_id="0",
                    scene_id=config.SIMULATOR.SCENE,
                    start_position=valid_start_position,
                    start_rotation=start_rotation,
                    goals=[NavigationGoal(position=goal_position)],
                )
            ]
        )

        env.reset()
        for _ in range(100):
            obs = env.step(sample_non_stop_action(env.action_space))
            pointgoal = obs["pointgoal"]
            pointgoal_with_gps_compass = obs["pointgoal_with_gps_compass"]
            compass = float(obs["compass"][0])
            gps = obs["gps"]
            # check to see if taking non-stop actions will affect static point_goal
            assert np.allclose(
                pointgoal_with_gps_compass,
                quaternion_rotate_vector(
                    quaternion.from_rotation_vector(
                        compass * np.array([0, 1, 0])
                    ).inverse(),
                    pointgoal - gps,
                ),
                atol=1e-5,
            )
Example #9
def get_topdown_map(config_paths, map_name):

    config = habitat.get_config(config_paths=config_paths)
    dataset = habitat.make_dataset(id_dataset=config.DATASET.TYPE,
                                   config=config.DATASET)
    env = habitat.Env(config=config, dataset=dataset)
    env.reset()

    square_map_resolution = 5000
    top_down_map = maps.get_topdown_map(env.sim,
                                        map_resolution=(square_map_resolution,
                                                        square_map_resolution))

    # Image containing 0 if occupied, 1 if unoccupied, and 2 if border (if
    # the flag is set)
    top_down_map[np.where(top_down_map == 0)] = 125
    top_down_map[np.where(top_down_map == 1)] = 255
    top_down_map[np.where(top_down_map == 2)] = 0

    imageio.imsave(os.path.join(MAP_DIR, map_name + ".pgm"), top_down_map)

    complete_name = os.path.join(MAP_DIR, map_name + ".yaml")
    f = open(complete_name, "w+")

    f.write("image: " + map_name + ".pgm\n")
    f.write("resolution: " +
            str((COORDINATE_MAX - COORDINATE_MIN) / square_map_resolution) +
            "\n")
    f.write("origin: [" + str(-1) + "," + str(-1) + ", 0.000000]\n")
    f.write("negate: 0\noccupied_thresh: 0.65\nfree_thresh: 0.196")
    f.close()
Example #10
def test_tactile():
    config = get_config(CFG_TEST)
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config = get_config()
    config.defrost()
    config.TASK.SENSORS = ["PROXIMITY_SENSOR"]
    config.TASK.MEASUREMENTS = ["COLLISIONS"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)
    env.reset()
    random.seed(1234)

    for _ in range(20):
        _random_episode(env, config)
        env.reset()
        assert env.get_metrics()["collisions"] is None

        my_collisions_count = 0
        action = env._sim.index_forward_action
        for _ in range(10):
            obs = env.step(action)
            collisions = env.get_metrics()["collisions"]
            proximity = obs["proximity"]
            if proximity < COLLISION_PROXIMITY_TOLERANCE:
                my_collisions_count += 1

            assert my_collisions_count == collisions

    env.close()
Example #11
    def __init__(self, env_config_file):
        threading.Thread.__init__(self)
        self.env = habitat.Env(config=habitat.get_config(env_config_file))
        # always assume height equals width
        self._sensor_resolution = {
            "RGB": self.env._sim.config["RGB_SENSOR"]["HEIGHT"],
            "DEPTH": self.env._sim.config["DEPTH_SENSOR"]["HEIGHT"],
            "BC_SENSOR": self.env._sim.config["BC_SENSOR"]["HEIGHT"],
        }
        self.env._sim._sim.agents[
            0].move_filter_fn = self.env._sim._sim._step_filter
        self.observations = self.env.reset()

        self.env._sim._sim.agents[0].state.velocity = np.float32([0, 0, 0])
        self.env._sim._sim.agents[0].state.angular_velocity = np.float32(
            [0, 0, 0])

        self._pub_rgb = rospy.Publisher("~rgb",
                                        numpy_msg(Floats),
                                        queue_size=1)
        self._pub_depth = rospy.Publisher("~depth",
                                          numpy_msg(Floats),
                                          queue_size=1)

        # additional RGB sensor I configured
        self._pub_bc_sensor = rospy.Publisher("~bc_sensor",
                                              numpy_msg(Floats),
                                              queue_size=1)

        self._pub_depth_and_pointgoal = rospy.Publisher("depth_and_pointgoal",
                                                        numpy_msg(Floats),
                                                        queue_size=1)
        print("created habitat_plant succsefully")
Example #12
def shortest_path_example(mode):
    config = habitat.get_config(config_file="tasks/pointnav.yaml")
    env = habitat.Env(config=config)
    goal_radius = env.episodes[0].goals[0].radius
    if goal_radius is None:
        goal_radius = config.SIMULATOR.FORWARD_STEP_SIZE
    follower = ShortestPathFollower(env.sim, goal_radius, False)
    follower.mode = mode

    print("Environment creation successful")
    for episode in range(3):
        observations = env.reset()
        dirname = os.path.join(IMAGE_DIR, "shortest_path_example", mode,
                               "%02d" % episode)
        if os.path.exists(dirname):
            shutil.rmtree(dirname)
        os.makedirs(dirname)
        print("Agent stepping around inside environment.")
        count_steps = 0
        while not env.episode_over:
            best_action = follower.get_next_action(
                env.current_episode.goals[0].position)
            observations = env.step(SIM_NAME_TO_ACTION[best_action.value])
            count_steps += 1
            im = observations["rgb"]
            imageio.imsave(os.path.join(dirname, "%03d.jpg" % count_steps), im)
        print("Episode finished after {} steps.".format(count_steps))
Example #13
def test_mp3d_eqa_sim():
    eqa_config = get_config(CFG_TEST)

    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
        eqa_config.DATASET
    ):
        pytest.skip("Please download Matterport3D EQA dataset to data folder.")

    dataset = make_dataset(
        id_dataset=eqa_config.DATASET.TYPE, config=eqa_config.DATASET
    )
    with habitat.Env(config=eqa_config, dataset=dataset) as env:
        env.episodes = dataset.episodes[:EPISODES_LIMIT]

        env.reset()
        while not env.episode_over:
            obs = env.step(env.task.action_space.sample())
            if not env.episode_over:
                assert "rgb" in obs, "RGB image is missing in observation."
                assert obs["rgb"].shape[:2] == (
                    eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                    eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,
                ), (
                    "Observation resolution {} doesn't correspond to config "
                    "({}, {}).".format(
                        obs["rgb"].shape[:2],
                        eqa_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                        eqa_config.SIMULATOR.RGB_SENSOR.WIDTH,
                    )
                )
Example #14
def test_ppo_agents():

    agent_config = ppo_agents.get_default_config()
    agent_config.MODEL_PATH = ""
    agent_config.defrost()
    config_env = habitat.get_config(config_paths=CFG_TEST)
    if not os.path.exists(config_env.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    benchmark = habitat.Benchmark(config_paths=CFG_TEST)

    for input_type in ["blind", "rgb", "depth", "rgbd"]:
        for resolution in [256, 384]:
            config_env.defrost()
            config_env.SIMULATOR.AGENT_0.SENSORS = []
            if input_type in ["rgb", "rgbd"]:
                config_env.SIMULATOR.AGENT_0.SENSORS += ["RGB_SENSOR"]
                agent_config.RESOLUTION = resolution
                config_env.SIMULATOR.RGB_SENSOR.WIDTH = resolution
                config_env.SIMULATOR.RGB_SENSOR.HEIGHT = resolution
            if input_type in ["depth", "rgbd"]:
                config_env.SIMULATOR.AGENT_0.SENSORS += ["DEPTH_SENSOR"]
                agent_config.RESOLUTION = resolution
                config_env.SIMULATOR.DEPTH_SENSOR.WIDTH = resolution
                config_env.SIMULATOR.DEPTH_SENSOR.HEIGHT = resolution

            config_env.freeze()

            del benchmark._env
            benchmark._env = habitat.Env(config=config_env)
            agent_config.INPUT_TYPE = input_type

            agent = ppo_agents.PPOAgent(agent_config)
            habitat.logger.info(benchmark.evaluate(agent, num_episodes=10))
Example #15
def test_object_nav_task():
    config = get_config(CFG_TEST)

    if not ObjectNavDatasetV1.check_config_paths_exist(config.DATASET):
        pytest.skip(
            "Please download Matterport3D scene and ObjectNav Datasets to data folder."
        )

    dataset = make_dataset(id_dataset=config.DATASET.TYPE,
                           config=config.DATASET)
    with habitat.Env(config=config, dataset=dataset) as env:
        for i in range(10):
            env.reset()
            while not env.episode_over:
                action = env.action_space.sample()
                habitat.logger.info(f"Action : "
                                    f"{action['action']}, "
                                    f"args: {action['action_args']}.")
                env.step(action)

            metrics = env.get_metrics()
            logger.info(metrics)

        with pytest.raises(AssertionError):
            env.step({"action": MoveForwardAction.name})
Example #16
    def __init__(self, config, habitat_config):
        self.action_names = config.TASK.ACTION_NAMES
        self.action_mapping = {
            action_name: index
            for index, action_name in enumerate(self.action_names)
        }
        self.action_mapping.update({
            index: action_name
            for index, action_name in enumerate(self.action_names)
        })
        self.modalities = config.TASK.MODUALITIES

        self.cell_height, self.cell_width = config.TASK.CELL_HEIGHT, config.TASK.CELL_WIDTH
        self.last_cell_x, self.last_cell_y = None, None
        self.visited_cells = []
        self.reward_rate = config.TASK.REWARD_RATE
        self.collision_penalty_rate = config.TASK.COLLISION_PENALTY_RATE

        self.env = habitat.Env(config=habitat_config)

        self.observations = None
        self.prev_x, self.prev_y = None, None
        self.curr_x, self.curr_y = None, None
        self.current_action = None
        self.curr_action = None
        self.prev_action = None
        self.is_episode_finished = False
Example #17
def test_env():
    config = get_config(CFG_TEST)
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    env = habitat.Env(config=config, dataset=None)
    env.episodes = [
        NavigationEpisode(
            episode_id="0",
            scene_id=config.SIMULATOR.SCENE,
            start_position=[03.00611, 0.072447, -2.67867],
            start_rotation=[0, 0.163276, 0, 0.98658],
            goals=[],
        )
    ]

    env.reset()
    non_stop_actions = [
        k for k, v in SIM_ACTION_TO_NAME.items()
        if v != SimulatorActions.STOP.value
    ]
    for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
        env.step(np.random.choice(non_stop_actions))

    # check for steps limit on environment
    assert env.episode_over is True, ("episode should be over after "
                                      "max_episode_steps")

    env.reset()

    env.step(SIM_NAME_TO_ACTION[SimulatorActions.STOP.value])
    # check for STOP action
    assert env.episode_over is True, ("episode should be over after STOP "
                                      "action")

    env.close()
Example #18
def example_get_topdown_map():
    config = habitat.get_config(config_file="tasks/pointnav.yaml")
    dataset = habitat.make_dataset(
        id_dataset=config.DATASET.TYPE, config=config.DATASET
    )
    env = habitat.Env(config=config, dataset=dataset)
    env.reset()
    top_down_map = maps.get_topdown_map(env.sim, map_resolution=(5000, 5000))
    recolor_map = np.array(
        [[255, 255, 255], [128, 128, 128], [0, 0, 0]], dtype=np.uint8
    )
    range_x = np.where(np.any(top_down_map, axis=1))[0]
    range_y = np.where(np.any(top_down_map, axis=0))[0]
    padding = int(np.ceil(top_down_map.shape[0] / 125))
    range_x = (
        max(range_x[0] - padding, 0),
        min(range_x[-1] + padding + 1, top_down_map.shape[0]),
    )
    range_y = (
        max(range_y[0] - padding, 0),
        min(range_y[-1] + padding + 1, top_down_map.shape[1]),
    )
    top_down_map = top_down_map[
        range_x[0] : range_x[1], range_y[0] : range_y[1]
    ]
    top_down_map = recolor_map[top_down_map]
    imageio.imsave(os.path.join(IMAGE_DIR, "top_down_map.png"), top_down_map)
Example #19
def test_task_actions():
    config = habitat.get_config(config_paths=CFG_TEST)
    config.defrost()
    config.TASK.POSSIBLE_ACTIONS = config.TASK.POSSIBLE_ACTIONS + ["TELEPORT"]
    config.freeze()

    env = habitat.Env(config=config)
    env.reset()
    env.step(
        action={
            "action": "TELEPORT",
            "action_args": {
                "position": TELEPORT_POSITION,
                "rotation": TELEPORT_ROTATION,
            },
        }
    )
    agent_state = env.sim.get_agent_state()
    assert np.allclose(
        np.array(TELEPORT_POSITION, dtype=np.float32), agent_state.position
    ), "mismatch in position after teleport"
    assert np.allclose(
        np.array(TELEPORT_ROTATION, dtype=np.float32),
        np.array([*agent_state.rotation.imag, agent_state.rotation.real]),
    ), "mismatch in rotation after teleport"
    env.step("TURN_RIGHT")
    env.close()
Example #20
def test_heading_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["HEADING_SENSOR"]
    config.freeze()
    env = habitat.Env(config=config, dataset=None)
    env.reset()
    random.seed(123)
    np.random.seed(123)

    for _ in range(100):
        random_heading = np.random.uniform(-np.pi, np.pi)
        random_rotation = [
            0,
            np.sin(random_heading / 2),
            0,
            np.cos(random_heading / 2),
        ]
        env.episode_iterator = iter([
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=[03.00611, 0.072447, -2.67867],
                start_rotation=random_rotation,
                goals=[],
            )
        ])

        obs = env.reset()
        heading = obs["heading"]
        assert np.allclose(heading, random_heading)

    env.close()
Example #21
def test_get_observations_at():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = []
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:

        # start position is checked for validity for the specific test scene
        valid_start_position = [-1.3731, 0.08431, 8.60692]
        expected_pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, expected_pointgoal)

        # the starting quaternion is rotated 180 degrees about the z-axis, which
        # corresponds to the simulator using negative z as the forward direction
        start_rotation = [0, 0, 0, 1]

        env.episode_iterator = iter([
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=valid_start_position,
                start_rotation=start_rotation,
                goals=[NavigationGoal(position=goal_position)],
            )
        ])

        obs = env.reset()
        start_state = env.sim.get_agent_state()
        for _ in range(100):
            # Note, this test will not currently work for camera change actions
            # (look up/down), only for movement actions.
            new_obs = env.step(sample_non_stop_action(env.action_space))
            for key, val in new_obs.items():
                agent_state = env.sim.get_agent_state()
                if not (np.allclose(agent_state.position, start_state.position)
                        and np.allclose(agent_state.rotation,
                                        start_state.rotation)):
                    assert not np.allclose(val, obs[key])
            obs_at_point = env.sim.get_observations_at(
                start_state.position,
                start_state.rotation,
                keep_agent_at_new_pose=False,
            )
            for key, val in obs_at_point.items():
                assert np.allclose(val, obs[key])

        obs_at_point = env.sim.get_observations_at(
            start_state.position,
            start_state.rotation,
            keep_agent_at_new_pose=True,
        )
        for key, val in obs_at_point.items():
            assert np.allclose(val, obs[key])
        agent_state = env.sim.get_agent_state()
        assert np.allclose(agent_state.position, start_state.position)
        assert np.allclose(agent_state.rotation, start_state.rotation)
Example #22
    def __init__(self, env_config_file):
        threading.Thread.__init__(self)
        self.env = habitat.Env(config=habitat.get_config(env_config_file))
        # always assume height equals width
        self._sensor_resolution = {
            "RGB": self.env._sim.config["RGB_SENSOR"]["HEIGHT"],
            "DEPTH": self.env._sim.config["DEPTH_SENSOR"]["HEIGHT"],
            "BC_SENSOR": self.env._sim.config["BC_SENSOR"]["HEIGHT"],
        }
        self.env._sim._sim.agents[
            0].move_filter_fn = self.env._sim._sim._step_filter
        self.observations = self.env.reset()

        # add a sphere
        # load some object templates from configuration files
        sphere_template_id = self.env._sim._sim.load_object_configs(
            str("/home/eric/habitat-sim-may-2020/habitat-sim/data/test_assets/objects/sphere"
                ))[0]
        id_sphere = self.env._sim._sim.add_object(sphere_template_id)
        self.env._sim._sim.set_translation(np.array([-2.63, 0.114367, 19.3]),
                                           id_sphere)

        # load and initialize the lobot_merged asset
        locobot_template_id = self.env._sim._sim.load_object_configs(
            "/home/eric/habitat-sim-may-2020/habitat-sim/data/objects/locobot_merged"
        )[0]
        # print("locobot_template_id is " + str(locobot_template_id))
        # add robot object to the scene with the agent/camera SceneNode attached
        self.id_agent_obj = self.env._sim._sim.add_object(
            locobot_template_id, self.env._sim._sim.agents[0].scene_node)
        # print("id of agent object is " + str(self.id_agent_obj))
        # set the agent's body to dynamic
        self.env._sim._sim.set_object_motion_type(
            hsim.physics.MotionType.DYNAMIC, self.id_agent_obj)

        self.env._sim._sim.agents[0].state.velocity = np.float32([0, 0, 0])
        self.env._sim._sim.agents[0].state.angular_velocity = np.float32(
            [0, 0, 0])
        self.vel_control = self.env._sim._sim.get_object_velocity_control(
            self.id_agent_obj)
        self.env._sim._sim.set_translation(
            np.array([-1.7927, 0.114367, 19.1552]), self.id_agent_obj)

        self._pub_rgb = rospy.Publisher("~rgb",
                                        numpy_msg(Floats),
                                        queue_size=1)
        self._pub_depth = rospy.Publisher("~depth",
                                          numpy_msg(Floats),
                                          queue_size=1)

        # additional RGB sensor I configured
        self._pub_bc_sensor = rospy.Publisher("~bc_sensor",
                                              numpy_msg(Floats),
                                              queue_size=1)

        self._pub_depth_and_pointgoal = rospy.Publisher("depth_and_pointgoal",
                                                        numpy_msg(Floats),
                                                        queue_size=1)
        print("created habitat_plant succsefully")
Example #23
def test_imagegoal_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = ["IMAGEGOAL_SENSOR"]
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR"]
    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:

        # start position is checked for validity for the specific test scene
        valid_start_position = [-1.3731, 0.08431, 8.60692]
        pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, pointgoal)

        pointgoal_2 = [0.3, 0.2, 0.1]
        goal_position_2 = np.add(valid_start_position, pointgoal_2)

        # the starting quaternion is rotated 180 degrees about the z-axis, which
        # corresponds to the simulator using negative z as the forward direction
        start_rotation = [0, 0, 0, 1]

        env.episode_iterator = iter(
            [
                NavigationEpisode(
                    episode_id="0",
                    scene_id=config.SIMULATOR.SCENE,
                    start_position=valid_start_position,
                    start_rotation=start_rotation,
                    goals=[NavigationGoal(position=goal_position)],
                ),
                NavigationEpisode(
                    episode_id="1",
                    scene_id=config.SIMULATOR.SCENE,
                    start_position=valid_start_position,
                    start_rotation=start_rotation,
                    goals=[NavigationGoal(position=goal_position_2)],
                ),
            ]
        )
        obs = env.reset()
        for _ in range(100):
            new_obs = env.step(sample_non_stop_action(env.action_space))
            # check to see if taking non-stop actions will affect static image_goal
            assert np.allclose(obs["imagegoal"], new_obs["imagegoal"])
            assert np.allclose(obs["rgb"].shape, new_obs["imagegoal"].shape)

        previous_episode_obs = obs
        _ = env.reset()
        for _ in range(10):
            new_obs = env.step(sample_non_stop_action(env.action_space))
            # check to see if taking non-stop actions will affect static image_goal
            assert not np.allclose(
                previous_episode_obs["imagegoal"], new_obs["imagegoal"]
            )
            assert np.allclose(
                previous_episode_obs["rgb"].shape, new_obs["imagegoal"].shape
            )
Example #24
def agent_demo():
    config = get_habitat_config(
        os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav.yaml")
    )
    config.defrost()
    config.DATASET.DATA_PATH = os.path.join(
        HABITAT_DATASETS_DIR, "pointnav/gibson/v1/train/train.json.gz"
    )
    config.DATASET.SCENES_DIR = HABITAT_SCENE_DATASETS_DIR
    config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = 0
    config.SIMULATOR.TURN_ANGLE = 45
    config.freeze()
    env = habitat.Env(config=config)

    print("Environment creation successful")
    observations = env.reset()
    cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))

    print("Agent stepping around inside environment.")

    count_steps = 0
    action = None
    while not env.episode_over:
        keystroke = cv2.waitKey(0)

        if keystroke == ord(FORWARD_KEY):
            action = 1
            print("action: FORWARD")
        elif keystroke == ord(LEFT_KEY):
            action = 2
            print("action: LEFT")
        elif keystroke == ord(RIGHT_KEY):
            action = 3
            print("action: RIGHT")
        elif keystroke == ord(FINISH):
            action = 0
            print("action: FINISH")
        else:
            print("INVALID KEY")
            continue

        observations = env.step(action)
        count_steps += 1

        print("Position:", env.sim.get_agent_state().position)
        print("Quaternions:", env.sim.get_agent_state().rotation)
        quat = Quaternion(env.sim.get_agent_state().rotation.components)
        print(quat.radians)
        cv2.imshow("RGB", transform_rgb_bgr(observations["rgb"]))

    print("Episode finished after {} steps.".format(count_steps))

    if action == habitat.SimulatorActions.STOP and observations["pointgoal"][0] < 0.2:
        print("you successfully navigated to destination point")
    else:
        print("your navigation was unsuccessful")
Example #25
def test_r2r_vln_sim():
    vln_config = get_config(CFG_TEST)

    if not r2r_vln_dataset.VLNDatasetV1.check_config_paths_exist(
        vln_config.DATASET
    ):
        pytest.skip(
            "Please download Matterport3D R2R VLN dataset to data folder."
        )

    dataset = make_dataset(
        id_dataset=vln_config.DATASET.TYPE, config=vln_config.DATASET
    )

    env = habitat.Env(config=vln_config, dataset=dataset)
    env.episodes = dataset.episodes[:EPISODES_LIMIT]

    follower = ShortestPathFollower(
        env.sim, goal_radius=0.5, return_one_hot=False
    )
    assert env

    for i in range(len(env.episodes)):
        env.reset()
        path = env.current_episode.reference_path + [
            env.current_episode.goals[0].position
        ]
        for point in path:
            done = False
            while not done:
                best_action = follower.get_next_action(point)
                if best_action is None:
                    break
                obs = env.step(best_action)
                assert "rgb" in obs, "RGB image is missing in observation."
                assert (
                    "instruction" in obs
                ), "Instruction is missing in observation."
                assert (
                    obs["instruction"]["text"]
                    == env.current_episode.instruction.instruction_text
                ), "Instruction from sensor does not match the intruction from the episode"

                assert obs["rgb"].shape[:2] == (
                    vln_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                    vln_config.SIMULATOR.RGB_SENSOR.WIDTH,
                ), (
                    "Observation resolution {} doesn't correspond to config "
                    "({}, {}).".format(
                        obs["rgb"].shape[:2],
                        vln_config.SIMULATOR.RGB_SENSOR.HEIGHT,
                        vln_config.SIMULATOR.RGB_SENSOR.WIDTH,
                    )
                )

    env.close()
Example #26
    def __init__(self,
                 config: Config,
                 dataset: Dataset,
                 x_display: str = None) -> None:
        # print("habitat_plugin env constructor")
        self.x_display = x_display
        self.env = habitat.Env(config=config, dataset=dataset)
        # Set the target to a random goal from the provided list for this episode
        self.goal_index = 0
        self.last_geodesic_distance = None
Example #27
def example():
    # Note: use "with" for the example testing; it doesn't need to be written this way in the README
    config = habitat.get_config(
        "habitat-lab/configs/tasks/objectnav_mp3d.yaml")
    with habitat.Env(config=config) as env:
        print("Environment creation successful")
        observations = env.reset()  # noqa: F841
        print(observations)
        while not env.episode_over:
            observations = env.step(env.action_space.sample())  # noqa: F841
        print("Episode finished")
Example #28
def example():
    env = habitat.Env(config=habitat.get_config("configs/tasks/vln_r2r.yaml"))

    print("Environment creation successful")
    observations = env.reset()

    print("Agent stepping around inside environment.")
    count_steps = 0
    while not env.episode_over:
        observations = env.step(env.action_space.sample())
        count_steps += 1
    print("Episode finished after {} steps.".format(count_steps))
Example #29
    def __init__(self,
                 config: Config,
                 dataset: Dataset,
                 x_display: str = None) -> None:
        # print("habitat_plugin env constructor")
        self.x_display = x_display
        self.env = habitat.Env(config=config, dataset=dataset)
        # Set the target to a random goal from the provided list for this episode
        self.goal_index = 0
        self.last_geodesic_distance = None
        self.distance_cache = DynamicDistanceCache(rounding=1)
        self._current_frame: Optional[np.ndarray] = None
Example #30
def generate_and_save_image_sets(pointnav_file_dir,
                                 scene_names,
                                 config,
                                 save_dir,
                                 split='train'):

    for scene_name in scene_names:
        pn_file = os.path.join(pointnav_file_dir, scene_name + '.json.gz')
        print(
            "================================================================================"
        )
        print(pn_file)
        print(
            "================================================================================"
        )
        config.defrost()
        config.DATASET.DATA_PATH = pn_file
        config.freeze()

        if split == "train":
            num_sets = 150
        else:
            num_sets = 250

        for k in range(num_sets):
            dir_to_save = os.path.join(save_dir, split, scene_name, str(k))
            info_file = os.path.join(dir_to_save, 'info.json')
            info = json.load(open(info_file))

            env = habitat.Env(config=config)

            # Get the previously used episodes
            episode_ids_to_use = [ep["episode_id"] for ep in info]

            episode_id_list = [ep.episode_id for ep in env.episodes]
            # These indices correspond to the previously used episodes
            indices_to_use = [
                episode_id_list.index(i) for i in episode_ids_to_use
            ]

            temp = [env.episodes[i] for i in indices_to_use]
            env.episodes = temp  # Only use the previously used episodes

            for i in range(len(env.episodes)):
                observations = env.reset()  # reset moves to the next episode
                filename = os.path.join(dir_to_save, '{}.png'.format(i))
                if not os.path.exists(filename):
                    save_sample_rgb(observations['rgb'], filename)

            env.close()