Code example #1
def test_config_eq():
    cfg1 = habitat_sim.Configuration(habitat_sim.SimulatorConfiguration(),
                                     [habitat_sim.AgentConfiguration()])
    cfg2 = habitat_sim.Configuration(habitat_sim.SimulatorConfiguration(),
                                     [habitat_sim.AgentConfiguration()])

    assert cfg1 == cfg2
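A brief follow-on sketch (hypothetical, not part of the original test; it assumes Configuration exposes sim_cfg/agents fields and compares them field by field): mutating any field on one configuration should break the equality checked above.

    # Hypothetical continuation of test_config_eq: change a field and expect inequality.
    cfg3 = habitat_sim.Configuration(habitat_sim.SimulatorConfiguration(),
                                     [habitat_sim.AgentConfiguration()])
    cfg3.sim_cfg.gpu_device_id = 1  # any differing field should make the configs unequal
    assert cfg1 != cfg3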
Code example #2
    def create_sim_config(
            self, _sensor_suite: SensorSuite) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        # sim_config.scene.id = '/private/home/medhini/navigation-analysis-habitat/habitat-api/' + self.config.SCENE
        sim_config.scene.id = self.config.SCENE
        sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(config_from=self._get_agent_config(),
                         config_to=agent_config)

        sensor_specifications = []
        for sensor in _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2])
            sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)
            sim_sensor_cfg.position = sensor.config.POSITION
            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type  # type: ignore
            sensor_specifications.append(sim_sensor_cfg)

        # If there are no sensors specified, create a dummy sensor so the
        # simulator won't throw an error
        if not _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            sim_sensor_cfg.resolution = [1, 1]
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = registry.get_action_space_configuration(
            self.config.ACTION_SPACE_CONFIG)(self.config).get()

        return habitat_sim.Configuration(sim_config, [agent_config])
Code example #3
    def create_sim_config(
            self, _sensor_suite: SensorSuite) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        overwrite_config(config_from=self.config.HABITAT_SIM_V0,
                         config_to=sim_config)
        sim_config.scene.id = self.config.SCENE
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(config_from=self._get_agent_config(),
                         config_to=agent_config)

        sensor_specifications = []
        for sensor in _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            overwrite_config(config_from=sensor.config,
                             config_to=sim_sensor_cfg)
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2])
            sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)

            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type  # type: ignore
            sim_sensor_cfg.gpu2gpu_transfer = (
                self.config.HABITAT_SIM_V0.GPU_GPU)
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = registry.get_action_space_configuration(
            self.config.ACTION_SPACE_CONFIG)(self.config).get()

        return habitat_sim.Configuration(sim_config, [agent_config])
Code example #4
def test_default_body_controls(action, expected):
    scene_graph = habitat_sim.SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space = dict(
        move_backward=habitat_sim.ActionSpec(
            "move_backward", habitat_sim.ActuationSpec(amount=0.25)),
        move_forward=habitat_sim.ActionSpec(
            "move_forward", habitat_sim.ActuationSpec(amount=0.25)),
        move_left=habitat_sim.ActionSpec(
            "move_left", habitat_sim.ActuationSpec(amount=0.25)),
        move_right=habitat_sim.ActionSpec(
            "move_right", habitat_sim.ActuationSpec(amount=0.25)),
        turn_left=habitat_sim.ActionSpec(
            "turn_left", habitat_sim.ActuationSpec(amount=10.0)),
        turn_right=habitat_sim.ActionSpec(
            "turn_right", habitat_sim.ActuationSpec(amount=10.0)),
    )
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(),
                              agent_config)

    state = agent.state
    agent.act(action)
    new_state = agent.state

    _check_state_expected(state, new_state, expected)
    for k, v in state.sensor_states.items():
        assert k in new_state.sensor_states
        _check_state_expected(v, new_state.sensor_states[k], expected)
Code example #5
def test_default_sensor_controls(action, expected):
    scene_graph = habitat_sim.SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space = dict(
        move_up=habitat_sim.ActionSpec("move_up",
                                       habitat_sim.ActuationSpec(amount=0.25)),
        move_down=habitat_sim.ActionSpec(
            "move_down", habitat_sim.ActuationSpec(amount=0.25)),
        look_left=habitat_sim.ActionSpec(
            "look_left", habitat_sim.ActuationSpec(amount=10.0)),
        look_right=habitat_sim.ActionSpec(
            "look_right", habitat_sim.ActuationSpec(amount=10.0)),
        look_up=habitat_sim.ActionSpec("look_up",
                                       habitat_sim.ActuationSpec(amount=10.0)),
        look_down=habitat_sim.ActionSpec(
            "look_down", habitat_sim.ActuationSpec(amount=10.0)),
    )
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(),
                              agent_config)

    state = agent.state
    agent.act(action)
    new_state = agent.state

    _check_state_same(state, new_state)
    for k, v in state.sensor_states.items():
        assert k in new_state.sensor_states
        _check_state_expected(v, new_state.sensor_states[k], expected)
Code example #6
File: RandomWalk.py Project: glattm/SemanticSlam
def confSim(settings):
    sim_config = habitat_sim.SimulatorConfiguration()
    sim_config.scene_id = settings["scene"]

    #Configuration for Sensors
    sensor_spec = []
    color_sensor_spec = habitat_sim.SensorSpec()
    color_sensor_spec.uuid = "color_sensor"
    color_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR
    color_sensor_spec.resolution = [settings["height"], settings["width"]]
    color_sensor_spec.position = [0.0, settings["sensor_height"], 0.0]
    sensor_spec.append(color_sensor_spec)

    depth_sensor_spec = habitat_sim.SensorSpec()
    depth_sensor_spec.uuid = "depth_sensor"
    depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH
    depth_sensor_spec.resolution = [settings["height"], settings["width"]]
    depth_sensor_spec.position = [0.0, settings["sensor_height"], 0.0]
    sensor_spec.append(depth_sensor_spec)

    #Configure Agent
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.sensor_specifications = sensor_spec

    return habitat_sim.Configuration(sim_config,[agent_config])
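A minimal usage sketch for confSim above (the settings values and scene path are placeholders, not taken from the original project): the returned Configuration is handed to the simulator and the renderings are read back via the sensor uuids set in confSim.

settings = {
    "scene": "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",
    "height": 480,
    "width": 640,
    "sensor_height": 1.5,
}
sim = habitat_sim.Simulator(confSim(settings))
observations = sim.get_sensor_observations()
rgb = observations["color_sensor"]    # RGBA image (typically height x width x 4)
depth = observations["depth_sensor"]  # float depth map (height x width)
sim.close()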
Code example #7
def setup_sim_and_sensors():
    left_rgb_sensor = create_sensor(orientation=[0.0, 0.0, 0.0],
                                    position=[eye_seperation / 2, 0, 0],
                                    sensor_uuid="left_rgb_sensor")
    right_rgb_sensor = create_sensor(position=[-eye_seperation / 2, 0, 0],
                                     sensor_uuid="right_rgb_sensor")
    depth_sensor = create_sensor(sensor_uuid="depth_sensor", camera_type="D")

    # agent configuration has the sensor_specifications objects as a list
    new_agent_config = habitat_sim.AgentConfiguration()
    new_agent_config.sensor_specifications = [
        left_rgb_sensor, right_rgb_sensor, depth_sensor
    ]
    # Configuration of the backend of the simulator includes default_agent_id set to 0
    backend_cfg = habitat_sim.SimulatorConfiguration()
    default_agent_id = backend_cfg.default_agent_id
    backend_cfg.foveation_distortion = True
    backend_cfg.scene.id = ("../multi_agent/data_files/skokloster-castle.glb")
    # backend_cfg.scene.id = (
    #    "/Users/rajan/My_Replica/replica_v1/room_2/mesh.ply"
    # )
    # Tie the backend of the simulator and a list of agent configurations
    new_Configuration = habitat_sim.Configuration(backend_cfg,
                                                  [new_agent_config])
    # When Simulator is called the agent configuration becomes Agents (only one agent in our case)
    new_sim = habitat_sim.Simulator(new_Configuration)
    return new_sim, default_agent_id
Code example #8
    def create_sim_config(
            self, _sensor_suite: SensorSuite) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        sim_config.scene.id = self.config.SCENE
        sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID
        sim_config.allow_sliding = self.config.HABITAT_SIM_V0.ALLOW_SLIDING
        sim_config.enable_physics = self.config.HABITAT_SIM_V0.ENABLE_PHYSICS
        sim_config.physics_config_file = (
            self.config.HABITAT_SIM_V0.PHYSICS_CONFIG_FILE)
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(config_from=self._get_agent_config(),
                         config_to=agent_config)

        sensor_specifications = []
        for sensor in _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2])
            sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)
            sim_sensor_cfg.position = sensor.config.POSITION
            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type  # type: ignore
            sim_sensor_cfg.gpu2gpu_transfer = (
                self.config.HABITAT_SIM_V0.GPU_GPU)
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = registry.get_action_space_configuration(
            self.config.ACTION_SPACE_CONFIG)(self.config).get()

        return habitat_sim.Configuration(sim_config, [agent_config])
Code example #9
def test_multiple_construct_destroy():
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()

    sim_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"

    for _ in range(3):
        with habitat_sim.Simulator(habitat_sim.Configuration(sim_cfg, [agent_config])):
            pass
Code example #10
def setup_agent(dataset_path, random_pos, image_size, focal):

    random_up, random_left, random_back = random_pos
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene.id = (dataset_path)

    hfov = 2 * np.arctan(0.5 * image_size[1] / focal) / np.pi * 180

    # First, let's create a stereo RGB agent
    left_rgb_sensor = habitat_sim.SensorSpec()
    # Give it the uuid of left_rgb_sensor; this will also be how we
    # index the observations to retrieve the rendering from this sensor
    left_rgb_sensor.uuid = "left_rgb_sensor"
    left_rgb_sensor.resolution = image_size
    # Position the left RGB sensor using the random up/left/back offsets
    # relative to the center of the agent
    left_rgb_sensor.position = random_up * habitat_sim.geo.UP + random_left * habitat_sim.geo.LEFT + random_back * habitat_sim.geo.BACK
    left_rgb_sensor.parameters["hfov"] = str(hfov)

    # Same deal with the right sensor
    right_rgb_sensor = habitat_sim.SensorSpec()
    right_rgb_sensor.uuid = "right_rgb_sensor"
    right_rgb_sensor.resolution = image_size
    # The right RGB sensor is offset 0.5 meters to the right
    # of the left RGB sensor
    right_rgb_sensor.position = left_rgb_sensor.position + 0.5 * habitat_sim.geo.RIGHT
    right_rgb_sensor.parameters["hfov"] = str(hfov)

    # Now let's do the exact same thing but for a depth camera stereo pair!
    left_depth_sensor = habitat_sim.SensorSpec()
    left_depth_sensor.uuid = "left_depth_sensor"
    left_depth_sensor.resolution = image_size
    left_depth_sensor.position = left_rgb_sensor.position
    # The only difference is that we set the sensor type to DEPTH
    left_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH
    left_depth_sensor.parameters["hfov"] = str(hfov)

    right_depth_sensor = habitat_sim.SensorSpec()
    right_depth_sensor.uuid = "right_depth_sensor"
    right_depth_sensor.resolution = image_size
    right_depth_sensor.position = right_rgb_sensor.position
    # The only difference is that we set the sensor type to DEPTH
    right_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH
    right_depth_sensor.parameters["hfov"] = str(hfov)

    # Now we simply set the agent's list of sensor specs to the specs for our four sensors
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.sensor_specifications = [
        left_rgb_sensor, right_rgb_sensor, left_depth_sensor,
        right_depth_sensor
    ]

    sim = habitat_sim.Simulator(
        habitat_sim.Configuration(backend_cfg, [agent_config]))

    return sim
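A short usage sketch for setup_agent above (the argument values and dataset path are placeholders): the four renderings are retrieved from the observation dict using the uuids assigned to the sensor specs.

sim = setup_agent(
    dataset_path="data/scene_datasets/habitat-test-scenes/skokloster-castle.glb",
    random_pos=(1.5, 0.25, 0.0),
    image_size=[512, 512],
    focal=256.0,
)
obs = sim.get_sensor_observations()
left_rgb, right_rgb = obs["left_rgb_sensor"], obs["right_rgb_sensor"]
left_depth, right_depth = obs["left_depth_sensor"], obs["right_depth_sensor"]
sim.close()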
Code example #11
def test_no_move_fun():
    scene_graph = habitat_sim.SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space = dict(move_forward=habitat_sim.ActionSpec(
        "DNF", habitat_sim.ActuationSpec(amount=0.25)))
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(),
                              agent_config)

    with pytest.raises(AssertionError):
        agent.act("move_forward")
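For contrast, a hedged sketch (mirroring the registration pattern shown in code example #26 below) of how a control named "DNF" could be registered so that the same agent.act("move_forward") call would no longer raise:

# Hypothetical registration making the "DNF" actuation above resolvable.
import numpy as np

class Dnf(habitat_sim.SceneNodeControl):
    def __call__(self, scene_node: habitat_sim.SceneNode,
                 actuation_spec: habitat_sim.ActuationSpec):
        # Move the node forward by `amount` along its local front axis
        forward_ax = (np.array(
            scene_node.absolute_transformation().rotation_scaling())
                      @ habitat_sim.geo.FRONT)
        scene_node.translate_local(forward_ax * actuation_spec.amount)

habitat_sim.register_move_fn(Dnf, name="DNF", body_action=True)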
Code example #12
def _test_keep_agent_tgt():
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()

    sim_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
    agents = []

    for _ in range(3):
        with habitat_sim.Simulator(
                habitat_sim.Configuration(sim_cfg, [agent_config])) as sim:
            agents.append(sim.get_agent(0))
Code example #13
    def create_sim_config(
            self, _sensor_suite: SensorSuite) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        sim_config.scene.id = self.config.SCENE
        sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(config_from=self._get_agent_config(),
                         config_to=agent_config)

        sensor_specifications = []
        for sensor in _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2])
            sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)
            sim_sensor_cfg.position = sensor.config.POSITION
            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type  # type: ignore
            sensor_specifications.append(sim_sensor_cfg)

        # If there are no sensors specified, create a dummy sensor so the
        # simulator won't throw an error
        if not _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            sim_sensor_cfg.resolution = [1, 1]
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = {
            SimulatorActions.LEFT.value:
            habitat_sim.ActionSpec(
                "turn_left",
                habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
            ),
            SimulatorActions.RIGHT.value:
            habitat_sim.ActionSpec(
                "turn_right",
                habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
            ),
            SimulatorActions.FORWARD.value:
            habitat_sim.ActionSpec(
                "move_forward",
                habitat_sim.ActuationSpec(
                    amount=self.config.FORWARD_STEP_SIZE),
            ),
            SimulatorActions.STOP.value:
            habitat_sim.ActionSpec("stop"),
        }

        return habitat_sim.Configuration(sim_config, [agent_config])
Code example #14
def test_multiple_construct_destroy():
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()

    sim_cfg.scene_id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
    hab_cfg = habitat_sim.Configuration(sim_cfg, [agent_config])
    mm = habitat_sim.metadata.MetadataMediator(sim_cfg)
    hab_cfg_mm = habitat_sim.Configuration(sim_cfg, [agent_config], mm)

    test_list = [hab_cfg, hab_cfg_mm]
    for ctor_arg in test_list:
        for _ in range(3):
            with habitat_sim.Simulator(ctor_arg):
                pass
Code example #15
File: HabitatSim.py Project: wx-b/oc-fewshot-public
    def _make_cfg(self, settings):
        sim_cfg = habitat_sim.SimulatorConfiguration()
        sim_cfg.gpu_device_id = int(self.params.get('gpu',
                                                    _DEFAULT_GPU_DEVICE))
        sim_cfg.scene.id = settings["scene"]

        # Setup Sensors
        # ------------------------------------------------------------
        # Note: all sensors must have the same resolution
        sensors = {
            "color_sensor": {
                "sensor_type": habitat_sim.SensorType.COLOR,
                "resolution": [settings["height"], settings["width"]],
                "position": [0.0, settings["sensor_height"], 0.0],
            },
            "semantic_sensor": {
                "sensor_type": habitat_sim.SensorType.SEMANTIC,
                "resolution": [settings["height"], settings["width"]],
                "position": [0.0, settings["sensor_height"], 0.0],
            }
        }

        if self.sim_settings['depth_sensor']:
            sensors["depth_sensor"] = {
                "sensor_type": habitat_sim.SensorType.DEPTH,
                "resolution": [settings["height"], settings["width"]],
                "position": [0.0, settings["sensor_height"], 0.0],
            }

        sensor_specs = []
        for sensor_uuid, sensor_params in sensors.items():
            if settings[sensor_uuid]:
                sensor_spec = habitat_sim.SensorSpec()
                sensor_spec.uuid = sensor_uuid
                sensor_spec.sensor_type = sensor_params["sensor_type"]
                sensor_spec.resolution = sensor_params["resolution"]
                sensor_spec.position = sensor_params["position"]
                sensor_spec.parameters["hfov"] = str(settings["hfov"])
                sensor_specs.append(sensor_spec)
        # ------------------------------------------------------------

        # Setup Agent
        # ------------------------------------------------------------
        agent_cfg = habitat_sim.AgentConfiguration()
        agent_cfg.sensor_specifications = sensor_specs
        agent_cfg.action_space = {}
        # ------------------------------------------------------------

        return habitat_sim.Configuration(sim_cfg, [agent_cfg])
Code example #16
def test_keep_agent():
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()

    sim_cfg.scene_id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
    agents = []
    hab_cfg = habitat_sim.Configuration(sim_cfg, [agent_config])
    mm = habitat_sim.metadata.MetadataMediator(sim_cfg)
    hab_cfg_mm = habitat_sim.Configuration(sim_cfg, [agent_config], mm)

    test_list = [hab_cfg, hab_cfg_mm]
    for ctor_arg in test_list:
        for _ in range(3):
            with habitat_sim.Simulator(ctor_arg) as sim:
                agents.append(sim.get_agent(0))
Code example #17
    def create_sim_config(
        self, _sensor_suite: SensorSuite
    ) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        # Check if Habitat-Sim is post Scene Config Update
        if not hasattr(sim_config, "scene_id"):
            raise RuntimeError(
                "Incompatible version of Habitat-Sim detected, please upgrade habitat_sim"
            )
        overwrite_config(
            config_from=self.habitat_config.HABITAT_SIM_V0,
            config_to=sim_config,
        )
        sim_config.scene_id = self.habitat_config.SCENE
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(
            config_from=self._get_agent_config(), config_to=agent_config
        )

        sensor_specifications = []
        for sensor in _sensor_suite.sensors.values():
            sim_sensor_cfg = habitat_sim.SensorSpec()
            overwrite_config(
                config_from=sensor.config, config_to=sim_sensor_cfg
            )
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2]
            )
            sim_sensor_cfg.parameters["hfov"] = str(sensor.config.HFOV)

            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            # We know that the Sensor has to be one of these Sensors
            sensor = cast(HabitatSimVizSensors, sensor)
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type
            sim_sensor_cfg.gpu2gpu_transfer = (
                self.habitat_config.HABITAT_SIM_V0.GPU_GPU
            )
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = registry.get_action_space_configuration(
            self.habitat_config.ACTION_SPACE_CONFIG
        )(self.habitat_config).get()

        return habitat_sim.Configuration(sim_config, [agent_config])
Code example #18
def test_no_navmesh_smoke():
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()
    # No sensors as we are only testing to see if things work
    # with no navmesh and the navmesh isn't used for any existing sensors
    agent_config.sensor_specifications = []

    sim_cfg.scene_id = "data/test_assets/scenes/stage_floor1.glb"

    with habitat_sim.Simulator(
        habitat_sim.Configuration(sim_cfg, [agent_config])
    ) as sim:
        sim.initialize_agent(0)

        random.seed(0)
        for _ in range(50):
            obs = sim.step(random.choice(list(agent_config.action_space.keys())))
            # Can't collide with no navmesh
            assert not obs["collided"]
Code example #19
def test_no_navmesh_smoke(sim):
    sim_cfg = habitat_sim.SimulatorConfiguration()
    agent_config = habitat_sim.AgentConfiguration()
    # No sensors as we are only testing to see if things work
    # with no navmesh and the navmesh isn't used for any existing sensors
    agent_config.sensor_specifications = []

    sim_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
    # Make it try to load a navmesh that doesn't exist
    sim_cfg.scene.filepaths["navmesh"] = "/tmp/dne.navmesh"

    sim.reconfigure(habitat_sim.Configuration(sim_cfg, [agent_config]))

    sim.initialize_agent(0)

    random.seed(0)
    for _ in range(50):
        obs = sim.step(random.choice(list(agent_config.action_space.keys())))
        # Can't collide with no navmesh
        assert not obs["collided"]
Code example #20
    return new_sensor


if __name__ == "__main__":
    print("The system version is {}".format(sys.version))

    left_rgb_sensor: object = create_sensor([0.0, 0.0, 0.0],
                                            sensor_uuid="left_rgb_sensor")
    right_rgb_sensor: object = create_sensor([0.0, 0.0, 0.0],
                                             sensor_uuid="right_rgb_sensor")
    depth_sensor: object = create_test_sensor(sensor_uuid="depth_sensor",
                                              camera_type="D")

    # agent configuration has the sensor_specifications objects as a list
    my_agent_config = habitat_sim.AgentConfiguration()
    my_agent_config.sensor_specifications = [
        left_rgb_sensor, right_rgb_sensor, depth_sensor
    ]

    # Configuration of the backend of the simulator includes default_agent_id set to 0
    backend_cfg = habitat_sim.SimulatorConfiguration()
    default_agent_id = backend_cfg.default_agent_id
    backend_cfg.foveation_distortion = True
    #backend_cfg.foveation_distortion = False
    backend_cfg.scene.id = (
        "/Users/rajan/My_Replica/replica_v1/room_2/mesh.ply")

    # Tie the backend of the simulator and a list of agent configurations
    my_Configuration = habitat_sim.Configuration(backend_cfg,
                                                 [my_agent_config])
Code example #21
def test_pyrobot_noisy_actions(noise_multiplier, robot, controller):
    np.random.seed(0)
    scene_graph = SceneGraph()
    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space = dict(
        noisy_move_backward=habitat_sim.ActionSpec(
            "pyrobot_noisy_move_backward",
            habitat_sim.PyRobotNoisyActuationSpec(
                amount=1.0,
                robot=robot,
                controller=controller,
                noise_multiplier=noise_multiplier,
            ),
        ),
        noisy_move_forward=habitat_sim.ActionSpec(
            "pyrobot_noisy_move_forward",
            habitat_sim.PyRobotNoisyActuationSpec(
                amount=1.0,
                robot=robot,
                controller=controller,
                noise_multiplier=noise_multiplier,
            ),
        ),
        noisy_turn_left=habitat_sim.ActionSpec(
            "pyrobot_noisy_turn_left",
            habitat_sim.PyRobotNoisyActuationSpec(
                amount=90.0,
                robot=robot,
                controller=controller,
                noise_multiplier=noise_multiplier,
            ),
        ),
        noisy_turn_right=habitat_sim.ActionSpec(
            "pyrobot_noisy_turn_right",
            habitat_sim.PyRobotNoisyActuationSpec(
                amount=90.0,
                robot=robot,
                controller=controller,
                noise_multiplier=noise_multiplier,
            ),
        ),
        move_backward=habitat_sim.ActionSpec(
            "move_backward", habitat_sim.ActuationSpec(amount=1.0)
        ),
        move_forward=habitat_sim.ActionSpec(
            "move_forward", habitat_sim.ActuationSpec(amount=1.0)
        ),
        turn_left=habitat_sim.ActionSpec(
            "turn_left", habitat_sim.ActuationSpec(amount=90.0)
        ),
        turn_right=habitat_sim.ActionSpec(
            "turn_right", habitat_sim.ActuationSpec(amount=90.0)
        ),
    )
    agent = habitat_sim.Agent(scene_graph.get_root_node().create_child(), agent_config)

    for base_action in {act.replace("noisy_", "") for act in agent_config.action_space}:
        state = agent.state
        state.rotation = np.quaternion(1, 0, 0, 0)
        agent.state = state
        agent.act(base_action)
        base_state = agent.state

        delta_translations = []
        delta_rotations = []
        for _ in range(300):
            agent.state = state
            agent.act(f"noisy_{base_action}")
            noisy_state = agent.state

            delta_translations.append(_delta_translation(base_state, noisy_state))
            delta_rotations.append(_delta_rotation(base_state, noisy_state))

        delta_translations_arr = np.stack(delta_translations)
        delta_rotations_arr = np.stack(delta_rotations)
        if "move" in base_action:
            noise_model = pyrobot_noise_models[robot][controller].linear_motion
        else:
            noise_model = pyrobot_noise_models[robot][controller].rotational_motion
        EPS = 5e-2
        assert (
            np.linalg.norm(
                noise_model.linear.mean * noise_multiplier
                - np.abs(delta_translations_arr.mean(0))
            )
            < EPS
        )
        assert (
            np.linalg.norm(
                noise_model.rotation.mean * noise_multiplier
                - np.abs(delta_rotations_arr.mean(0))
            )
            < EPS
        )

        assert (
            np.linalg.norm(
                noise_model.linear.cov * noise_multiplier
                - np.diag(delta_translations_arr.std(0) ** 2)
            )
            < EPS
        )
        assert (
            np.linalg.norm(
                noise_model.rotation.cov * noise_multiplier
                - (delta_rotations_arr.std(0) ** 2)
            )
            < EPS
        )
Code example #22
def setupAgentwithSensors(display=True):
    global cv2
    # Only import cv2 if we are going to display
    if display:
        import cv2 as _cv2

        cv2 = _cv2

        cv2.namedWindow("stereo_pair")
        # cv2.namedWindow("depth_pair")
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene.id = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    )  # data/scene_datasets/habitat-test-scenes/skokloster-castle.glb

    # cam_baseline = 0.2
    # cam_focalLength = 450
    # height = 640
    # width = 960
    # constructPyramid()

    hfov = 2 * math.degrees(math.atan(width / (2 * cam_focalLength)))
    vfov = 2 * math.degrees(math.atan(height / (2 * cam_focalLength)))
    # First, let's create a stereo RGB agent
    left_rgb_sensor = habitat_sim.SensorSpec()
    # Give it the uuid of left_sensor, this will also be how we
    # index the observations to retrieve the rendering from this sensor
    left_rgb_sensor.uuid = "left_sensor"
    left_rgb_sensor.resolution = [height, width]
    left_rgb_sensor.parameters["hfov"] = str(hfov)
    left_rgb_sensor.parameters["vfov"] = str(vfov)
    # The left RGB sensor will be 1.5 meters off the ground
    # and half the camera baseline to the left of the center of the agent
    left_rgb_sensor.position = 1.5 * habitat_sim.geo.UP + (
        cam_baseline / 2) * habitat_sim.geo.LEFT

    # Same deal with the right sensor
    right_rgb_sensor = habitat_sim.SensorSpec()
    right_rgb_sensor.uuid = "right_sensor"
    right_rgb_sensor.resolution = [height, width]
    right_rgb_sensor.parameters["hfov"] = str(hfov)
    right_rgb_sensor.parameters["vfov"] = str(vfov)
    # The right RGB sensor will be 1.5 meters off the ground
    # and half the camera baseline to the right of the center of the agent
    right_rgb_sensor.position = 1.5 * habitat_sim.geo.UP + (
        cam_baseline / 2) * habitat_sim.geo.RIGHT

    # Now let's do the exact same thing but for a depth camera stereo pair!
    left_depth_sensor = habitat_sim.SensorSpec()
    left_depth_sensor.uuid = "left_sensor_depth"
    left_depth_sensor.resolution = [height, width]
    left_depth_sensor.parameters["hfov"] = str(hfov)
    left_depth_sensor.parameters["vfov"] = str(vfov)
    left_depth_sensor.position = 1.5 * habitat_sim.geo.UP + (
        cam_baseline / 2) * habitat_sim.geo.LEFT
    # The only difference is that we set the sensor type to DEPTH
    left_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH

    right_depth_sensor = habitat_sim.SensorSpec()
    right_depth_sensor.uuid = "right_sensor_depth"
    right_depth_sensor.resolution = [height, width]
    right_depth_sensor.parameters["hfov"] = str(hfov)
    right_depth_sensor.parameters["vfov"] = str(vfov)
    right_depth_sensor.position = 1.5 * habitat_sim.geo.UP + (
        cam_baseline / 2) * habitat_sim.geo.RIGHT
    # The only difference is that we set the sensor type to DEPTH
    right_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH

    agent_config = habitat_sim.AgentConfiguration(
    )  # set configuration for agent (id = 0)
    agent_config.sensor_specifications = [
        left_rgb_sensor, right_rgb_sensor, left_depth_sensor,
        right_depth_sensor
    ]

    sim = habitat_sim.simulator.Simulator(
        habitat_sim.Configuration(backend_cfg, [agent_config]))

    # set agent position
    initial_state_agent = place_agent(sim, agent_config)
    # print("initial agent state: {0} ".format(initial_state_agent))
    print("initial agent's sensor state: {0} ".format(
        initial_state_agent.sensor_states))
    # set action
    _render(sim, isdepth=True)
Code example #23
def main():
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene_id = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    backend_cfg.enable_physics = True

    # Leaving the context with the background thread can improve
    # performance, but makes it so that sim.get_sensor_observations_async_finish()
    # no longer returns the context back to the main thread. If you set this to
    # true and get context assert failures, call sim.renderer.acquire_gl_context()
    # to move the OpenGL context back to the main thread.
    # The default is False
    backend_cfg.leave_context_with_background_renderer = False

    agent_cfg = habitat_sim.AgentConfiguration(
        sensor_specifications=[habitat_sim.CameraSensorSpec()])
    cfg = habitat_sim.Configuration(backend_cfg, [agent_cfg])

    # Number of physics steps per render
    n_physics_steps_per_render = 4
    # Render at 30 hz
    physics_step_time = 1.0 / (30 * n_physics_steps_per_render)

    sim = habitat_sim.Simulator(cfg)

    for i in range(n_physics_steps_per_render):
        # Always call sim.step_physics when not using async rendering
        if i == 0:
            sim.start_async_render_and_step_physics(physics_step_time)
        else:
            sim.step_physics(physics_step_time)

        # Call sim.renderer.acquire_gl_context() if you need to create or destroy an object.
        # Note that this will block until rendering is done.

    obs = sim.get_sensor_observations_async_finish()
    # To change back to normal rendering, use
    #     obs = sim.get_sensor_observations()

    # You can add/remove objects after the call to get_sensor_observations_async_finish()
    # if backend_cfg.leave_context_with_background_renderer was left as False. If that was
    # set to True, you'd need to call
    #     sim.renderer.acquire_gl_context()
    # Calling acquire_gl_context() is a no-op if the main thread already has the OpenGL context

    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene_id = "data/scene_datasets/habitat-test-scenes/apartment_1.glb"
    backend_cfg.enable_physics = True
    new_cfg = habitat_sim.Configuration(backend_cfg, [agent_cfg])

    # To create a new instance of the simulator to swap the scene or similar,
    # do not delete the simulator instance as this may leak GPU memory.
    # Instead, call sim.close(destroy=False) as this will close/delete
    # everything except the OpenGL context and the background render
    # thread.
    sim.close(destroy=False)
    # Then use reconfigure.
    sim.reconfigure(new_cfg)
    # If
    #     sim.close()
    #     sim = habitat_sim.Simulator(new_cfg)
    # was used, that could end up leaking memory

    sim.start_async_render_and_step_physics(physics_step_time)
    obs = sim.get_sensor_observations_async_finish()  # noqa: F841

    # Call close with destroy=True here because this example is over :)
    sim.close(destroy=True)
Code example #24
File: agent_oreo.py Project: srkiyengar/oreohabitat
    def __init__(self,
                 scene,
                 result_folder,
                 depth_camera=False,
                 loc_depth_cam='c',
                 foveation=False):

        self.agent_config = habitat_sim.AgentConfiguration()
        # Left sensor - oreo perspective - looking down the negative z axis
        self.left_sensor = habitat_sim.SensorSpec()
        self.left_sensor.sensor_type = habitat_sim.SensorType.COLOR
        self.left_sensor.resolution = sensor_resolution
        self.left_sensor.uuid = "left_rgb_sensor"
        self.left_sensor.position = [-eye_separation / 2, 0.0, 0.0]
        self.left_sensor.orientation = np.array([0.0, 0.0, 0.0], dtype=float)
        self.left_sensor_hfov = math.radians(
            int(self.left_sensor.parameters['hfov']))
        self.focal_distance = sensor_resolution[0] / (
            2 * math.tan(self.left_sensor_hfov / 2))

        # Right sensor - oreo perspective - looking down the negative z axis
        self.right_sensor = habitat_sim.SensorSpec()
        self.right_sensor.sensor_type = habitat_sim.SensorType.COLOR
        self.right_sensor.resolution = sensor_resolution
        self.right_sensor.uuid = "right_rgb_sensor"
        self.right_sensor.position = [eye_separation / 2, 0.0, 0.0]
        self.right_sensor.orientation = np.array([0.0, 0.0, 0.0], dtype=float)
        self.right_sensor_hfov = math.radians(
            int(self.right_sensor.parameters['hfov']))
        if self.right_sensor_hfov != self.left_sensor_hfov:
            print("Warning - Right and Left Sensor widths are not identical!")

        # Depth camera - At the origin of the reference coordinate axes (habitat frame)
        if depth_camera:
            self.num_sensors = 3
            self.depth_sensor = habitat_sim.SensorSpec()
            self.depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH
            self.depth_sensor.resolution = sensor_resolution
            self.depth_sensor.uuid = "depth_sensor"
            if loc_depth_cam == 'l':
                self.depth_sensor.position = self.left_sensor.position
            elif loc_depth_cam == 'r':
                self.depth_sensor.position = self.right_sensor.position
            else:
                self.depth_sensor.position = [0.0, 0.0, 0.0]

            self.depth_sensor.orientation = np.array([0.0, 0.0, 0.0],
                                                     dtype=float)
            self.agent_config.sensor_specifications = [
                self.right_sensor, self.left_sensor, self.depth_sensor
            ]
        else:
            self.num_sensors = 2
            self.agent_config.sensor_specifications = [
                self.right_sensor, self.left_sensor
            ]

        self.backend_cfg = habitat_sim.SimulatorConfiguration()

        if foveation:
            self.backend_cfg.foveation_distortion = True

        self.backend_cfg.scene.id = scene  #This works in older habitat versions
        # self.backend_cfg.scene_id = scene #newer versions like the colab install

        self.destination = os.path.realpath(result_folder)
        if not os.path.isdir(self.destination):
            os.makedirs(self.destination)

        # Tie the backend of the simulator and the list of agent configurations (only one)
        self.sim_configuration = habitat_sim.Configuration(
            self.backend_cfg, [self.agent_config])
        self.sim = habitat_sim.Simulator(self.sim_configuration)
        self.agent_id = self.backend_cfg.default_agent_id
        self.agent = self.sim.get_agent(self.agent_id)
        self.initial_agent_state = self.agent.get_state()
        print(
            f"Agent rotation {self.initial_agent_state.rotation} Agent position {self.initial_agent_state.position}"
        )
        # agent_head_neck_rotation is not part of the habitat api; it keeps track of the
        # head/neck rotation w.r.t. the agent. The habitat api agent rotation is not the
        # rotation of the agent w.r.t. the WCS followed by the rotation of the head/neck.
        self.agent_head_neck_rotation = np.quaternion(1, 0, 0, 0)

        self.counter = 0  # counter for saccade file numbering
        self.filename = self.create_unique_filename(scene)
        self.my_images = self.get_sensor_observations()
        self.current_saved_image = "empty"
        return
Code example #25
def main(display=True):
    global cv2
    # Only import cv2 if we are going to display
    if display:
        import cv2 as _cv2

        cv2 = _cv2

        cv2.namedWindow("stereo_pair")

    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene.id = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"
    )

    # First, let's create a stereo RGB agent
    left_rgb_sensor = habitat_sim.SensorSpec()
    # Give it the uuid of left_sensor, this will also be how we
    # index the observations to retrieve the rendering from this sensor
    left_rgb_sensor.uuid = "left_sensor"
    left_rgb_sensor.resolution = [512, 512]
    # The left RGB sensor will be 1.5 meters off the ground
    # and 0.25 meters to the left of the center of the agent
    left_rgb_sensor.position = 1.5 * habitat_sim.geo.UP + 0.25 * habitat_sim.geo.LEFT

    # Same deal with the right sensor
    right_rgb_sensor = habitat_sim.SensorSpec()
    right_rgb_sensor.uuid = "right_sensor"
    right_rgb_sensor.resolution = [512, 512]
    # The right RGB sensor will be 1.5 meters off the ground
    # and 0.25 meters to the right of the center of the agent
    right_rgb_sensor.position = 1.5 * habitat_sim.geo.UP + 0.25 * habitat_sim.geo.RIGHT

    agent_config = habitat_sim.AgentConfiguration()
    # Now we simply set the agent's list of sensor specs to be the two specs for our two sensors
    agent_config.sensor_specifications = [left_rgb_sensor, right_rgb_sensor]

    sim = habitat_sim.Simulator(habitat_sim.Configuration(backend_cfg, [agent_config]))

    _render(sim, display)
    sim.close()

    # Now let's do the exact same thing but for a depth camera stereo pair!
    left_depth_sensor = habitat_sim.SensorSpec()
    left_depth_sensor.uuid = "left_sensor"
    left_depth_sensor.resolution = [512, 512]
    left_depth_sensor.position = 1.5 * habitat_sim.geo.UP + 0.25 * habitat_sim.geo.LEFT
    # The only difference is that we set the sensor type to DEPTH
    left_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH

    right_depth_sensor = habitat_sim.SensorSpec()
    right_depth_sensor.uuid = "right_sensor"
    right_depth_sensor.resolution = [512, 512]
    right_depth_sensor.position = (
        1.5 * habitat_sim.geo.UP + 0.25 * habitat_sim.geo.RIGHT
    )
    # The only difference is that we set the sensor type to DEPTH
    right_depth_sensor.sensor_type = habitat_sim.SensorType.DEPTH

    agent_config = habitat_sim.AgentConfiguration()
    agent_config.sensor_specifications = [left_depth_sensor, right_depth_sensor]

    sim = habitat_sim.Simulator(habitat_sim.Configuration(backend_cfg, [agent_config]))

    _render(sim, display, depth=True)
Code example #26
def main():
    # We will define an action that moves the agent and turns it by some amount

    # First, define a class to keep the parameters of the control
    # @attr.s is just syntactic sugar for creating these data-classes
    @attr.s(auto_attribs=True, slots=True)
    class MoveAndSpinSpec:
        forward_amount: float
        spin_amount: float

    print(MoveAndSpinSpec(1.0, 45.0))

    # Register the control functor
    # This action will be an action that affects the body, so body_action=True
    @habitat_sim.register_move_fn(body_action=True)
    class MoveForwardAndSpin(habitat_sim.SceneNodeControl):
        def __call__(self, scene_node: habitat_sim.SceneNode,
                     actuation_spec: MoveAndSpinSpec):
            forward_ax = (np.array(
                scene_node.absolute_transformation().rotation_scaling())
                          @ habitat_sim.geo.FRONT)
            scene_node.translate_local(forward_ax *
                                       actuation_spec.forward_amount)

            # Rotate about the +y (up) axis
            rotation_ax = habitat_sim.geo.UP
            scene_node.rotate_local(mn.Deg(actuation_spec.spin_amount),
                                    rotation_ax)
            # Calling normalize is needed after rotating to deal with machine precision errors
            scene_node.rotation = scene_node.rotation.normalized()

    # We can also register the function with a custom name
    habitat_sim.register_move_fn(MoveForwardAndSpin,
                                 name="my_custom_name",
                                 body_action=True)

    # We can also re-register this function such that it affects just the sensors
    habitat_sim.register_move_fn(MoveForwardAndSpin,
                                 name="move_forward_and_spin_sensors",
                                 body_action=False)

    # Now we need to add this action to the agent's action space in the configuration!
    agent_config = habitat_sim.AgentConfiguration()
    _pprint(agent_config.action_space)

    # We can add the control function in a bunch of ways
    # Note that the name of the action does not need to match the name the control function
    # was registered under.

    # The habitat_sim.ActionSpec defines an action.  The first argument is the registered name
    # of the control spec, the second is the parameter spec
    agent_config.action_space["fwd_and_spin"] = habitat_sim.ActionSpec(
        "move_forward_and_spin", MoveAndSpinSpec(1.0, 45.0))

    # Add the sensor version
    agent_config.action_space["fwd_and_spin_sensors"] = habitat_sim.ActionSpec(
        "move_forward_and_spin_sensors", MoveAndSpinSpec(1.0, 45.0))

    # Add the same control with different parameters
    agent_config.action_space["fwd_and_spin_double"] = habitat_sim.ActionSpec(
        "move_forward_and_spin", MoveAndSpinSpec(2.0, 90.0))

    # Use the custom name with an integer name for the action
    agent_config.action_space[100] = habitat_sim.ActionSpec(
        "my_custom_name", MoveAndSpinSpec(0.1, 1.0))

    _pprint(agent_config.action_space)

    # Spin up the simulator
    backend_cfg = habitat_sim.SimulatorConfiguration()
    backend_cfg.scene.id = "data/scene_datasets/habitat-test-scenes/van-gogh-room.glb"
    sim = habitat_sim.Simulator(
        habitat_sim.Configuration(backend_cfg, [agent_config]))
    print(sim.get_agent(0).state)

    # Take the new actions!
    sim.step("fwd_and_spin")
    print(sim.get_agent(0).state)

    # Take the new actions!
    sim.step("fwd_and_spin_sensors")
    print(sim.get_agent(0).state)

    sim.step("fwd_and_spin_double")
    print(sim.get_agent(0).state)

    sim.step(100)
    print(sim.get_agent(0).state)

    sim.close()
    del sim

    # Let's define a strafe action!
    @attr.s(auto_attribs=True, slots=True)
    class StrafeActuationSpec:
        forward_amount: float
        # Classic strafing is to move perpendicular (90 deg) to the forward direction
        strafe_angle: float = 90.0

    def _strafe_impl(scene_node: habitat_sim.SceneNode, forward_amount: float,
                     strafe_angle: float):
        forward_ax = (
            np.array(scene_node.absolute_transformation().rotation_scaling())
            @ habitat_sim.geo.FRONT)
        rotation = habitat_sim.utils.quat_from_angle_axis(
            np.deg2rad(strafe_angle), habitat_sim.geo.UP)
        move_ax = habitat_sim.utils.quat_rotate_vector(rotation, forward_ax)

        scene_node.translate_local(move_ax * forward_amount)

    @habitat_sim.register_move_fn(body_action=True)
    class StrafeLeft(habitat_sim.SceneNodeControl):
        def __call__(self, scene_node: habitat_sim.SceneNode,
                     actuation_spec: StrafeActuationSpec):
            _strafe_impl(scene_node, actuation_spec.forward_amount,
                         actuation_spec.strafe_angle)

    @habitat_sim.register_move_fn(body_action=True)
    class StrafeRight(habitat_sim.SceneNodeControl):
        def __call__(self, scene_node: habitat_sim.SceneNode,
                     actuation_spec: StrafeActuationSpec):
            _strafe_impl(scene_node, actuation_spec.forward_amount,
                         -actuation_spec.strafe_angle)

    agent_config = habitat_sim.AgentConfiguration()
    agent_config.action_space["strafe_left"] = habitat_sim.ActionSpec(
        "strafe_left", StrafeActuationSpec(0.25))
    agent_config.action_space["strafe_right"] = habitat_sim.ActionSpec(
        "strafe_right", StrafeActuationSpec(0.25))

    sim = habitat_sim.Simulator(
        habitat_sim.Configuration(backend_cfg, [agent_config]))
    print(sim.get_agent(0).state)

    sim.step("strafe_left")
    print(sim.get_agent(0).state)

    sim.step("strafe_right")
    print(sim.get_agent(0).state)
Code example #27
    def create_sim_config(
            self, _sensor_suite: SensorSuite) -> habitat_sim.Configuration:
        sim_config = habitat_sim.SimulatorConfiguration()
        # Check if Habitat-Sim is post Scene Config Update
        if not hasattr(sim_config, "scene_id"):
            raise RuntimeError(
                "Incompatible version of Habitat-Sim detected, please upgrade habitat_sim"
            )
        overwrite_config(
            config_from=self.habitat_config.HABITAT_SIM_V0,
            config_to=sim_config,
            # Ignore key as it gets propagated to sensor below
            ignore_keys={"gpu_gpu"},
        )
        sim_config.scene_id = self.habitat_config.SCENE
        agent_config = habitat_sim.AgentConfiguration()
        overwrite_config(
            config_from=self._get_agent_config(),
            config_to=agent_config,
            # These keys are only used by Hab-Lab
            ignore_keys={
                "is_set_start_state",
                # This is the Sensor Config. Unpacked below
                "sensors",
                "start_position",
                "start_rotation",
            },
        )

        sensor_specifications = []
        VisualSensorTypeSet = {
            habitat_sim.SensorType.COLOR,
            habitat_sim.SensorType.DEPTH,
            habitat_sim.SensorType.SEMANTIC,
        }
        CameraSensorSubTypeSet = {
            habitat_sim.SensorSubType.PINHOLE,
            habitat_sim.SensorSubType.ORTHOGRAPHIC,
        }
        for sensor in _sensor_suite.sensors.values():

            # Check if type VisualSensorSpec, we know that Sensor is one of HabitatSimRGBSensor, HabitatSimDepthSensor, HabitatSimSemanticSensor
            if (getattr(sensor, "sim_sensor_type", [])
                    not in VisualSensorTypeSet):
                raise ValueError(
                    f"""{getattr(sensor, "sim_sensor_type", [])} is an illegal sensorType that is not implemented yet"""
                )
            # Check if type CameraSensorSpec
            if (getattr(sensor, "sim_sensor_subtype", [])
                    not in CameraSensorSubTypeSet):
                raise ValueError(
                    f"""{getattr(sensor, "sim_sensor_subtype", [])} is an illegal sensorSubType for a VisualSensor"""
                )

            # TODO: Implement checks for other types of SensorSpecs

            sim_sensor_cfg = habitat_sim.CameraSensorSpec()
            # TODO Handle configs for custom VisualSensors that might need
            # their own ignore_keys. Maybe with special key / checking
            # SensorType
            overwrite_config(
                config_from=sensor.config,
                config_to=sim_sensor_cfg,
                # These keys are only used by Hab-Lab
                # or translated into the sensor config manually
                ignore_keys={
                    "height",
                    "hfov",
                    "max_depth",
                    "min_depth",
                    "normalize_depth",
                    "type",
                    "width",
                },
            )
            sim_sensor_cfg.uuid = sensor.uuid
            sim_sensor_cfg.resolution = list(
                sensor.observation_space.shape[:2])

            # TODO(maksymets): Add configure method to Sensor API to avoid
            # accessing child attributes through parent interface
            # We know that the Sensor has to be one of these Sensors
            sensor = cast(HabitatSimVizSensors, sensor)
            sim_sensor_cfg.sensor_type = sensor.sim_sensor_type
            sim_sensor_cfg.sensor_subtype = sensor.sim_sensor_subtype
            sim_sensor_cfg.gpu2gpu_transfer = (
                self.habitat_config.HABITAT_SIM_V0.GPU_GPU)
            sensor_specifications.append(sim_sensor_cfg)

        agent_config.sensor_specifications = sensor_specifications
        agent_config.action_space = registry.get_action_space_configuration(
            self.habitat_config.ACTION_SPACE_CONFIG)(
                self.habitat_config).get()

        return habitat_sim.Configuration(sim_config, [agent_config])