Example #1
    def set_state(self, state: AgentState, reset_sensors: bool = True):
        r"""Sets the agents state

        :param state: The state to set the agent to
        :param reset_sensors: Whether or not to reset the sensors to their
            default intrinsic/extrinsic parameters before setting their
            extrinsic state
        """
        habitat_sim.errors.assert_obj_valid(self.body)

        if isinstance(state.rotation, list):
            state.rotation = quat_from_coeffs(state.rotation)

        self.body.object.reset_transformation()

        self.body.object.translate(state.position)
        self.body.object.rotation = quat_to_magnum(state.rotation)

        if reset_sensors:
            for _, v in self._sensors.items():
                v.set_transformation_from_spec()

        for k, v in state.sensor_states.items():
            assert k in self._sensors
            if isinstance(v.rotation, list):
                v.rotation = quat_from_coeffs(v.rotation)

            s = self._sensors[k]

            s.node.reset_transformation()
            s.node.translate(
                quat_rotate_vector(state.rotation.inverse(),
                                   v.position - state.position))
            s.node.rotation = quat_to_magnum(state.rotation.inverse() *
                                             v.rotation)
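
A minimal usage sketch for the method above, assuming an already-configured
habitat_sim.Simulator named ``sim`` with one agent; the pose values are
illustrative:

import numpy as np

import habitat_sim


def teleport_agent(sim: habitat_sim.Simulator) -> None:
    # Place agent 0 at an illustrative pose. The rotation is given as
    # quaternion coefficients (x, y, z, w); set_state converts the list
    # to a quaternion via quat_from_coeffs, as shown above.
    agent = sim.get_agent(0)
    state = habitat_sim.AgentState()
    state.position = np.array([1.0, 0.0, -2.0], dtype=np.float32)
    state.rotation = [0.0, 0.0, 0.0, 1.0]  # identity rotation
    agent.set_state(state, reset_sensors=True)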
Example #2
    def set_state(
        self,
        state: AgentState,
        reset_sensors: bool = True,
        infer_sensor_states: bool = True,
        is_initial: bool = False,
    ) -> None:
        r"""Sets the agents state

        :param state: The state to set the agent to
        :param reset_sensors: Whether or not to reset the sensors to their
            default intrinsic/extrinsic parameters before setting their extrinsic state.
        :param infer_sensor_states: Whether or not to infer the location of sensors based on
            the new location of the agent base state.
        :param is_initial: Whether this state is the initial state of the
            agent in the scene. Used for resetting the agent at a later time.

        Setting ``reset_sensors`` to :py:`False` allows the agent base state
        to be moved and the new sensor locations inferred without changing the
        configuration of the sensors with respect to the base state of the
        agent.

        Setting ``infer_sensor_states`` to :py:`False` is useful if you'd like
        to directly control the state of a sensor instead of moving the agent.

        """
        attr.validate(state)
        habitat_sim.errors.assert_obj_valid(self.body)

        if isinstance(state.rotation, (list, np.ndarray)):
            state.rotation = quat_from_coeffs(state.rotation)

        self.body.object.reset_transformation()

        self.body.object.translate(state.position)
        self.body.object.rotation = quat_to_magnum(state.rotation)

        if reset_sensors:
            for _, v in self._sensors.items():
                v.set_transformation_from_spec()

        if not infer_sensor_states:
            for k, v in state.sensor_states.items():
                assert k in self._sensors
                if isinstance(v.rotation, (list, np.ndarray)):
                    v.rotation = quat_from_coeffs(v.rotation)

                s = self._sensors[k]

                s.node.reset_transformation()
                s.node.translate(
                    quat_rotate_vector(state.rotation.inverse(),
                                       v.position - state.position))
                s.node.rotation = quat_to_magnum(state.rotation.inverse() *
                                                 v.rotation)

        if is_initial:
            self.initial_state = state
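
A hedged sketch of the ``infer_sensor_states=False`` path: copy the agent's
current state, nudge one sensor, and write it back so set_state applies the
per-sensor pose instead of re-deriving it from the base state. The sensor key
"color_sensor" is an assumption about how the agent was configured:

import habitat_sim


def raise_camera(agent: habitat_sim.Agent) -> None:
    # get_state() returns an AgentState whose sensor_states dict holds the
    # current world-frame pose of every attached sensor
    state = agent.get_state()
    state.sensor_states["color_sensor"].position[1] += 0.5  # raise the camera 0.5 m
    # reset_sensors=False keeps the current sensor parameters, and
    # infer_sensor_states=False makes set_state apply sensor_states directly
    agent.set_state(state, reset_sensors=False, infer_sensor_states=False)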
Example #3
    def reconfigure(self, config: Config) -> None:
        self.config = config
        is_same_sound = config.AGENT_0.SOUND_ID == self._current_sound
        if not is_same_sound:
            self._current_sound = self.config.AGENT_0.SOUND_ID

        is_same_scene = config.SCENE == self._current_scene
        if not is_same_scene:
            self._current_scene = config.SCENE
            logging.debug('Current scene: {} and sound: {}'.format(self.current_scene_name, self._current_sound))

            if not self.config.USE_RENDERED_OBSERVATIONS:
                self._sim.close()
                del self._sim
                self.sim_config = self.create_sim_config(self._sensor_suite)
                self._sim = habitat_sim.Simulator(self.sim_config)
                self._update_agents_state()
                self._frame_cache = dict()
            else:
                with open(self.current_scene_observation_file, 'rb') as fo:
                    self._frame_cache = pickle.load(fo)
            logging.debug('Loaded scene {}'.format(self.current_scene_name))

            self.points, self.graph = load_metadata(self.metadata_dir)
            for node in self.graph.nodes():
                self._position_to_index_mapping[self.position_encoding(self.graph.nodes()[node]['point'])] = node

        if not is_same_scene or not is_same_sound:
            self._audiogoal_cache = dict()
            self._spectrogram_cache = dict()

        self._episode_step_count = 0

        # set agent positions
        self._receiver_position_index = self._position_to_index(self.config.AGENT_0.START_POSITION)
        self._source_position_index = self._position_to_index(self.config.AGENT_0.GOAL_POSITION)
        # the agent rotates about +Y, starting from -Z, counterclockwise, so a
        # rotation angle of 90 means the agent has rotated 90 degrees about +Y
        self._rotation_angle = int(np.around(np.rad2deg(quat_to_angle_axis(quat_from_coeffs(
            self.config.AGENT_0.START_ROTATION))[0]))) % 360
        if not self.config.USE_RENDERED_OBSERVATIONS:
            self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
                                 self.config.AGENT_0.START_ROTATION)
        else:
            self._sim.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
                                      quat_from_coeffs(self.config.AGENT_0.START_ROTATION))

        logging.debug("Initial source, agent at: {}, {}, orientation: {}".
                      format(self._source_position_index, self._receiver_position_index, self.get_orientation()))
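
Both reconfigure variants map a 3D point onto a graph node via
_position_to_index. A hedged sketch of what the lookup helpers might look
like; the quantization format is an assumption, not SoundSpaces' actual code:

    def position_encoding(self, position):
        # quantize the coordinates so floating-point jitter cannot break dict lookups
        return '{:.2f}_{:.2f}_{:.2f}'.format(*position)

    def _position_to_index(self, position):
        key = self.position_encoding(position)
        assert key in self._position_to_index_mapping, 'position is not a graph node'
        return self._position_to_index_mapping[key]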
Example #4
    def reconfigure(self, config) -> None:
        dataset = config.SCENE.split('/')[2]
        scene_name = config.SCENE.split('/')[3]

        is_same_scene = config.SCENE == self._current_scene
        self.config = config
        self.sim_config = self.create_sim_config(self._sensor_suite)
        if not is_same_scene:
            self._current_scene = config.SCENE
            self._sim.close()
            del self._sim
            # HabitatSim is a wrapper class around habitat_sim, the backend renderer
            self._sim = habitat_sim.Simulator(self.sim_config)
            logging.info('Loaded scene {}'.format(scene_name))

        if not is_same_scene or self.points is None or self.graph is None:
            # can happen during initialization or reconfiguration
            metadata_dir = os.path.join('data/metadata', dataset, scene_name)
            self.points, self.graph = load_metadata(metadata_dir)

        # after the env calls reconfigure() to update the config with the current
        # episode, the simulator needs to update the agent's position and rotation
        # in accordance with the new config
        self._update_agents_state()

        # set agent positions
        self._receiver_position_index = self._position_to_index(self.config.AGENT_0.START_POSITION)
        self._source_position_index = self._position_to_index(self.config.AGENT_0.GOAL_POSITION)
        self._rotation = int(np.around(np.rad2deg(quat_to_angle_axis(quat_from_coeffs(
                             self.config.AGENT_0.START_ROTATION))[0]))) % 360
        self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
                             self.config.AGENT_0.START_ROTATION)

        logging.debug("Initial source, agent at: {}, {}, orientation: {}".
                      format(self._source_position_index, self._receiver_position_index,
                             self._rotation))
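
The _rotation expression above is dense; a worked standalone version, using an
illustrative START_ROTATION of 90 degrees about +Y given as (x, y, z, w)
coefficients:

import numpy as np
from habitat_sim.utils.common import quat_from_coeffs, quat_to_angle_axis

start_rotation = [0.0, 0.7071068, 0.0, 0.7071068]  # 90 degrees about +Y
# quat_to_angle_axis returns (angle_in_radians, axis); index 0 takes the angle
angle_rad = quat_to_angle_axis(quat_from_coeffs(start_rotation))[0]
heading_deg = int(np.around(np.rad2deg(angle_rad))) % 360
print(heading_deg)  # 90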
Example #5
def _render_and_load_gt(sim, scene, sensor_type, gpu2gpu):
    gt_data_pose_file = osp.abspath(
        osp.join(
            osp.dirname(__file__),
            "gt_data",
            "{}-state.json".format(osp.basename(osp.splitext(scene)[0])),
        ))
    with open(gt_data_pose_file, "r") as f:
        render_state = json.load(f)
        state = habitat_sim.AgentState()
        state.position = render_state["pos"]
        state.rotation = quat_from_coeffs(render_state["rot"])

    sim.initialize_agent(0, state)
    obs = sim.step("move_forward")

    assert sensor_type in obs, f"{sensor_type} not in obs"

    gt_obs_file = osp.abspath(
        osp.join(
            osp.dirname(__file__),
            "gt_data",
            "{}-{}.npy".format(osp.basename(osp.splitext(scene)[0]),
                               sensor_type),
        ))
    gt = np.load(gt_obs_file)

    if gpu2gpu:
        torch = pytest.importorskip("torch")

        for k, v in obs.items():
            if torch.is_tensor(v):
                obs[k] = v.cpu().numpy()

    return obs, gt
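
A hypothetical test built on this helper, reusing the tolerance check from the
fuller test in Example #6; the scene path and sensor name are placeholders:

def test_color_sensor_matches_gt(sim):
    scene = "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb"  # placeholder
    obs, gt = _render_and_load_gt(sim, scene, "color_sensor", gpu2gpu=False)
    # allow small cross-GPU / cross-driver rendering differences
    assert np.linalg.norm(
        obs["color_sensor"].astype(np.float64) - gt.astype(np.float64)
    ) < 1.5e-2 * np.linalg.norm(gt.astype(np.float64))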
Example #6
def test_sensors(scene, has_sem, sensor_type, gpu2gpu, sim, make_cfg_settings):
    if not osp.exists(scene):
        pytest.skip("Skipping {}".format(scene))

    if not habitat_sim.cuda_enabled and gpu2gpu:
        pytest.skip("Skipping GPU->GPU test")

    make_cfg_settings = dict(make_cfg_settings)  # shallow copy; don't mutate the fixture's dict
    make_cfg_settings["semantic_sensor"] = has_sem
    make_cfg_settings["scene"] = scene

    cfg = make_cfg(make_cfg_settings)
    for sensor_spec in cfg.agents[0].sensor_specifications:
        sensor_spec.gpu2gpu_transfer = gpu2gpu

    sim.reconfigure(cfg)
    with open(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-state.json".format(osp.basename(osp.splitext(scene)[0])),
            )
        ),
        "r",
    ) as f:
        render_state = json.load(f)
        state = habitat_sim.AgentState()
        state.position = render_state["pos"]
        state.rotation = quat_from_coeffs(render_state["rot"])

    sim.initialize_agent(0, state)
    obs = sim.step("move_forward")

    assert sensor_type in obs, f"{sensor_type} not in obs"

    gt = np.load(
        osp.abspath(
            osp.join(
                osp.dirname(__file__),
                "gt_data",
                "{}-{}.npy".format(osp.basename(osp.splitext(scene)[0]), sensor_type),
            )
        )
    )
    if gpu2gpu:
        import torch

        for k, v in obs.items():
            if torch.is_tensor(v):
                obs[k] = v.cpu().numpy()

    # Different GPUs and different driver versions will produce slightly different images
    assert np.linalg.norm(
        obs[sensor_type].astype(np.float64) - gt.astype(np.float64)
    ) < 1.5e-2 * np.linalg.norm(gt.astype(np.float64)), f"Incorrect {sensor_type} output"
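
The fixture arguments suggest this test is driven by pytest parametrization; a
hedged sketch of how such a matrix might be declared (the scene list and the
decorator layout are assumptions, not the repository's conftest):

import pytest


@pytest.mark.parametrize("gpu2gpu", [False, True])
@pytest.mark.parametrize("sensor_type", ["color_sensor", "depth_sensor"])
@pytest.mark.parametrize(
    "scene,has_sem",
    [("data/scene_datasets/habitat-test-scenes/skokloster-castle.glb", False)],
)
def test_sensors_matrix(scene, has_sem, sensor_type, gpu2gpu, sim, make_cfg_settings):
    # delegate to the test body above for each parameter combination
    test_sensors(scene, has_sem, sensor_type, gpu2gpu, sim, make_cfg_settings)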
Example #7
# @markdown ---
# @markdown Configure Parameters:

obj_attr_mgr = sim.get_object_template_manager()
remove_all_objects(sim)

seed = 2  # @param{type:"integer"}
random.seed(seed)
sim.seed(seed)
np.random.seed(seed)

# set up the agent state manually to face the bar
agent_state = sim.agents[0].state
agent_state.position = np.array([-1.97496, 0.072447, -2.0894])
agent_state.rotation = ut.quat_from_coeffs([0, -1, 0, 0])
sim.agents[0].set_state(agent_state)

# load the target objects
cheezit_handle = obj_attr_mgr.get_template_handles("cheezit")[0]
# create range from center and half-extent
target_zone = mn.Range3D.from_center(
    mn.Vector3(-2.07496, 1.07245, -0.2894), mn.Vector3(0.5, 0.05, 0.1)
)
num_targets = 9  # @param{type:"integer"}
for _target in range(num_targets):
    obj_id = sim.add_object_by_handle(cheezit_handle)
    # rotate boxes off of their sides
    rotate = mn.Quaternion.rotation(mn.Rad(-mn.math.pi_half), mn.Vector3(1.0, 0, 0))
    sim.set_rotation(rotate, obj_id)
    # sample state from the target zone
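
The snippet cuts off at the sampling comment; a hedged sketch of one way to
draw a uniform random point inside the mn.Range3D target zone (the helper is
an addition, not the tutorial's code):

def sample_in_range(zone: mn.Range3D) -> mn.Vector3:
    # draw each coordinate uniformly between the zone's min and max corners
    return mn.Vector3(
        random.uniform(zone.min.x, zone.max.x),
        random.uniform(zone.min.y, zone.max.y),
        random.uniform(zone.min.z, zone.max.z),
    )

# inside the loop above, each box would then be placed with something like:
#     sim.set_translation(sample_in_range(target_zone), obj_id)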
Example #8
    def reconfigure(self, config: Config) -> None:
        self.config = config
        if hasattr(self.config.AGENT_0, 'OFFSET'):
            self._offset = int(self.config.AGENT_0.OFFSET)
        else:
            self._offset = 0
        if self.config.AUDIO.EVERLASTING:
            self._duration = 500
        else:
            assert hasattr(self.config.AGENT_0, 'DURATION')
            self._duration = int(self.config.AGENT_0.DURATION)
        self._audio_index = 0
        is_same_sound = config.AGENT_0.SOUND_ID == self._current_sound
        if not is_same_sound:
            self._current_sound = self.config.AGENT_0.SOUND_ID
            self._load_single_source_sound()
            logging.debug("Switch to sound {} with duration {} seconds".format(self._current_sound, self._duration))

        is_same_scene = config.SCENE == self._current_scene
        if not is_same_scene:
            self._current_scene = config.SCENE
            logging.debug('Current scene: {} and sound: {}'.format(self.current_scene_name, self._current_sound))

            if self.config.USE_RENDERED_OBSERVATIONS:
                with open(self.current_scene_observation_file, 'rb') as fo:
                    self._frame_cache = pickle.load(fo)
            else:
                self._sim.close()
                del self._sim
                self.sim_config = self.create_sim_config(self._sensor_suite)
                self._sim = habitat_sim.Simulator(self.sim_config)
                self._update_agents_state()
                self._frame_cache = dict()
            logging.debug('Loaded scene {}'.format(self.current_scene_name))

            self.points, self.graph = load_metadata(self.metadata_dir)
            for node in self.graph.nodes():
                self._position_to_index_mapping[self.position_encoding(self.graph.nodes()[node]['point'])] = node
            self._instance2label_mapping = None

        if not is_same_scene or not is_same_sound:
            self._audiogoal_cache = dict()
            self._spectrogram_cache = dict()

        self._episode_step_count = 0

        # set agent positions
        self._receiver_position_index = self._position_to_index(self.config.AGENT_0.START_POSITION)
        self._source_position_index = self._position_to_index(self.config.AGENT_0.GOAL_POSITION)
        # the agent rotates about +Y, starting from -Z, counterclockwise, so a
        # rotation angle of 90 means the agent has rotated 90 degrees about +Y
        self._rotation_angle = int(np.around(np.rad2deg(quat_to_angle_axis(quat_from_coeffs(
                             self.config.AGENT_0.START_ROTATION))[0]))) % 360
        if self.config.USE_RENDERED_OBSERVATIONS:
            self._sim.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
                                      quat_from_coeffs(self.config.AGENT_0.START_ROTATION))
        else:
            self.set_agent_state(list(self.graph.nodes[self._receiver_position_index]['point']),
                                 self.config.AGENT_0.START_ROTATION)

        if self.config.AUDIO.HAS_DISTRACTOR_SOUND:
            self._distractor_position_index = self.config.AGENT_0.DISTRACTOR_POSITION_INDEX
            self._current_distractor_sound = self.config.AGENT_0.DISTRACTOR_SOUND_ID
            self._load_single_distractor_sound()

        if self._use_oracle_planner:
            self._oracle_actions = self.compute_oracle_actions()

        logging.debug("Initial source, agent at: {}, {}, orientation: {}".
                      format(self._source_position_index, self._receiver_position_index, self.get_orientation()))
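
The _offset, _duration, and _audio_index bookkeeping above suggests the source
sound is consumed one one-second window per simulator step; a speculative
sketch under that assumption (_current_source_sound and the 44.1 kHz sampling
rate are assumed, presumably set up by _load_single_source_sound):

    def _get_audio_window(self, sampling_rate: int = 44100):
        # one-second slice of the loaded waveform, shifted by the episode offset;
        # the index advances each step and is capped at the configured duration
        start = (self._offset + self._audio_index) * sampling_rate
        self._audio_index = min(self._audio_index + 1, self._duration - 1)
        return self._current_source_sound[start: start + sampling_rate]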