Example #1

    def _step_along_grad(
            self, grad_dir: np.quaternion) -> Union[SimulatorActions, np.ndarray]:
        current_state = self._sim.get_agent_state()
        alpha = angle_between_quaternions(grad_dir, current_state.rotation)
        if alpha <= np.deg2rad(self._sim.config.TURN_ANGLE) + EPSILON:
            return self._get_return_value(SimulatorActions.FORWARD)
        else:
            # Probe: take one left turn, check whether the angle to the
            # gradient direction shrinks, then restore the agent's state.
            sim_action = SimulatorActions.LEFT.value
            self._sim.step(sim_action)
            best_turn = (SimulatorActions.LEFT if (angle_between_quaternions(
                grad_dir,
                self._sim.get_agent_state().rotation) < alpha) else
                         SimulatorActions.RIGHT)
            self._reset_agent_state(current_state)
            return self._get_return_value(best_turn)
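All of the turn probes in these examples hinge on angle_between_quaternions, which returns the magnitude (in radians) of the rotation between two orientations. Below is a minimal, illustrative re-implementation together with a yaw helper; both are hypothetical stand-ins for habitat's own utilities, and the numbers show why a single simulated left turn is enough to pick the better turn direction:

import numpy as np
import quaternion  # numpy-quaternion package


def angle_between_quaternions(q1: np.quaternion, q2: np.quaternion) -> float:
    # For unit quaternions the conjugate is the inverse, so q1 * q2.conjugate()
    # is the rotation taking q2 to q1; its angle is 2 * atan2(|vec|, |w|).
    delta = q1 * q2.conjugate()
    return 2.0 * np.arctan2(np.linalg.norm(delta.vec), abs(delta.w))


def yaw(deg: float) -> np.quaternion:
    # Rotation about +y, the up axis in habitat's convention.
    return quaternion.from_rotation_vector(
        np.deg2rad(deg) * np.array([0.0, 1.0, 0.0]))


grad_dir = yaw(30.0)   # direction of steepest geodesic descent
agent = yaw(10.0)      # current agent heading
alpha = angle_between_quaternions(grad_dir, agent)
after_left = angle_between_quaternions(grad_dir, yaw(20.0))  # one 10-degree left turn
print(np.rad2deg(alpha), np.rad2deg(after_left))  # 20.0 10.0 -> LEFT shrinks the angle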
Example #2
    def get_optimal_action(self, start, pose_estimate):
        EPSILON = 1e-6
        config_env = super().habitat_env._config
        goal_pos = super().habitat_env.current_episode.goals[0].position
        goal_radius = config_env.TASK.SUCCESS_DISTANCE

        current_state = super().habitat_env.sim.get_agent_state()
        position_estimate, rotation_estimate = self.ans_frame_to_hab(
            start, pose_estimate)

        if (super().habitat_env.sim.geodesic_distance(
                position_estimate, goal_pos) <= goal_radius):
            # print("\nGoal Position:{0}\nCurrent Position:{1}\nOptimal Action: STOP".format(goal_pos, current_state.position))
            return HabitatSimActions.STOP
        points = super().habitat_env.sim.get_straight_shortest_path_points(
            position_estimate, goal_pos)
        # Add a little offset as things get weird if
        # points[1] - points[0] is anti-parallel with forward
        if len(points) < 2:
            max_grad_dir = None
        else:
            max_grad_dir = quaternion_from_two_vectors(
                super().habitat_env.sim.forward_vector,
                points[1] - points[0] +
                EPSILON * np.cross(super().habitat_env.sim.up_vector,
                                   super().habitat_env.sim.forward_vector),
            )
            max_grad_dir.x = 0  # zero the pitch component so the agent only yaws
            max_grad_dir = np.normalized(max_grad_dir)
        if max_grad_dir is None:
            return HabitatSimActions.MOVE_FORWARD
        else:
            alpha = angle_between_quaternions(max_grad_dir, rotation_estimate)
            if alpha <= np.deg2rad(config_env.SIMULATOR.TURN_ANGLE) + EPSILON:
                return HabitatSimActions.MOVE_FORWARD
            else:
                # Probe: take one left turn in the simulator, compare the
                # agent's angle to the gradient direction before and after,
                # then restore its state.
                sim = super().habitat_env.sim
                pre_alpha = angle_between_quaternions(max_grad_dir,
                                                      current_state.rotation)
                sim.step(HabitatSimActions.TURN_LEFT)
                left_alpha = angle_between_quaternions(
                    max_grad_dir, sim.get_agent_state().rotation)
                sim.set_agent_state(current_state.position,
                                    current_state.rotation,
                                    reset_sensors=False)
                if left_alpha < pre_alpha:
                    return HabitatSimActions.TURN_LEFT
                else:
                    return HabitatSimActions.TURN_RIGHT
        # print("\nGoal Position:{0}\nCurrent Position:{1}\nOptimal Action: STOP".format(goal_pos, current_state.position))
        return HabitatSimActions.STOP
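The "little offset" comment above deserves a closer look: quaternion_from_two_vectors is degenerate when its inputs are anti-parallel, because the rotation axis (the cross product) vanishes. The sketch below uses an illustrative shortest-arc implementation (habitat ships its own version) to show how the EPSILON * cross(up, forward) nudge sidesteps the singularity:

import numpy as np
import quaternion


def quaternion_from_two_vectors(v0: np.ndarray, v1: np.ndarray) -> np.quaternion:
    # Shortest-arc rotation taking v0 to v1; blows up when v0 == -v1,
    # since then c == -1 and the denominator s goes to zero.
    v0, v1 = v0 / np.linalg.norm(v0), v1 / np.linalg.norm(v1)
    c = v0.dot(v1)
    s = np.sqrt((1.0 + c) * 2.0)
    axis = np.cross(v0, v1)
    return np.quaternion(s * 0.5, *(axis / s))


forward = np.array([0.0, 0.0, -1.0])  # habitat's forward is -z
up = np.array([0.0, 1.0, 0.0])
target = -forward                     # exactly anti-parallel: axis degenerates
nudged = target + 1e-6 * np.cross(up, forward)  # bias the 180-degree turn to one side
print(quaternion_from_two_vectors(forward, nudged))  # well-defined ~180-degree yaw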
Example #3

    def check_state(agent_state, position, rotation):
        assert (angle_between_quaternions(agent_state.rotation,
                                          quaternion_from_coeff(rotation)) <
                1e-5), "Agent's rotation diverges from the shortest path."

        assert np.allclose(
            agent_state.position, position
        ), "Agent's position diverges from the shortest path's one."
Example #4

    def _step_along_grad(self, grad_dir: np.quaternion, goal_pos: np.ndarray,
                         previous_action: int) -> np.ndarray:
        current_state = self._sim.get_agent_state()
        alpha = angle_between_quaternions(grad_dir, current_state.rotation)
        if alpha <= np.deg2rad(self._sim.config.TURN_ANGLE) + EPSILON:
            return action_to_one_hot(SimulatorActions.MOVE_FORWARD)
        else:
            if previous_action in (SimulatorActions.TURN_LEFT,
                                   SimulatorActions.TURN_RIGHT):
                # Previous action was a turn, make current suggestion match previous action.
                best_turn = previous_action
            else:
                # Probe: take one left turn, check whether the angle to the
                # gradient direction shrinks, then restore the agent's state.
                sim_action = SimulatorActions.TURN_LEFT
                self._sim.step(sim_action)
                best_turn = (SimulatorActions.TURN_LEFT if
                             (angle_between_quaternions(
                                 grad_dir,
                                 self._sim.get_agent_state().rotation) < alpha)
                             else SimulatorActions.TURN_RIGHT)
                self._reset_agent_state(current_state)

            # Check if forward reduces geodesic distance
            curr_dist = self._geo_dist(goal_pos)
            self._sim.step(SimulatorActions.MOVE_FORWARD)
            new_dist = self._geo_dist(goal_pos)
            new_pos = self._sim.get_agent_state().position
            movement_size = np.linalg.norm(new_pos - current_state.position)
            self._reset_agent_state(current_state)
            if new_dist < curr_dist and movement_size / self._step_size > 0.95:
                # Make probability proportional to benefit of doing forward action
                forward_ind = SimulatorActions.MOVE_FORWARD
                one_hot = np.zeros(len(SimulatorActions), dtype=np.float32)
                one_hot[forward_ind] = (curr_dist - new_dist) / self._step_size
                if one_hot[forward_ind] > 0.8:
                    one_hot[forward_ind] = 1  # near-certain: make forward deterministic
                elif one_hot[forward_ind] < 0.2:
                    one_hot[forward_ind] = 0  # negligible benefit: drop forward
                one_hot[best_turn] = 1 - one_hot[forward_ind]
                return one_hot
            else:
                return action_to_one_hot(best_turn)
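The soft target built above is worth seeing with numbers. A toy sketch, assuming a four-action space ordered STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT (the indices are illustrative, not habitat's guaranteed ordering):

import numpy as np

STOP, MOVE_FORWARD, TURN_LEFT, TURN_RIGHT = 0, 1, 2, 3  # illustrative indices
step_size = 0.25  # metres per MOVE_FORWARD

# Forward shaves 0.15 m off the geodesic distance out of a possible 0.25 m,
# so it gets weight 0.6 and the best turn absorbs the remaining 0.4.
curr_dist, new_dist = 2.00, 1.85
target = np.zeros(4, dtype=np.float32)
target[MOVE_FORWARD] = (curr_dist - new_dist) / step_size
if target[MOVE_FORWARD] > 0.8:
    target[MOVE_FORWARD] = 1.0
elif target[MOVE_FORWARD] < 0.2:
    target[MOVE_FORWARD] = 0.0
target[TURN_LEFT] = 1.0 - target[MOVE_FORWARD]  # best_turn assumed TURN_LEFT here
print(target)  # prints roughly [0.  0.6  0.4  0.]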
Example #5
    def _step_along_grad(
            self, grad_dir: np.quaternion, position_estimate: np.ndarray,
            orientation_estimate: np.quaternion,
    ) -> Union[int, np.ndarray]:
        current_state = self._sim.get_agent_state()
        alpha = angle_between_quaternions(grad_dir, orientation_estimate)

        if alpha <= np.deg2rad(self._sim.config.TURN_ANGLE) + EPSILON:
            return self._get_return_value(HabitatSimActions.MOVE_FORWARD)
        else:
            # Probe: take one left turn, compare the agent's angle to the
            # gradient direction before and after, then restore its state.
            pre_alpha = angle_between_quaternions(grad_dir,
                                                  current_state.rotation)
            self._sim.step(HabitatSimActions.TURN_LEFT)
            best_turn = (
                HabitatSimActions.TURN_LEFT
                if (
                    angle_between_quaternions(
                        grad_dir, self._sim.get_agent_state().rotation
                    )
                    < pre_alpha
                )
                else HabitatSimActions.TURN_RIGHT
            )
            self._reset_agent_state(current_state)
            return self._get_return_value(best_turn)
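The probe variants above all depend on a _reset_agent_state helper to undo the speculative turn. A minimal sketch of what it plausibly looks like, assuming habitat-sim's set_agent_state(position, rotation, reset_sensors=...) API:

    def _reset_agent_state(self, state) -> None:
        # Teleport the agent back to its saved pose; skipping the sensor
        # reset avoids re-rendering observations for a throwaway probe step.
        self._sim.set_agent_state(
            state.position, state.rotation, reset_sensors=False)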
Example #6
def test_noise_models_rgbd():
    DEMO_MODE = False
    N_STEPS = 100

    config = get_config()
    config.defrost()
    config.SIMULATOR.SCENE = (
        "data/scene_datasets/habitat-test-scenes/skokloster-castle.glb")
    config.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR", "DEPTH_SENSOR"]
    config.freeze()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")

    valid_start_position = [-1.3731, 0.08431, 8.60692]

    expected_pointgoal = [0.1, 0.2, 0.3]
    goal_position = np.add(valid_start_position, expected_pointgoal)

    # The starting quaternion is rotated 180 degrees about the z-axis, which
    # corresponds to the simulator using negative z as the forward direction.
    start_rotation = [0, 0, 0, 1]
    test_episode = NavigationEpisode(
        episode_id="0",
        scene_id=config.SIMULATOR.SCENE,
        start_position=valid_start_position,
        start_rotation=start_rotation,
        goals=[NavigationGoal(position=goal_position)],
    )

    print(f"{test_episode}")
    with habitat.Env(config=config, dataset=None) as env:

        env.episode_iterator = iter([test_episode])
        no_noise_obs = [env.reset()]
        no_noise_states = [env.sim.get_agent_state()]

        actions = [
            sample_non_stop_action(env.action_space) for _ in range(N_STEPS)
        ]
        for action in actions:
            no_noise_obs.append(env.step(action))
            no_noise_states.append(env.sim.get_agent_state())
        env.close()

        config.defrost()

        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL = "GaussianNoiseModel"
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        config.SIMULATOR.RGB_SENSOR.NOISE_MODEL_KWARGS.INTENSITY_CONSTANT = 0.5
        config.SIMULATOR.DEPTH_SENSOR.NOISE_MODEL = "RedwoodDepthNoiseModel"

        config.SIMULATOR.ACTION_SPACE_CONFIG = "pyrobotnoisy"
        config.SIMULATOR.NOISE_MODEL = habitat.Config()
        config.SIMULATOR.NOISE_MODEL.ROBOT = "LoCoBot"
        config.SIMULATOR.NOISE_MODEL.CONTROLLER = "Proportional"
        config.SIMULATOR.NOISE_MODEL.NOISE_MULTIPLIER = 0.5

        config.freeze()

        env = habitat.Env(config=config, dataset=None)

        env.episode_iterator = iter([test_episode])

        obs = env.reset()
        rgb_ref = no_noise_obs[0]["rgb"].astype(np.float64)
        assert np.linalg.norm(
            obs["rgb"].astype(np.float64) - rgb_ref
        ) > 1.5e-2 * np.linalg.norm(rgb_ref), "No RGB noise detected."

        depth_ref = no_noise_obs[0]["depth"].astype(np.float64)
        assert np.linalg.norm(
            obs["depth"].astype(np.float64) - depth_ref
        ) > 1.5e-2 * np.linalg.norm(depth_ref), "No Depth noise detected."

        images = []
        state = env.sim.get_agent_state()
        angle_diffs = []
        pos_diffs = []
        for action in actions:
            prev_state = state
            obs = env.step(action)
            state = env.sim.get_agent_state()
            position_change = np.linalg.norm(np.array(state.position) -
                                             np.array(prev_state.position),
                                             ord=2)

            if action["action"][:5] == "TURN_":
                angle_diff = abs(
                    angle_between_quaternions(state.rotation,
                                              prev_state.rotation) -
                    np.deg2rad(config.SIMULATOR.TURN_ANGLE))
                angle_diffs.append(angle_diff)
            else:
                pos_diffs.append(
                    abs(position_change - config.SIMULATOR.FORWARD_STEP_SIZE))

            if DEMO_MODE:
                images.append(observations_to_image(obs, {}))

        if DEMO_MODE:
            images_to_video(images, "data/video/test_noise", "test_noise")

        assert (np.mean(angle_diffs) >
                0.025), "No turn action actuation noise detected."
        assert (np.mean(pos_diffs) >
                0.025), "No forward action actuation noise detected."
        env.close()
Example #7
def test_mp3d_eqa_sim_correspondence():
    eqa_config = get_config(CFG_TEST)

    if not mp3d_dataset.Matterport3dDatasetV1.check_config_paths_exist(
            eqa_config.DATASET):
        pytest.skip("Please download Matterport3D EQA dataset to data folder.")

    dataset = make_dataset(id_dataset=eqa_config.DATASET.TYPE,
                           config=eqa_config.DATASET)
    with habitat.Env(config=eqa_config, dataset=dataset) as env:
        env.episodes = [
            episode for episode in dataset.episodes
            if int(episode.episode_id) in TEST_EPISODE_SET[:EPISODES_LIMIT]
        ]

        ep_i = 0
        cycles_n = 2
        while cycles_n > 0:
            env.reset()
            episode = env.current_episode
            assert (len(episode.goals) == 1
                    ), "Episode must have exactly one goal."
            assert (len(episode.shortest_paths) == 1
                    ), "Episode must have exactly one shortest path."
            start_state = env.sim.get_agent_state()
            assert np.allclose(
                start_state.position, episode.start_position
            ), "Agent's start position diverges from the shortest path's one."

            rgb_mean = 0
            logger.info("{id} {question}\n{answer}".format(
                id=episode.episode_id,
                question=episode.question.question_text,
                answer=episode.question.answer_text,
            ))

            for step_id, point in enumerate(episode.shortest_paths[0]):
                cur_state = env.sim.get_agent_state()

                logger.info(
                    "diff position: {} diff rotation: {} "
                    "cur_state.position: {} shortest_path.position: {} "
                    "cur_state.rotation: {} shortest_path.rotation: {} action: {}"
                    "".format(
                        cur_state.position - point.position,
                        angle_between_quaternions(
                            cur_state.rotation,
                            quaternion_from_coeff(point.rotation),
                        ),
                        cur_state.position,
                        point.position,
                        cur_state.rotation,
                        point.rotation,
                        point.action,
                    ))

                assert np.allclose(
                    [cur_state.position[0], cur_state.position[2]],
                    [point.position[0], point.position[2]],
                    atol=CLOSE_STEP_THRESHOLD * (step_id + 1),
                ), "Agent's path diverges from the shortest path."

                if point.action != OLD_STOP_ACTION_ID:
                    obs = env.step(action=point.action)

                if not env.episode_over:
                    rgb_mean += obs["rgb"][:, :, :3].mean()

            if ep_i < len(RGB_EPISODE_MEANS):
                # Slightly bigger atol for basis meshes
                rgb_mean = rgb_mean / len(episode.shortest_paths[0])
                assert np.isclose(
                    RGB_EPISODE_MEANS[int(episode.episode_id)],
                    rgb_mean,
                    atol=0.5,
                ), "RGB output doesn't match the ground truth."

            ep_i = (ep_i + 1) % EPISODES_LIMIT
            if ep_i == 0:
                cycles_n -= 1
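The rotation comparison in the logging above goes through quaternion_from_coeff. An illustrative one-liner, assuming habitat's [x, y, z, w] coefficient order:

import numpy as np
import quaternion


def quaternion_from_coeff(coeffs) -> np.quaternion:
    # np.quaternion takes (w, x, y, z); the stored coefficients are [x, y, z, w].
    return np.quaternion(coeffs[3], coeffs[0], coeffs[1], coeffs[2])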
Example #8
    def get_optimal_gt_action(self, debug=False):
        EPSILON = 1e-6
        config_env = super().habitat_env._config
        goal_pos = super().habitat_env.current_episode.goals[0].position
        goal_radius = config_env.TASK.SUCCESS_DISTANCE
        current_state = super().habitat_env.sim.get_agent_state()

        log_bool = self.total_num_steps % self.args.log_interval == 0
        self.total_num_steps += 1
        # Timestamp helper for log messages (date and time, no microseconds).
        getdate = lambda: str(datetime.now()).split('.')[0]

        # Pass debug=True for verbose logging.
        if debug:
            log = lambda w, x, y, z: print(
                "\n{}\nGoal Position:{}\nCurrent Position:{}\nOptimal Action: {}\nGeodesic distance to goal: {}"
                .format(getdate(), w, x, y, z))
        else:
            log = lambda w, x, y, z: print("\n{}\nGeodesic Dist: {}".format(
                getdate(), z))

        geoDist = super().habitat_env.sim.geodesic_distance(
            current_state.position, goal_pos)
        if (geoDist <= goal_radius):
            if log_bool: log(goal_pos, current_state.position, 'STOP', geoDist)
            # print("\nGoal Position:{0}\nCurrent Position:{1}\nOptimal Action: STOP\nGeodistic Dist: {2}".format(goal_pos, current_state.position, geoDist))
            return HabitatSimActions.STOP
        points = super().habitat_env.sim.get_straight_shortest_path_points(
            current_state.position, goal_pos)
        # Add a little offset as things get weird if
        # points[1] - points[0] is anti-parallel with forward
        if len(points) < 2:
            max_grad_dir = None
        else:
            max_grad_dir = quaternion_from_two_vectors(
                super().habitat_env.sim.forward_vector,
                points[1] - points[0] +
                EPSILON * np.cross(super().habitat_env.sim.up_vector,
                                   super().habitat_env.sim.forward_vector),
            )
            max_grad_dir.x = 0
            max_grad_dir = np.normalized(max_grad_dir)
        if max_grad_dir is None:
            if log_bool:
                log(goal_pos, current_state.position, 'FORWARD', geoDist)
            return HabitatSimActions.MOVE_FORWARD
        else:
            alpha = angle_between_quaternions(max_grad_dir,
                                              current_state.rotation)
            if alpha <= np.deg2rad(config_env.SIMULATOR.TURN_ANGLE) + EPSILON:
                if log_bool:
                    log(goal_pos, current_state.position, 'FORWARD', geoDist)
                return HabitatSimActions.MOVE_FORWARD
            else:
                # Probe: take one left turn in the simulator, check whether it
                # reduces the angle to the gradient direction, then restore
                # the agent's state.
                sim = super().habitat_env.sim
                sim.step(HabitatSimActions.TURN_LEFT)
                left_alpha = angle_between_quaternions(
                    max_grad_dir, sim.get_agent_state().rotation)
                sim.set_agent_state(current_state.position,
                                    current_state.rotation,
                                    reset_sensors=False)
                if left_alpha < alpha:
                    if log_bool:
                        log(goal_pos, current_state.position, 'LEFT', geoDist)
                    return HabitatSimActions.TURN_LEFT
                else:
                    if log_bool:
                        log(goal_pos, current_state.position, 'RIGHT', geoDist)
                    return HabitatSimActions.TURN_RIGHT
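A small readability note on the logging above: PEP 8 discourages assigning lambdas to names, and the multi-line format strings make them hard to scan. One way to restructure the same behavior as plain functions (the names here are illustrative):

from datetime import datetime


def _now() -> str:
    # Date and time without microseconds, e.g. "2024-01-31 12:34:56".
    return str(datetime.now()).split('.')[0]


def log_decision(goal_pos, position, action, geo_dist, debug=False):
    if debug:
        print("\n{}\nGoal Position:{}\nCurrent Position:{}\n"
              "Optimal Action: {}\nGeodesic distance to goal: {}".format(
                  _now(), goal_pos, position, action, geo_dist))
    else:
        print("\n{}\nGeodesic Dist: {}".format(_now(), geo_dist))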