Example #1
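Computes a pointgoal observation: the goal offset is rotated into the agent's frame and returned in polar or Cartesian coordinates, with 2D or 3D dimensionality depending on the sensor configuration.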
    def _compute_pointgoal(self, source_position, source_rotation,
                           goal_position):
        direction_vector = goal_position - source_position
        direction_vector_agent = quaternion_rotate_vector(
            source_rotation.inverse(), direction_vector)

        if self._goal_format == "POLAR":
            if self._dimensionality == 2:
                rho, phi = cartesian_to_polar(-direction_vector_agent[2],
                                              direction_vector_agent[0])
                return np.array([rho, -phi], dtype=np.float32)
            else:
                _, phi = cartesian_to_polar(-direction_vector_agent[2],
                                            direction_vector_agent[0])
                theta = np.arccos(direction_vector_agent[1] /
                                  np.linalg.norm(direction_vector_agent))
                rho = np.linalg.norm(direction_vector_agent)

                return np.array([rho, -phi, theta], dtype=np.float32)
        else:
            if self._dimensionality == 2:
                return np.array(
                    [-direction_vector_agent[2], direction_vector_agent[0]],
                    dtype=np.float32,
                )
            else:
                return direction_vector_agent
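A minimal standalone sketch (not from the source) of the coordinate convention these examples rely on: x points right, y up, and -z is the agent's forward direction. The two helpers below re-implement quaternion_rotate_vector and cartesian_to_polar on top of the numpy-quaternion package so the snippet runs on its own; Habitat provides equivalents (quaternion_rotate_vector lives in habitat.utils.geometry_utils).

import numpy as np
import quaternion  # the numpy-quaternion package


def quaternion_rotate_vector(quat, v):
    # Rotate v by quat via q * (0, v) * q^-1, keeping the imaginary part.
    vq = np.quaternion(0, 0, 0, 0)
    vq.imag = v
    return (quat * vq * quat.inverse()).imag


def cartesian_to_polar(x, y):
    return np.sqrt(x ** 2 + y ** 2), np.arctan2(y, x)


# With an identity rotation, a goal 2m straight ahead of the agent (along -z)
# comes out as rho = 2, phi = 0 in the agent frame.
rot = np.quaternion(1, 0, 0, 0)  # (w, x, y, z)
offset = quaternion_rotate_vector(rot.inverse(), np.array([0.0, 0.0, -2.0]))
rho, phi = cartesian_to_polar(-offset[2], offset[0])
print(rho, phi)  # 2.0 0.0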
Example #2
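Converts a rotation quaternion into a scalar XY-plane heading by rotating the agent's forward (-z) vector and taking the polar angle of the result.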
    def _quat_to_xy_heading(self, quat):
        direction_vector = np.array([0, 0, -1])

        heading_vector = quaternion_rotate_vector(quat, direction_vector)

        phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
        return np.array([phi], dtype=np.float32)
Example #3
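Projects an egocentric goal prediction into the world frame, snaps it to the navigation graph if necessary, and returns the geodesic distance from the agent to the predicted goal (-1 if no path exists).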
def compute_distance_to_pred(pred, sim):
    from habitat.utils.geometry_utils import quaternion_rotate_vector
    import networkx as nx

    agent_state = sim.get_agent_state()
    source_position = agent_state.position
    source_rotation = agent_state.rotation

    rounded_pred = np.round(pred)
    # The prediction is egocentric: pred[0] along the agent's forward (-z)
    # axis and pred[1] along its lateral (x) axis. Rotate it into the world
    # frame and offset from the agent's position.
    direction_vector_agent = np.array([rounded_pred[1], 0, -rounded_pred[0]])
    direction_vector = quaternion_rotate_vector(source_rotation,
                                                direction_vector_agent)
    pred_goal_location = source_position + direction_vector.astype(np.float32)
    # keep the predicted goal at the agent's height
    pred_goal_location[1] = source_position[1]

    try:
        # snap to the nearest navigation-graph node if the predicted
        # location is not on the graph
        if sim.position_encoding(
                pred_goal_location) not in sim._position_to_index_mapping:
            pred_goal_location = sim.find_nearest_graph_node(
                pred_goal_location)
        distance_to_target = sim.geodesic_distance(source_position,
                                                   [pred_goal_location])
    except nx.exception.NetworkXNoPath:
        distance_to_target = -1
    return distance_to_target
Example #4
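An episodic sensor: returns the agent's position and heading relative to the episode start pose, together with an episode step counter.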
    def get_observation(self, observations, episode, *args: Any,
                        **kwargs: Any):
        episode_uniq_id = f"{episode.scene_id} {episode.episode_id}"
        if episode_uniq_id != self._current_episode_id:
            self._episode_time = 0.0
            self._current_episode_id = episode_uniq_id

        agent_state = self._sim.get_agent_state()

        origin = np.array(episode.start_position, dtype=np.float32)
        rotation_world_start = quaternion_from_coeff(episode.start_rotation)

        agent_position_xyz = agent_state.position
        rotation_world_agent = agent_state.rotation

        agent_position_xyz = quaternion_rotate_vector(
            rotation_world_start.inverse(), agent_position_xyz - origin)

        agent_heading = self._quat_to_xy_heading(
            rotation_world_agent.inverse() * rotation_world_start)

        ep_time = self._episode_time
        self._episode_time += 1.0

        return np.array(
            [-agent_position_xyz[2], agent_position_xyz[0], agent_heading, ep_time],
            dtype=np.float32,
        )
Example #5
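A test that recombines the separate pointgoal, GPS, and compass observations and checks the result against the pointgoal_with_gps_compass sensor at every step.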
def test_pointgoal_with_gps_compass_sensor():
    config = get_config()
    if not os.path.exists(config.SIMULATOR.SCENE):
        pytest.skip("Please download Habitat test data to data folder.")
    config.defrost()
    config.TASK.SENSORS = [
        "POINTGOAL_WITH_GPS_COMPASS_SENSOR",
        "COMPASS_SENSOR",
        "GPS_SENSOR",
        "POINTGOAL_SENSOR",
    ]
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.POINTGOAL_SENSOR.DIMENSIONALITY = 3
    config.TASK.POINTGOAL_SENSOR.GOAL_FORMAT = "CARTESIAN"

    config.TASK.GPS_SENSOR.DIMENSIONALITY = 3

    config.freeze()
    with habitat.Env(config=config, dataset=None) as env:
        # start position is checked for validity for the specific test scene
        valid_start_position = [-1.3731, 0.08431, 8.60692]
        expected_pointgoal = [0.1, 0.2, 0.3]
        goal_position = np.add(valid_start_position, expected_pointgoal)

        # starting quaternion is rotated 180 degrees along the z-axis, which
        # corresponds to the simulator using negative z as the forward direction
        start_rotation = [0, 0, 0, 1]

        env.episode_iterator = iter([
            NavigationEpisode(
                episode_id="0",
                scene_id=config.SIMULATOR.SCENE,
                start_position=valid_start_position,
                start_rotation=start_rotation,
                goals=[NavigationGoal(position=goal_position)],
            )
        ])

        env.reset()
        for _ in range(100):
            obs = env.step(sample_non_stop_action(env.action_space))
            pointgoal = obs["pointgoal"]
            pointgoal_with_gps_compass = obs["pointgoal_with_gps_compass"]
            compass = float(obs["compass"][0])
            gps = obs["gps"]
            # check that taking non-stop actions does not affect the static pointgoal
            assert np.allclose(
                pointgoal_with_gps_compass,
                quaternion_rotate_vector(
                    quaternion.from_rotation_vector(
                        compass * np.array([0, 1, 0])).inverse(),
                    pointgoal - gps,
                ),
                atol=1e-5,
            )
Example #6
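Builds an info dict with the agent's world position, its heading angle, and whether the stop action has been called.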
    def get_info(self, observations):
        agent_state = self._env.sim.get_agent_state()
        heading_vector = quaternion_rotate_vector(
            agent_state.rotation.inverse(), np.array([0, 0, -1]))
        heading = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
        return {
            "position": agent_state.position.tolist(),
            "heading": heading,
            "stop": self._env.task.is_stop_called,
        }
Example #7
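Returns the agent's heading as a polar angle, offset by pi to account for the negative-z forward convention.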
    def get_polar_angle(self):
        agent_state = self._sim.get_agent_state()
        # quaternion is in x, y, z, w format
        ref_rotation = agent_state.rotation

        heading_vector = quaternion_rotate_vector(ref_rotation.inverse(),
                                                  np.array([0, 0, -1]))

        phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
        z_neg_z_flip = np.pi
        return np.array(phi) + z_neg_z_flip
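A quick standalone check (not from the source; assumes the numpy-quaternion package) of the pi offset above: with an identity rotation the agent faces -z, phi evaluates to 0, and the flip returns pi.

import numpy as np
import quaternion  # the numpy-quaternion package

ref_rotation = np.quaternion(1, 0, 0, 0)  # identity, (w, x, y, z)
vq = np.quaternion(0, 0, 0, 0)
vq.imag = np.array([0.0, 0.0, -1.0])
# Same as quaternion_rotate_vector(ref_rotation.inverse(), [0, 0, -1]).
heading_vector = (ref_rotation.inverse() * vq * ref_rotation).imag
phi = np.arctan2(heading_vector[0], -heading_vector[2])
print(phi + np.pi)  # 3.141592653589793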
Example #8
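Transforms a local-frame velocity into the world frame by rotating it and adding the frame's translation.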
    def get_vel_world_frame(self, vel_local, rotation, translation) -> np.ndarray:
        r"""Get velocity in the world frame.

        Args:
            vel_local: linear/angular velocity in the local frame, a numpy array
            rotation: rotation of the local frame w.r.t. the world frame, a quaternion
            translation: translation of the local frame w.r.t. the world frame, a numpy array
        """
        # rotate into the world frame, then translate
        vel_world = quaternion_rotate_vector(rotation, vel_local) + np.array(translation)

        return vel_world
Example #9
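Recovers the heading, the clockwise rotation about the Y axis, from a rotation quaternion.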
def compute_heading_from_quaternion(r):
    """
    r: rotation quaternion (an np.quaternion unit quaternion) representing
    the real-world rotation.

    Computes the clockwise rotation about the Y axis.
    """
    direction_vector = np.array([0, 0, -1])  # agent's forward vector
    heading_vector = quaternion_rotate_vector(r.inverse(), direction_vector)

    phi = -np.arctan2(heading_vector[0], -heading_vector[2]).item()
    return phi
Example #10
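A variant of get_polar_angle adapted for a Fetch robot: the reference vector is +z and the offset is pi/2 instead of pi.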
    def get_polar_angle(self):
        # CHANGED BY ANDREW SZOT FOR FETCH ROBOT
        agent_state = self._sim.get_agent_state()
        # quaternion is in x, y, z, w format
        ref_rotation = agent_state.rotation

        heading_vector = quaternion_rotate_vector(ref_rotation.inverse(),
                                                  np.array([0, 0, 1]))

        phi = cartesian_to_polar(heading_vector[2], heading_vector[0])[1]
        # z_neg_z_flip = np.pi
        z_neg_z_flip = np.pi / 2.0
        return np.array(phi) + z_neg_z_flip
Example #11
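Draws a top-down map with the agent and, when a prediction is given, projects the egocentric goal prediction into map coordinates and marks it.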
def plot_top_down_map(info, dataset='replica', pred=None):
    top_down_map = info["top_down_map"]["map"]
    top_down_map = maps.colorize_topdown_map(
        top_down_map, info["top_down_map"]["fog_of_war_mask"])
    map_agent_pos = info["top_down_map"]["agent_map_coord"]
    if dataset == 'replica':
        agent_radius_px = top_down_map.shape[0] // 16
    else:
        agent_radius_px = top_down_map.shape[0] // 50
    top_down_map = maps.draw_agent(
        image=top_down_map,
        agent_center_coord=map_agent_pos,
        agent_rotation=info["top_down_map"]["agent_angle"],
        agent_radius_px=agent_radius_px)
    if pred is not None:
        from habitat.utils.geometry_utils import quaternion_rotate_vector

        source_rotation = info["top_down_map"]["agent_rotation"]

        rounded_pred = np.round(pred[1])
        direction_vector_agent = np.array(
            [rounded_pred[1], 0, -rounded_pred[0]])
        direction_vector = quaternion_rotate_vector(source_rotation,
                                                    direction_vector_agent)

        grid_size = (
            (maps.COORDINATE_MAX - maps.COORDINATE_MIN) / 10000,
            (maps.COORDINATE_MAX - maps.COORDINATE_MIN) / 10000,
        )
        delta_x = int(-direction_vector[0] / grid_size[0])
        delta_y = int(direction_vector[2] / grid_size[1])

        x = np.clip(map_agent_pos[0] + delta_x,
                    a_min=0,
                    a_max=top_down_map.shape[0])
        y = np.clip(map_agent_pos[1] + delta_y,
                    a_min=0,
                    a_max=top_down_map.shape[1])
        point_padding = 20
        for m in range(x - point_padding, x + point_padding + 1):
            for n in range(y - point_padding, y + point_padding + 1):
                if (np.linalg.norm(np.array([m - x, n - y])) <= point_padding
                        and 0 <= m < top_down_map.shape[0]
                        and 0 <= n < top_down_map.shape[1]):
                    top_down_map[m, n] = (0, 255, 255)
        if np.linalg.norm(rounded_pred) < 1:
            assert delta_x == 0 and delta_y == 0

    if top_down_map.shape[0] > top_down_map.shape[1]:
        top_down_map = np.rot90(top_down_map, 1)
    return top_down_map
Example #12
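An episodic GPS sensor: expresses the agent's position in the episode-start frame, as 2D (forward, lateral) or full 3D coordinates.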
    def get_observation(self, observations, episode, *args: Any,
                        **kwargs: Any):
        agent_state = self._sim.get_agent_state()

        origin = np.array(episode.start_position, dtype=np.float32)
        rotation_world_start = quaternion_from_coeff(episode.start_rotation)

        agent_position = agent_state.position

        agent_position = quaternion_rotate_vector(
            rotation_world_start.inverse(), agent_position - origin)
        if self._dimensionality == 2:
            return np.array([-agent_position[2], agent_position[0]],
                            dtype=np.float32)
        else:
            return agent_position.astype(np.float32)
Example #13
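Composes a single visualization frame from RGB/depth observations, a collision overlay, and a top-down map; when a prediction is given it also marks the predicted goal and renders its category label.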
def observations_to_image(observation: Dict,
                          info: Dict,
                          pred=None) -> np.ndarray:
    r"""Generate image of single frame from observation and info
    returned from a single environment step().

    Args:
        observation: observation returned from an environment step().
        info: info returned from an environment step().
        pred: optional prediction tuple; pred[0] is a goal category index and
            pred[1] an egocentric goal-location prediction.

    Returns:
        generated image of a single frame.
    """
    egocentric_view = []
    if "rgb" in observation:
        observation_size = observation["rgb"].shape[0]
        rgb = observation["rgb"]
        if not isinstance(rgb, np.ndarray):
            rgb = rgb.cpu().numpy()

        egocentric_view.append(rgb)

    # draw depth map if observation has depth info
    if "depth" in observation:
        observation_size = observation["depth"].shape[0]
        depth_map = observation["depth"].squeeze() * 255.0
        if not isinstance(depth_map, np.ndarray):
            depth_map = depth_map.cpu().numpy()

        depth_map = depth_map.astype(np.uint8)
        depth_map = np.stack([depth_map for _ in range(3)], axis=2)
        egocentric_view.append(depth_map)

    assert (len(egocentric_view) >
            0), "Expected at least one visual sensor enabled."
    egocentric_view = np.concatenate(egocentric_view, axis=1)

    # draw collision
    if "collisions" in info and info["collisions"]["is_collision"]:
        egocentric_view = draw_collision(egocentric_view)

    frame = egocentric_view

    if "top_down_map" in info:
        top_down_map = info["top_down_map"]["map"]
        top_down_map = maps.colorize_topdown_map(
            top_down_map, info["top_down_map"]["fog_of_war_mask"])
        map_agent_pos = info["top_down_map"]["agent_map_coord"]
        top_down_map = maps.draw_agent(
            image=top_down_map,
            agent_center_coord=map_agent_pos,
            agent_rotation=info["top_down_map"]["agent_angle"],
            agent_radius_px=top_down_map.shape[0] // 16,
        )
        if pred is not None:
            from habitat.utils.geometry_utils import quaternion_rotate_vector

            source_rotation = info["top_down_map"]["agent_rotation"]

            rounded_pred = np.round(pred[1])
            direction_vector_agent = np.array(
                [rounded_pred[1], 0, -rounded_pred[0]])
            direction_vector = quaternion_rotate_vector(
                source_rotation, direction_vector_agent)

            grid_size = (
                (maps.COORDINATE_MAX - maps.COORDINATE_MIN) / 10000,
                (maps.COORDINATE_MAX - maps.COORDINATE_MIN) / 10000,
            )
            delta_x = int(-direction_vector[0] / grid_size[0])
            delta_y = int(direction_vector[2] / grid_size[1])

            x = np.clip(map_agent_pos[0] + delta_x,
                        a_min=0,
                        a_max=top_down_map.shape[0])
            y = np.clip(map_agent_pos[1] + delta_y,
                        a_min=0,
                        a_max=top_down_map.shape[1])
            point_padding = 12
            for m in range(x - point_padding, x + point_padding + 1):
                for n in range(y - point_padding, y + point_padding + 1):
                    if (np.linalg.norm(np.array([m - x, n - y])) <= point_padding
                            and 0 <= m < top_down_map.shape[0]
                            and 0 <= n < top_down_map.shape[1]):
                        top_down_map[m, n] = (0, 255, 255)
            if np.linalg.norm(rounded_pred) < 1:
                assert delta_x == 0 and delta_y == 0

        if top_down_map.shape[0] > top_down_map.shape[1]:
            top_down_map = np.rot90(top_down_map, 1)

        # scale top down map to align with rgb view
        if pred is None:
            old_h, old_w, _ = top_down_map.shape
            top_down_height = observation_size
            top_down_width = int(float(top_down_height) / old_h * old_w)
            # cv2 resize (dsize is width first)
            top_down_map = cv2.resize(
                top_down_map.astype(np.float32),
                (top_down_width, top_down_height),
                interpolation=cv2.INTER_CUBIC,
            )
        else:
            # draw label
            CATEGORY_INDEX_MAPPING = {
                'chair': 0,
                'table': 1,
                'picture': 2,
                'cabinet': 3,
                'cushion': 4,
                'sofa': 5,
                'bed': 6,
                'chest_of_drawers': 7,
                'plant': 8,
                'sink': 9,
                'toilet': 10,
                'stool': 11,
                'towel': 12,
                'tv_monitor': 13,
                'shower': 14,
                'bathtub': 15,
                'counter': 16,
                'fireplace': 17,
                'gym_equipment': 18,
                'seating': 19,
                'clothes': 20
            }
            index2label = {v: k for k, v in CATEGORY_INDEX_MAPPING.items()}
            pred_label = index2label[pred[0]]
            text_height = int(observation_size * 0.1)

            old_h, old_w, _ = top_down_map.shape
            top_down_height = observation_size - text_height
            top_down_width = int(float(top_down_height) / old_h * old_w)
            # cv2 resize (dsize is width first)
            top_down_map = cv2.resize(
                top_down_map.astype(np.float32),
                (top_down_width, top_down_height),
                interpolation=cv2.INTER_CUBIC,
            )

            top_down_map = np.concatenate([
                np.ones([text_height, top_down_map.shape[1], 3],
                        dtype=np.int32) * 255,
                top_down_map,
            ], axis=0)
            top_down_map = cv2.putText(top_down_map,
                                       'C_t: ' + pred_label.replace('_', ' '),
                                       (10, text_height - 10),
                                       cv2.FONT_HERSHEY_SIMPLEX, 1.4,
                                       (0, 0, 0), 2, cv2.LINE_AA)

        frame = np.concatenate((egocentric_view, top_down_map), axis=1)
    return frame