import numpy as np
from pyquaternion import Quaternion
from lyft_dataset_sdk.lyftdataset import LyftDataset
from lyft_dataset_sdk.utils.data_classes import Box


def test_rotate_around_box_center(size, center, translate, angle1, angle2):
    axis = [0, 0, 1]

    q1 = Quaternion(axis=axis, angle=angle1)
    q2 = Quaternion(axis=axis, angle=angle2)

    minus_q2 = Quaternion(axis=axis, angle=-q2.angle)

    original = Box(center=center, size=size, orientation=q1)

    assert original == (original.copy().rotate_around_box_center(
        q2).rotate_around_box_center(minus_q2))

    assert original == (original.copy().rotate_around_box_center(q2).translate(
        translate).rotate_around_box_center(minus_q2).translate(-translate))
Example #2
def transform_box_from_world_to_flat_sensor_coordinates(first_train_sample_box: Box, sample_data_token: str,
                                                        lyftd: LyftDataset):
    sample_box = first_train_sample_box.copy()
    sd_record = lyftd.get("sample_data", sample_data_token)
    cs_record = lyftd.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    sensor_record = lyftd.get("sensor", cs_record["sensor_token"])
    pose_record = lyftd.get("ego_pose", sd_record["ego_pose_token"])

    # Move box to ego vehicle coord system parallel to world z plane
    ypr = Quaternion(pose_record["rotation"]).yaw_pitch_roll
    yaw = ypr[0]

    sample_box.translate(-np.array(pose_record["translation"]))
    sample_box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)

    # Move box to the sensor coord system, keeping it parallel to the world z plane
    # We need two steps, because the camera coordinate frame is x (right), z (front), y (down)

    inv_ypr = Quaternion(cs_record["rotation"]).inverse.yaw_pitch_roll

    angz = inv_ypr[0]  # yaw of the inverse sensor rotation
    angx = inv_ypr[2]  # roll; overridden below by the fixed 90-degree rotation

    sample_box.translate(-np.array(cs_record['translation']))

    # rotate around z-axis
    sample_box.rotate(Quaternion(scalar=np.cos(angz / 2), vector=[0, 0, np.sin(angz / 2)]))
    # rotate around x-axis (by 90 degrees, i.e. pi / 2 radians)
    angx = np.pi / 2
    sample_box.rotate(Quaternion(scalar=np.cos(angx / 2), vector=[np.sin(angx / 2), 0, 0]))

    return sample_box
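
The quaternions built by hand above (scalar cos(a/2), vector sin(a/2) * axis) are just the axis-angle form written out component by component; a minimal check against pyquaternion's axis/angle constructor (the angle value is arbitrary):

# Sanity check: component-wise construction vs. axis/angle construction.
a = 0.3  # arbitrary angle in radians
by_components = Quaternion(scalar=np.cos(a / 2), vector=[0, 0, np.sin(a / 2)])
by_axis_angle = Quaternion(axis=[0, 0, 1], angle=a)
assert np.allclose(by_components.elements, by_axis_angle.elements)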
Example #3
def convert_box_to_world_coord(box: Box, sample_token, sensor_type, lyftd: LyftDataset):
    sample_box = box.copy()
    sample_record = lyftd.get('sample', sample_token)
    sample_data_token = sample_record['data'][sensor_type]

    converted_sample_box = convert_box_to_world_coord_with_sample_data_token(sample_box, sample_data_token)

    return converted_sample_box
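
convert_box_to_world_coord_with_sample_data_token is not defined in this listing. Assuming it applies the calibrated-sensor and ego-pose records in the reverse order of the world-to-ego/sensor examples shown here, a rough sketch could look like the following (the dataset handle lyftd and the two-step chain are assumptions, not the actual helper):

# Hypothetical sketch of the sensor -> world chain; not the actual helper.
sd_record = lyftd.get("sample_data", sample_data_token)
cs_record = lyftd.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
pose_record = lyftd.get("ego_pose", sd_record["ego_pose_token"])

world_box = sample_box.copy()
world_box.rotate(Quaternion(cs_record["rotation"]))        # sensor -> ego
world_box.translate(np.array(cs_record["translation"]))
world_box.rotate(Quaternion(pose_record["rotation"]))      # ego -> world
world_box.translate(np.array(pose_record["translation"]))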
Example #4
def transform_box_from_world_to_ego_coordinates(first_train_sample_box: Box, sample_data_token: str,
                                                lyftd: LyftDataset):
    sample_box = first_train_sample_box.copy()
    sd_record = lyftd.get("sample_data", sample_data_token)
    cs_record = lyftd.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    sensor_record = lyftd.get("sensor", cs_record["sensor_token"])
    pose_record = lyftd.get("ego_pose", sd_record["ego_pose_token"])
    # Move box to ego vehicle coord system
    sample_box.translate(-np.array(pose_record["translation"]))
    sample_box.rotate(Quaternion(pose_record["rotation"]).inverse)

    return sample_box
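
A minimal usage sketch for the function above, assuming a LyftDataset has already been loaded; the paths, the LIDAR_TOP channel, and the chosen annotation are placeholders:

# Hypothetical usage; paths and tokens are placeholders.
lyftd = LyftDataset(data_path="/path/to/dataset", json_path="/path/to/dataset/data")
sample = lyftd.sample[0]
sample_data_token = sample["data"]["LIDAR_TOP"]
world_box = lyftd.get_box(sample["anns"][0])  # annotation box in world coordinates
ego_box = transform_box_from_world_to_ego_coordinates(world_box, sample_data_token, lyftd)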
Example #5
def transform_box_from_world_to_flat_vehicle_coordinates(first_train_sample_box: Box, sample_data_token: str,
                                                         lyftd: LyftDataset):
    sample_box = first_train_sample_box.copy()
    sd_record = lyftd.get("sample_data", sample_data_token)
    cs_record = lyftd.get("calibrated_sensor", sd_record["calibrated_sensor_token"])
    sensor_record = lyftd.get("sensor", cs_record["sensor_token"])
    pose_record = lyftd.get("ego_pose", sd_record["ego_pose_token"])

    # Move box to ego vehicle coord system parallel to world z plane
    ypr = Quaternion(pose_record["rotation"]).yaw_pitch_roll
    yaw = ypr[0]

    sample_box.translate(-np.array(pose_record["translation"]))
    sample_box.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)

    return sample_box
Example #6
    def project_kitti_box_to_image(
            box: Box, p_left: np.ndarray,
            imsize: Tuple[int, int]) -> Union[None, Tuple[int, int, int, int]]:
        """Projects 3D box into KITTI image FOV.

        Args:
            box: 3D box in KITTI reference frame.
            p_left: <np.float: 3, 4>. Projection matrix.
            imsize: (width, height). Image size.

        Returns: (xmin, ymin, xmax, ymax). Bounding box in image plane or None if box is not in the image.

        """
        # Create a new box.
        box = box.copy()

        # KITTI defines the box center as the bottom center of the object.
        # We use the true center, so we need to adjust half height in negative y direction.
        box.translate(np.array([0, -box.wlh[2] / 2, 0]))

        # Keep only corners that are in front of the camera; bail out if there are none.
        corners = np.array(
            [corner for corner in box.corners().T if corner[2] > 0]).T
        if len(corners) == 0:
            return None

        # Project corners that are in front of the camera to 2d to get bbox in pixel coords.
        imcorners = view_points(corners, p_left, normalize=True)[:2]
        bbox = (np.min(imcorners[0]), np.min(imcorners[1]),
                np.max(imcorners[0]), np.max(imcorners[1]))

        # Crop bbox to prevent it extending outside image.
        bbox_crop = tuple(max(0, b) for b in bbox)
        bbox_crop = (
            min(imsize[0], bbox_crop[0]),
            min(imsize[1], bbox_crop[1]),
            min(imsize[0], bbox_crop[2]),
            min(imsize[1], bbox_crop[3]),
        )

        # Detect if a cropped box is empty.
        if bbox_crop[0] >= bbox_crop[2] or bbox_crop[1] >= bbox_crop[3]:
            return None

        return bbox_crop
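
A rough usage sketch for the projection above, assuming the method is callable directly, with a placeholder 3x4 projection matrix, a box already in the KITTI camera frame, and a KITTI-like image size; the function returns None when the box falls outside the image:

# Hypothetical usage; p_left values, kitti_box, and the image size are placeholders.
p_left = np.array([[720.0, 0.0, 620.0, 0.0],
                   [0.0, 720.0, 190.0, 0.0],
                   [0.0, 0.0, 1.0, 0.0]])
bbox_2d = project_kitti_box_to_image(kitti_box, p_left, imsize=(1242, 375))
if bbox_2d is not None:
    xmin, ymin, xmax, ymax = bbox_2d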
Example #7
    def box_nuscenes_to_kitti(
        box: Box,
        velo_to_cam_rot: Quaternion,
        velo_to_cam_trans: np.ndarray,
        r0_rect: Quaternion,
        kitti_to_nu_lidar_inv: Quaternion = Quaternion(axis=(0, 0, 1),
                                                       angle=np.pi / 2).inverse,
    ) -> Box:
        """Transform from nuScenes lidar frame to KITTI reference frame.

        Args:
            box: Instance in nuScenes lidar frame.
            velo_to_cam_rot: Quaternion to rotate from lidar to camera frame.
            velo_to_cam_trans: <np.float: 3>. Translate from lidar to camera frame.
            r0_rect: Quaternion to rectify camera frame.
            kitti_to_nu_lidar_inv: Quaternion to rotate nuScenes to KITTI LIDAR.

        Returns: Box instance in KITTI reference frame.

        """
        # Copy box to avoid side-effects.
        box = box.copy()

        # Rotate to KITTI lidar.
        box.rotate(kitti_to_nu_lidar_inv)

        # Transform to KITTI camera.
        box.rotate(velo_to_cam_rot)
        box.translate(velo_to_cam_trans)

        # Rotate to KITTI rectified camera.
        box.rotate(r0_rect)

        # KITTI defines the box center as the bottom center of the object.
        # We use the true center, so we need to adjust half height in y direction.
        box.translate(np.array([0, box.wlh[2] / 2, 0]))

        return box
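
The two KITTI helpers compose naturally: first move a lidar-frame box into the rectified camera frame, then project it with the previous example. A sketch with placeholder inputs (real values would come from the dataset's calibration records):

# Hypothetical composition; lidar_box, p_left, and the calibration values are placeholders.
velo_to_cam_rot = Quaternion(axis=[1, 0, 0], angle=-np.pi / 2)
velo_to_cam_trans = np.array([0.0, -0.08, -0.27])
r0_rect = Quaternion()  # identity, i.e. no rectification correction
kitti_box = box_nuscenes_to_kitti(lidar_box, velo_to_cam_rot, velo_to_cam_trans, r0_rect)
bbox_2d = project_kitti_box_to_image(kitti_box, p_left, imsize=(1242, 375))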
def test_rotate_around_origin_xy(size, angle1, angle2, center):
    x, y, z = center

    axis = [0, 0, 1]

    q1 = Quaternion(axis=axis, angle=angle1)
    q2 = Quaternion(axis=axis, angle=angle2)

    minus_q2 = Quaternion(axis=axis, angle=-q2.angle)

    original = Box(center=(x, y, z), size=size, orientation=q1)

    assert original == (original.copy().rotate_around_box_center(
        q2).rotate_around_box_center(minus_q2))

    cos_angle2 = q2.rotation_matrix[0, 0]
    sin_angle2 = q2.rotation_matrix[1, 0]

    new_center = x * cos_angle2 - y * sin_angle2, x * sin_angle2 + y * cos_angle2, z
    new_orientation = q1 * q2

    target = Box(center=new_center, size=size, orientation=new_orientation)

    assert original.rotate_around_origin(q2) == target
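
Both test functions take their inputs as parameters, which suggests they are driven by a property-based decorator; a sketch of how they might be wired up with hypothesis (the strategy ranges are assumptions):

# Hypothetical hypothesis wiring; the actual decorators are not shown in this listing.
from hypothesis import given
import hypothesis.strategies as st

coords = st.tuples(*[st.floats(-100, 100, allow_nan=False)] * 3)
sizes = st.tuples(*[st.floats(0.1, 10)] * 3)
angles = st.floats(-np.pi, np.pi)

@given(size=sizes, angle1=angles, angle2=angles, center=coords)
def test_rotate_around_origin_xy_generated(size, angle1, angle2, center):
    test_rotate_around_origin_xy(size, angle1, angle2, center)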