Code Example #1
def transform_xyz(xyz: np.ndarray, pose1: SE3, pose2: SE3) -> np.ndarray:
    # transform xyz from pose1 to pose2

    # convert to city coordinates
    city_coord = pose1.transform_point_cloud(xyz)

    return pose2.inverse_transform_point_cloud(city_coord)
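
The helper above routes through the city frame explicitly. Under the SE3 API used throughout these examples (inverse() and compose() also appear in examples #5 and #16), the same mapping can be written as a single composed transform. A minimal sketch, assuming argoverse's SE3 class behaves as shown elsewhere in this collection:

import numpy as np
from argoverse.utils.se3 import SE3

def transform_xyz_composed(xyz: np.ndarray, pose1: SE3, pose2: SE3) -> np.ndarray:
    # pose1 maps frame1 -> city, pose2.inverse() maps city -> frame2,
    # so their composition maps frame1 -> frame2 in one step.
    frame2_SE3_frame1 = pose2.inverse().compose(pose1)
    return frame2_SE3_frame1.transform_point_cloud(xyz)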
Code Example #2
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_chaining_transforms() -> None:
    """Test chaining two transformations to restore 3 points back to their original position."""
    theta0 = np.pi / 4
    R0 = get_yaw_angle_rotmat(theta0)

    theta1 = np.pi
    R1 = get_yaw_angle_rotmat(theta1)

    t0 = np.zeros(3)
    t1 = np.zeros(3)

    fr2_se3_fr1 = SE3(rotation=R0, translation=t0)
    fr1_se3_fr0 = SE3(rotation=R1, translation=t1)

    fr2_se3_fr0 = fr2_se3_fr1.right_multiply_with_se3(fr1_se3_fr0)

    pts = np.array([[1.0, 0.0, 4.0], [1.0, 0.0, 3.0]])
    transformed_pts = fr2_se3_fr0.transform_point_cloud(pts.copy())
    gt_transformed_pts = np.array([
        [-np.sqrt(2) / 2, -np.sqrt(2) / 2, 4.0],
        [-np.sqrt(2) / 2, -np.sqrt(2) / 2, 3.0],
    ])

    assert np.allclose(transformed_pts, gt_transformed_pts)

    combined_R = get_yaw_angle_rotmat(theta0 + theta1)
    assert np.allclose(fr2_se3_fr0.rotation, combined_R)
    assert np.allclose(fr2_se3_fr0.translation, np.zeros(3))
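
get_yaw_angle_rotmat is used by this test but not shown. A minimal sketch of what it presumably computes (a rotation by theta about the z-axis), consistent with the expected values asserted above:

import numpy as np

def get_yaw_angle_rotmat(theta: float) -> np.ndarray:
    # counter-clockwise rotation by `theta` radians about the +z axis
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])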
Code Example #3
def plot_lane_centerlines_in_img(
    lidar_pts: np.ndarray,
    city_to_egovehicle_se3: SE3,
    img: np.ndarray,
    city_name: str,
    avm: ArgoverseMap,
    camera_config: CameraConfig,
    planes: Iterable[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]],
    color: Tuple[int, int, int] = (0, 255, 255),
    linewidth: Number = 10,
) -> np.ndarray:
    """
    Args:
        lidar_pts: LiDAR point cloud, used to clip centerlines to the visible region
        city_to_egovehicle_se3: SE3 transformation representing egovehicle to city transformation
        img: Array of shape (M,N,3) representing the image to draw on
        city_name: str, string representing city name, i.e. 'PIT' or 'MIA'
        avm: instance of ArgoverseMap
        camera_config: instance of CameraConfig
        planes: five frustum clipping planes
        color: RGB-tuple representing color
        linewidth: width of the drawn line segments, in pixels

    Returns:
        img: Array of shape (M,N,3) representing updated image
    """
    R = camera_config.extrinsic[:3, :3]
    t = camera_config.extrinsic[:3, 3]
    cam_SE3_egovehicle = SE3(rotation=R, translation=t)

    query_x, query_y, _ = city_to_egovehicle_se3.translation
    local_centerlines = avm.find_local_lane_centerlines(
        query_x, query_y, city_name)

    for centerline_city_fr in local_centerlines:
        color = [
            intensity + np.random.randint(0, LANE_COLOR_NOISE) -
            LANE_COLOR_NOISE // 2 for intensity in color
        ]

        ground_heights = avm.get_ground_height_at_xy(centerline_city_fr,
                                                     city_name)

        # discard centerline waypoints with unknown (NaN) ground height
        nan_idx = np.isnan(ground_heights)
        centerline_city_fr = centerline_city_fr[~nan_idx]

        centerline_egovehicle_fr = city_to_egovehicle_se3.inverse(
        ).transform_point_cloud(centerline_city_fr)
        centerline_uv_cam = cam_SE3_egovehicle.transform_point_cloud(
            centerline_egovehicle_fr)

        # can also clip point cloud to nearest LiDAR point depth
        centerline_uv_cam = clip_point_cloud_to_visible_region(
            centerline_uv_cam, lidar_pts)
        for i in range(centerline_uv_cam.shape[0] - 1):
            draw_clipped_line_segment(img, centerline_uv_cam[i],
                                      centerline_uv_cam[i + 1], camera_config,
                                      linewidth, planes, color)
    return img
Code Example #4
def egomotion_unit_test():
	"""
	1R2 brings points in 2's frame into 1's frame
	1R2 is the relative rotation from 1's frame to 2's frame
	"""
	city_SE3_egot1 = SE3(rotation=np.eye(3), translation=np.array([3040,0,0]))
	city_SE3_egot2 = SE3(rotation=np.eye(3), translation=np.array([3050,0,0]))

	egot1_SE3_city = city_SE3_egot1.inverse()
	egot1_SE3_egot2 = egot1_SE3_city.right_multiply_with_se3(city_SE3_egot2)

	assert np.allclose(egot1_SE3_egot2.translation, np.array([10,0,0]))
Code Example #5
def form_calibration_json(
    calib_data: google.protobuf.pyext._message.RepeatedCompositeContainer,
) -> Dict:
    """
    Argoverse expects to receive "egovehicle_T_camera", i.e. the camera -> egovehicle transform, with
            rotation parameterized as a quaternion.
    Waymo provides the same SE(3) transformation, but with rotation parameterized as a 3x3 matrix.
    """
    calib_dict = {"camera_data_": []}
    for camera_calib in calib_data:
        cam_name = CAMERA_NAMES[camera_calib.name]
        # They provide "Camera frame to vehicle frame."
        # https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/dataset.proto
        egovehicle_T_waymocam = np.array(camera_calib.extrinsic.transform).reshape(
            4, 4
        )
        standardcam_R_waymocam = rotY(-90).dot(rotX(90))
        standardcam_SE3_waymocam = SE3(
            rotation=standardcam_R_waymocam, translation=np.zeros(3)
        )
        egovehicle_SE3_waymocam = SE3(
            rotation=egovehicle_T_waymocam[:3, :3],
            translation=egovehicle_T_waymocam[:3, 3],
        )
        standardcam_SE3_egovehicle = standardcam_SE3_waymocam.compose(
            egovehicle_SE3_waymocam.inverse()
        )
        egovehicle_SE3_standardcam = standardcam_SE3_egovehicle.inverse()
        egovehicle_q_camera = rotmat2quat(egovehicle_SE3_standardcam.rotation)
        x, y, z = egovehicle_SE3_standardcam.translation
        qw, qx, qy, qz = egovehicle_q_camera
        f_u, f_v, c_u, c_v, k1, k2, p1, p2, k3 = camera_calib.intrinsic
        cam_dict = {
            "key": "image_raw_" + cam_name,
            "value": {
                "focal_length_x_px_": f_u,
                "focal_length_y_px_": f_v,
                "focal_center_x_px_": c_u,
                "focal_center_y_px_": c_v,
                "skew_": 0,
                "distortion_coefficients_": [0, 0, 0],
                "vehicle_SE3_camera_": {
                    "rotation": {"coefficients": [qw, qx, qy, qz]},
                    "translation": [x, y, z],
                },
            },
        }
        calib_dict["camera_data_"] += [cam_dict]
    return calib_dict
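
rotX and rotY above are helpers that are not shown. The sketch below assumes they build elementary rotation matrices from angles given in degrees, which is what the standardcam_R_waymocam = rotY(-90).dot(rotX(90)) re-axing suggests:

import numpy as np

def rotX(deg: float) -> np.ndarray:
    """Elementary rotation about the x-axis (assumption: angle in degrees)."""
    t = np.deg2rad(deg)
    return np.array([
        [1.0, 0.0, 0.0],
        [0.0, np.cos(t), -np.sin(t)],
        [0.0, np.sin(t), np.cos(t)],
    ])

def rotY(deg: float) -> np.ndarray:
    """Elementary rotation about the y-axis (assumption: angle in degrees)."""
    t = np.deg2rad(deg)
    return np.array([
        [np.cos(t), 0.0, np.sin(t)],
        [0.0, 1.0, 0.0],
        [-np.sin(t), 0.0, np.cos(t)],
    ])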
Code Example #6
def convert_3dbox_to_8corner(bbox3d_input: np.ndarray) -> np.ndarray:
    '''
        Args:
        -   bbox3d_input: Numpy array of shape (7,) representing
                tx,ty,tz,yaw,l,w,h. (tx,ty,tz,yaw) tell us how to
                transform points to get from the object frame to 
                the egovehicle frame.

        Returns:
        -   corners_3d: (8,3) array in egovehicle frame
    '''
    # compute rotation matrix about the yaw (z) axis
    bbox3d = copy.copy(bbox3d_input)
    yaw = bbox3d[3]
    t = bbox3d[:3]

    # 3d bounding box dimensions
    l = bbox3d[4]
    w = bbox3d[5]
    h = bbox3d[6]

    # 3d bounding box corners
    x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
    y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    z_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]

    # rotate and translate 3d bounding box
    corners_3d_obj_fr = np.vstack([x_corners, y_corners, z_corners]).T
    egovehicle_SE3_object = SE3(rotation=rotMatZ_3D(yaw), translation=t)
    corners_3d_ego_fr = egovehicle_SE3_object.transform_point_cloud(
        corners_3d_obj_fr)
    return corners_3d_ego_fr
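
rotMatZ_3D is not defined above; a minimal sketch under the assumption that it is a yaw rotation about the z-axis, followed by an illustrative call (box parameters are arbitrary, and convert_3dbox_to_8corner's other dependencies are assumed to be in scope):

import numpy as np

def rotMatZ_3D(yaw: float) -> np.ndarray:
    # rotation by `yaw` radians about the +z axis
    c, s = np.cos(yaw), np.sin(yaw)
    return np.array([
        [c, -s, 0.0],
        [s, c, 0.0],
        [0.0, 0.0, 1.0],
    ])

# illustrative box: centered 10 m ahead of the egovehicle, yaw pi/2, l=4, w=2, h=1.5
corners = convert_3dbox_to_8corner(np.array([10.0, 0.0, 0.0, np.pi / 2, 4.0, 2.0, 1.5]))
assert corners.shape == (8, 3)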
Code Example #7
def get_B_SE2_A(B_SE3_A: SE3):
    """
        Can take city_SE3_egovehicle -> city_SE2_egovehicle
        Can take egovehicle_SE3_object -> egovehicle_SE2_object

        Doesn't matter if we stretch square by h,w,l since
        triangles will be similar regardless

        Args:
        -   B_SE3_A

        Returns:
        -   B_SE2_A
        -   B_yaw_A
    """
    x_corners = np.array([1, 1, 1, 1, -1, -1, -1, -1])
    y_corners = np.array([1, -1, -1, 1, 1, -1, -1, 1])
    z_corners = np.array([1, 1, -1, -1, 1, 1, -1, -1])
    corners_A_frame = np.vstack((x_corners, y_corners, z_corners)).T

    corners_B_frame = B_SE3_A.transform_point_cloud(corners_A_frame)

    p1 = corners_B_frame[1]
    p5 = corners_B_frame[5]
    dy = p1[1] - p5[1]
    dx = p1[0] - p5[0]
    # the orientation angle of the car
    B_yaw_A = np.arctan2(dy, dx)

    t = B_SE3_A.transform_matrix[:2, 3]  # get x,y only
    B_SE2_A = SE2(rotation=rotmat2d(B_yaw_A), translation=t)
    return B_SE2_A, B_yaw_A
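
The yaw extraction above works because corners 1 and 5 of the unit cube differ only along A's +x axis (p1 - p5 = (2, 0, 0) in A's frame), so their difference expressed in B's frame points along A's heading. A hedged sanity check, assuming rotmat2d and the SE2 class are importable alongside get_B_SE2_A:

import numpy as np
from argoverse.utils.se3 import SE3

# pure yaw of pi/2 about z with no translation, so B_yaw_A should come back as pi/2
yaw = np.pi / 2
R = np.array([
    [np.cos(yaw), -np.sin(yaw), 0.0],
    [np.sin(yaw), np.cos(yaw), 0.0],
    [0.0, 0.0, 1.0],
])
B_SE2_A, B_yaw_A = get_B_SE2_A(SE3(rotation=R, translation=np.zeros(3)))
assert np.isclose(B_yaw_A, yaw)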
Code Example #8
def filter_objs_to_roi(instances: np.ndarray, avm: ArgoverseMap,
                       city_SE3_egovehicle: SE3, city_name: str) -> np.ndarray:
    """Filter objects to the region of interest (5 meter dilation of driveable area).

    We ignore instances outside of region of interest (ROI) during evaluation.

    Args:
        instances: Numpy array of shape (N,) with ObjectLabelRecord entries
        avm: Argoverse map object
        city_SE3_egovehicle: pose of egovehicle within city map at time of sweep
        city_name: name of city where log was captured

    Returns:
        instances_roi: objects with any of 4 cuboid corners located within ROI
    """
    # for each cuboid, get its 4 corners in the egovehicle frame
    corners_egoframe = np.vstack([dt.as_2d_bbox() for dt in instances])
    corners_cityframe = city_SE3_egovehicle.transform_point_cloud(
        corners_egoframe)
    corner_within_roi = avm.get_raster_layer_points_boolean(
        corners_cityframe, city_name, "roi")
    # check for each cuboid if any of its 4 corners lies within the ROI
    is_within_roi = corner_within_roi.reshape(-1, 4).any(axis=1)
    instances_roi = instances[is_within_roi]
    return instances_roi
Code Example #9
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_transform_point_cloud_by_quaternion() -> None:
    """Test rotating points by a given quaternion, and then adding translation vector to each point."""
    pts = np.array([[1.0, 1.0, 1.1], [1.0, 1.0, 2.1], [1.0, 1.0, 3.1],
                    [1.0, 1.0, 4.1]])

    # x, y, z of cuboid center
    t = np.array([-34.7128603513203, 5.29461762417753, 0.10328996181488])

    # quaternion has order (w,x,y,z)
    q = np.array([0.700322174885275, 0.0, 0.0, -0.713826905743933])
    R = quat2rotmat(q)

    dst_se3_src = SE3(rotation=R.copy(), translation=t.copy())
    transformed_pts = dst_se3_src.transform_point_cloud(pts.copy())

    gt_transformed_pts = pts.dot(R.T)
    gt_transformed_pts += t
    print(gt_transformed_pts)

    assert np.allclose(transformed_pts, gt_transformed_pts)
    gt_transformed_pts_explicit = np.array([
        [-33.73214043, 4.2757023, 1.20328996],
        [-33.73214043, 4.2757023, 2.20328996],
        [-33.73214043, 4.2757023, 3.20328996],
        [-33.73214043, 4.2757023, 4.20328996],
    ])
    assert np.allclose(transformed_pts, gt_transformed_pts_explicit)
Code Example #10
    def as_3d_bbox(self) -> np.ndarray:
        r"""Calculate the 8 bounding box corners.

        Args:
            None

        Returns:
            Numpy array of shape (8,3)

        Corner numbering::

             5------4
             |\     |\
             | \    | \
             6--\---7  \
              \  \   \  \
          l    \  1-------0    h
           e    \ ||    \ ||   e
            n    \||     \||   i
             g    \2------3    g
              t      width     h
               h               t

        First four corners are the ones facing forward.
        The last four are the ones facing backwards.
        """
        # 3D bounding box corners. (Convention: x points forward, y to the left, z up.)
        x_corners = self.length / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])
        y_corners = self.width / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])
        z_corners = self.height / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])
        corners_object_frame = np.vstack((x_corners, y_corners, z_corners)).T

        egovehicle_SE3_object = SE3(rotation=quat2rotmat(self.quaternion), translation=self.translation)
        corners_egovehicle_frame = egovehicle_SE3_object.transform_point_cloud(corners_object_frame)
        return corners_egovehicle_frame
Code Example #11
def get_z_icp(x_prev, transf):
    """
    Apply the transformation matrix from ICP to the previous pose to obtain the measurement.
    """

    pose_new = x_prev[0:3] + transf[0:3, 3]
    pose_head = np.array([np.cos(x_prev[3]),
                          np.sin(x_prev[3]), 0])[np.newaxis, :]
    transf_se3 = SE3(rotation=transf[0:3, 0:3], translation=np.zeros((3)))
    pose_head_new = transf_se3.transform_point_cloud(pose_head)

    theta_new = np.arctan2(pose_head_new[0, 1], pose_head_new[0, 0])

    # clip rotation angle given turning radius
    dx = pose_new[0] - x_prev[0]
    dy = pose_new[1] - x_prev[1]

    delta_theta = theta_new - x_prev[3]
    theta_th = np.sqrt(dx**2 + dy**2) / CAR_TURNING_RADIUS
    if delta_theta > 0:
        delta_theta = min(delta_theta, theta_th)
    else:
        delta_theta = max(delta_theta, -theta_th)

    output = x_prev + np.array([0, 0, 0, delta_theta])
    output[0:3] = pose_new[0:3]

    return output
Code Example #12
def visualize_ground_lidar_pts(log_id: str, dataset_dir: str,
                               experiment_prefix: str) -> None:
    """Process a log by drawing the LiDAR returns that are classified as belonging
    to the ground surface in a red to green colormap in the image.

    Args:
        log_id: The ID of a log
        dataset_dir: Where the dataset is stored
        experiment_prefix: Output prefix
    """
    sdb = SynchronizationDB(dataset_dir, collect_single_log_id=log_id)

    city_info_fpath = f"{dataset_dir}/{log_id}/city_info.json"
    city_info = read_json_file(city_info_fpath)
    city_name = city_info["city_name"]
    avm = ArgoverseMap()

    ply_fpaths = sorted(glob.glob(f"{dataset_dir}/{log_id}/lidar/PC_*.ply"))

    for i, ply_fpath in enumerate(ply_fpaths):
        if i % 500 == 0:
            print(f"\tOn file {i} of {log_id}")
        lidar_timestamp_ns = ply_fpath.split("/")[-1].split(".")[0].split(
            "_")[-1]

        pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{lidar_timestamp_ns}.json"
        if not Path(pose_fpath).exists():
            continue

        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)

        lidar_pts = load_ply(ply_fpath)

        lidar_timestamp_ns = int(lidar_timestamp_ns)
        draw_ground_pts_in_image(
            sdb,
            lidar_pts,
            city_to_egovehicle_se3,
            avm,
            log_id,
            lidar_timestamp_ns,
            city_name,
            dataset_dir,
            experiment_prefix,
        )

    for camera_name in CAMERA_LIST:
        if "stereo" in camera_name:
            fps = 5
        else:
            fps = 10
        cmd = f"ffmpeg -r {fps} -f image2 -i '{experiment_prefix}_ground_viz/{log_id}/{camera_name}/%*.jpg' {experiment_prefix}_ground_viz/{experiment_prefix}_{log_id}_{camera_name}_{fps}fps.mp4"

        print(cmd)
        run_command(cmd)
Code Example #13
def test_SE3_inverse_transform_point_cloud_identity() -> None:
    """Test taking a transformed point cloud and undoing the transformation.

    Since the transformation was the identity, the points should not be affected.
    """
    transformed_pts = np.array([[1.0, 1.0, 1.1], [1.0, 1.0, 2.1], [1.0, 1.0, 3.1], [1.0, 1.0, 4.1]])
    dst_se3_src = SE3(rotation=np.eye(3), translation=np.zeros(3))
    pts = dst_se3_src.inverse_transform_point_cloud(transformed_pts.copy())
    assert np.allclose(pts, transformed_pts)
Code Example #14
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_transform_point_cloud_identity() -> None:
    """Test taking a point cloud and performing an SE3 transformation.

    Since the transformation is the identity, the points should not be affected.
    """
    pts = np.array([[1.0, 1.0, 1.1], [1.0, 1.0, 2.1], [1.0, 1.0, 3.1]])
    dst_se3_src = SE3(rotation=np.eye(3), translation=np.zeros(3))
    transformed_pts = dst_se3_src.transform_point_cloud(pts.copy())

    assert np.allclose(transformed_pts, pts)
Code Example #15
    def render_bev_labels_mpl_tmp(
        self,
        city_name: str,
        ax: plt.Axes,
        axis: str,
        lidar_pts: np.ndarray,
        local_lane_polygons: np.ndarray,
        local_das: np.ndarray,
        city_to_egovehicle_se3: SE3,
        avm: ArgoverseMap,
    ) -> None:
        """Plot nearby lane polygons and nearby driveable areas (da) on the Matplotlib axes.

        Args:
            city_name: The name of a city, e.g. `"PIT"`
            ax: Matplotlib axes
            axis: string, either 'ego_axis' or 'city_axis', selecting whether to render in the egovehicle frame or the city frame
            lidar_pts:  Numpy array of shape (N,3)
            local_lane_polygons: Polygons representing the local lane set
            local_das: Numpy array of objects of shape (N,) where each object is of shape (M,3)
            city_to_egovehicle_se3: Transformation from egovehicle frame to city frame
            avm: ArgoverseMap instance
        """
        if axis != "city_axis":
            # rendering instead in the egovehicle reference frame
            for da_idx, local_da in enumerate(local_das):
                local_da = city_to_egovehicle_se3.inverse_transform_point_cloud(local_da)
                local_das[da_idx] = rotate_polygon_about_pt(local_da, city_to_egovehicle_se3.rotation, np.zeros(3))

            for lane_idx, local_lane_polygon in enumerate(local_lane_polygons):
                local_lane_polygon = city_to_egovehicle_se3.inverse_transform_point_cloud(local_lane_polygon)
                local_lane_polygons[lane_idx] = rotate_polygon_about_pt(
                    local_lane_polygon, city_to_egovehicle_se3.rotation, np.zeros(3)
                )

        draw_lane_polygons(ax, local_lane_polygons)
        draw_lane_polygons(ax, local_das, color="tab:pink")

        if axis != "city_axis":
            lidar_pts = rotate_polygon_about_pt(lidar_pts, city_to_egovehicle_se3.rotation, np.zeros((3,)))
            draw_point_cloud_bev(ax, lidar_pts)
Code Example #16
def plot_argoverse_epilines_from_ground_truth_poses(ts1: int, ts2: int, img1, img2, K):
	""" """
	city_SE3_egot1 = get_city_SE3_egovehicle_at_sensor_t(ts1, dataset_dir, log_id) 
	city_SE3_egot2 = get_city_SE3_egovehicle_at_sensor_t(ts2, dataset_dir, log_id) 

	camera_T_egovehicle = calib_dict['ring_front_center'].extrinsic
	camera_T_egovehicle = SE3(rotation=camera_T_egovehicle[:3,:3], translation=camera_T_egovehicle[:3,3])
	egovehicle_T_camera = camera_T_egovehicle.inverse()

	city_SE3_cam1 = city_SE3_egot1.compose(egovehicle_T_camera)
	city_SE3_cam2 = city_SE3_egot2.compose(egovehicle_T_camera)

	cam1_SE3_city = city_SE3_cam1.inverse()
	cam1_SE3_cam2 = cam1_SE3_city.compose(city_SE3_cam2)

	# rotates i1's frame to i2's frame
	# 1R2 brings points in 2's frame into 1's frame
	# 1R2 is the relative rotation from 1's frame to 2's frame
	cam1_R_cam2 = cam1_SE3_cam2.rotation
	cam1_t_cam2 = cam1_SE3_cam2.translation

	r = Rotation.from_matrix(cam1_R_cam2)
	print('cam1_R_cam2 =', r.as_euler('zyx', degrees=True))
	print('Recover t=', cam1_t_cam2, ' up to scale ', cam1_t_cam2 / np.linalg.norm(cam1_t_cam2))

	cam2_SE3_cam1 = cam1_SE3_cam2.inverse()
	cam2_R_cam1 = cam2_SE3_cam1.rotation
	cam2_t_cam1 = cam2_SE3_cam1.translation

	# use correct ground truth relationship to generate gt_E
	# and then generate correspondences using
	cam2_E_cam1 = compute_essential_matrix(cam2_R_cam1, cam2_t_cam1)
	cam2_F_cam1 = get_fmat_from_emat(cam2_E_cam1, K1=K, K2=K)

	img_h, img_w, _ = img1.shape

	pkl_fpath = f'/Users/johnlambert/Downloads/visual-odometry-tutorial/labeled_correspondences/argoverse_1_E_0.pkl'
	corr_data = load_pkl_correspondences(pkl_fpath)

	pts_left = np.hstack([corr_data.X1.reshape(-1,1), corr_data.Y1.reshape(-1,1) ]).astype(np.int32)
	pts_right = np.hstack([corr_data.X2.reshape(-1,1), corr_data.Y2.reshape(-1,1)]).astype(np.int32)

	pdb.set_trace()
	draw_epilines(pts_left, pts_right, img1, img2, cam2_F_cam1)
	plt.show()
	draw_epipolar_lines(cam2_F_cam1, img1, img2, pts_left, pts_right)

	pts_left = cartesian_to_homogeneous(pts_left)
	pts_right = cartesian_to_homogeneous(pts_right)

	for (pt1, pt2) in zip(pts_left, pts_right):
		epi_error = pt2.dot(cam2_F_cam1).dot(pt1)
		print('Error: ', epi_error)
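
compute_essential_matrix is used above but not shown. A sketch following the standard definition E = [t]x R, i.e. the skew-symmetric cross-product matrix of the translation times the rotation:

import numpy as np

def compute_essential_matrix(cam2_R_cam1: np.ndarray, cam2_t_cam1: np.ndarray) -> np.ndarray:
    # E = [t]_x R
    tx, ty, tz = cam2_t_cam1
    t_skew = np.array([
        [0.0, -tz, ty],
        [tz, 0.0, -tx],
        [-ty, tx, 0.0],
    ])
    return t_skew.dot(cam2_R_cam1)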
Code Example #17
def test_leave_only_roi_region():
    """
        test leave_only_roi_region function
        (lidar_pts,egovehicle_to_city_se3,ground_removal_method, city_name='MIA')
        """
    pc = ply_loader.load_ply(TEST_DATA_LOC / "1/lidar/PC_0.ply")
    pose_data = read_json_file(TEST_DATA_LOC / "1/poses/city_SE3_egovehicle_0.json")
    rotation = np.array(pose_data["rotation"])
    translation = np.array(pose_data["translation"])
    ego_R = quat2rotmat(rotation)
    ego_t = translation
    egovehicle_to_city_se3 = SE3(rotation=ego_R, translation=ego_t)
    pts = eval_utils.leave_only_roi_region(pc, egovehicle_to_city_se3, ground_removal_method="map")
Code Example #18
    def load_camera_calibration(self, log_id: str, camera_name: str) -> None:
        """Load extrinsics and intrinsics from disk."""
        calib_data = self._dl.get_log_calibration_data(log_id)
        cam_config = get_calibration_config(calib_data, camera_name)
        self._K = cam_config.intrinsic[:3, :3]

        # square pixels, so fx and fy should be (nearly) identical
        assert np.isclose(self._K[0, 0], self._K[1, 1], atol=0.1)

        self._camera_SE3_egovehicle = SE3(
            rotation=cam_config.extrinsic[:3, :3],
            translation=cam_config.extrinsic[:3, 3])
        self._egovehicle_SE3_camera = self._camera_SE3_egovehicle.inverse()
Code Example #19
def test_SE3_transform_point_cloud_by_yaw_angle() -> None:
    """Test rotating points by yaw angle of pi/4 in the xy plane, then adding a translation vector to them all."""
    pts = np.array([[1.0, 0.0, 4.0], [1.0, 0.0, 3.0]])
    theta = np.pi / 4
    R = get_yaw_angle_rotmat(theta)
    t = np.array([1.0, 2.0, 3.0])

    dst_SE3_src = SE3(rotation=R.copy(), translation=t.copy())

    transformed_pts = dst_SE3_src.transform_point_cloud(pts.copy())
    gt_transformed_pts = np.array([[np.sqrt(2) / 2, np.sqrt(2) / 2, 4.0], [np.sqrt(2) / 2, np.sqrt(2) / 2, 3.0]])
    gt_transformed_pts += t
    assert np.allclose(transformed_pts, gt_transformed_pts)
Code Example #20
def rotate_orientation(x, rotation):
    """
    Rotate the heading direction encoded in x[3] by the given rotation matrix and return the new orientation angle.
    """

    pose_head = np.array([np.cos(x[3]), np.sin(x[3]), 0])[np.newaxis, :]

    transf_se3 = SE3(rotation=rotation[0:3, 0:3], translation=np.zeros((3)))

    pose_head_new = transf_se3.transform_point_cloud(pose_head)

    theta_new = np.arctan2(pose_head_new[0, 1], pose_head_new[0, 0])

    return theta_new
Code Example #21
def plot_argoverse_epilines_from_annotated_correspondences(
	img1: np.ndarray,
	img2: np.ndarray,
	K: np.ndarray,
	use_normalized_coords: bool
	):
	""" """
	pkl_fpath = f'/Users/johnlambert/Downloads/visual-odometry-tutorial/labeled_correspondences/argoverse_1_E_0.pkl'
	corr_data = load_pkl_correspondences(pkl_fpath)

	corr_img = show_correspondence_lines(img1, img2, corr_data.X1, corr_data.Y1, corr_data.X2, corr_data.Y2)
	plt.imshow(corr_img)
	plt.show()

	img1_kpts = np.hstack([ corr_data.X1.reshape(-1,1), corr_data.Y1.reshape(-1,1) ]).astype(np.int32)
	img2_kpts = np.hstack([ corr_data.X2.reshape(-1,1), corr_data.Y2.reshape(-1,1) ]).astype(np.int32)

	if use_normalized_coords:
		img1_norm_kpts = cv2.undistortPoints(img1_kpts.astype(np.float32), K, None)
		img2_norm_kpts = cv2.undistortPoints(img2_kpts.astype(np.float32), K, None)
		K_I = np.eye(3)
		# normalized (unit focal length) coordinates are much smaller in magnitude, so a much smaller RANSAC threshold is needed!
		cam2_E_cam1, inlier_mask = cv2.findEssentialMat(img1_norm_kpts, img2_norm_kpts, K_I, method=cv2.RANSAC, threshold=0.001)
	else:
		cam2_E_cam1, inlier_mask = cv2.findEssentialMat(img1_kpts, img2_kpts, K, method=cv2.RANSAC, threshold=0.1)
	
	print('Num inliers: ', inlier_mask.sum())
	cam2_F_cam1 = get_fmat_from_emat(cam2_E_cam1, K1=K, K2=K)
	_num_inlier, cam2_R_cam1, cam2_t_cam1, _ = cv2.recoverPose(cam2_E_cam1, img1_kpts, img2_kpts, mask=inlier_mask)

	r = Rotation.from_matrix(cam2_R_cam1)
	print('cam2_R_cam1 recovered from correspondences', r.as_euler('zyx', degrees=True))
	print('cam2_t_cam1: ', np.round(cam2_t_cam1.squeeze(), 2))

	cam2_SE3_cam1 = SE3(rotation=cam2_R_cam1, translation=cam2_t_cam1.squeeze())
	cam1_SE3_cam2 = cam2_SE3_cam1.inverse()
	cam1_R_cam2 = cam1_SE3_cam2.rotation
	cam1_t_cam2 = cam1_SE3_cam2.translation

	r = Rotation.from_matrix(cam1_R_cam2)
	print('cam1_R_cam2: ', r.as_euler('zyx', degrees=True)) ## prints "[-0.32  33.11 -0.45]"
	print('cam1_t_cam2: ', np.round(cam1_t_cam2,2)) ## [0.21 0.   0.98]

	pdb.set_trace()
	draw_epilines(img1_kpts, img2_kpts, img1, img2, cam2_F_cam1)
	plt.show()

	draw_epipolar_lines(cam2_F_cam1, img1, img2, img1_kpts, img2_kpts)
	plt.show()
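
get_fmat_from_emat (used here and in example #16) is also not shown. A sketch following the standard relation F = K2^{-T} E K1^{-1}, which converts the essential matrix on normalized coordinates into a fundamental matrix on pixel coordinates:

import numpy as np

def get_fmat_from_emat(cam2_E_cam1: np.ndarray, K1: np.ndarray, K2: np.ndarray) -> np.ndarray:
    # F = K2^{-T} E K1^{-1}
    return np.linalg.inv(K2).T.dot(cam2_E_cam1).dot(np.linalg.inv(K1))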
Code Example #22
    def get_future_traj(self, axes: Axes, log_id: str, timestamp_ind: int,
                        city_to_egovehicle_se3: SE3):

        traj = np.zeros((FUTURE_TIME_STEP, 2))
        for ind, i in enumerate(
                range(timestamp_ind + 1,
                      timestamp_ind + FUTURE_TIME_STEP + 1)):
            timestamp = self.timestamps[i]
            pose_city_to_ego = self.log_egopose_dict[log_id][timestamp]
            relative_pose = city_to_egovehicle_se3.inverse_transform_point_cloud(
                pose_city_to_ego["translation"])
            traj[ind, :] = relative_pose[0:2]

        # axes.scatter(traj[:, 0].tolist(), traj[:, 1].tolist(), s=5, c='g')
        return traj
Code Example #23
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_transform_point_cloud_optimized() -> None:
    """Ensure that our transform_point_cloud() implementation is faster than a naive implementation.

    We use 1k trials, with a point cloud of 100k points. The chosen SE(3) transformation is arbitrary.
    """
    def transform_point_cloud_slow(point_cloud: np.ndarray,
                                   transform_matrix: np.ndarray) -> np.ndarray:
        """Slow, unoptimized method -- allocated unnecessary array for homogeneous coordinates."""
        # convert to homogeneous
        num_pts = point_cloud.shape[0]
        homogeneous_pts = np.hstack([point_cloud, np.ones((num_pts, 1))])
        transformed_point_cloud = homogeneous_pts.dot(transform_matrix.T)
        return transformed_point_cloud[:, :3]

    # number of trials
    n_trials = 1000
    num_pts = 100000
    pts_src = np.random.randn(num_pts, 3)

    # x, y, z of cuboid center
    t = np.array([-34.7128603513203, 5.29461762417753, 0.10328996181488])

    # quaternion has order (w,x,y,z)
    q = np.array([0.700322174885275, 0.0, 0.0, -0.713826905743933])
    R = quat2rotmat(q)

    dst_se3_src = SE3(rotation=R.copy(), translation=t.copy())

    def measure_time_elapsed(n_trials: int, fn: Callable, *args: Any) -> float:
        """Compute time in seconds to execute a function `n_trials` times."""
        start = time.time()
        for i in range(n_trials):
            pts_dst = fn(*args)
        end = time.time()
        duration = end - start
        return duration

    optimized_duration = measure_time_elapsed(
        n_trials, dst_se3_src.transform_point_cloud, pts_src)

    unoptimized_duration = measure_time_elapsed(
        n_trials,
        transform_point_cloud_slow,
        pts_src,
        dst_se3_src.transform_matrix,
    )

    assert optimized_duration < unoptimized_duration
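
For reference, a hedged sketch of what the optimized path presumably does, as implied by the ground-truth math used in the tests above: apply the rotation and translation directly, without allocating a homogeneous copy of the point cloud.

import numpy as np

def transform_point_cloud_fast(point_cloud: np.ndarray,
                               rotation: np.ndarray,
                               translation: np.ndarray) -> np.ndarray:
    # (N,3) times (3,3)^T, then broadcast the (3,) translation over all points
    return point_cloud.dot(rotation.T) + translation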
Code Example #24
File: calibration.py  Project: maherzog/argoverse-api
def project_lidar_to_undistorted_img(
    lidar_points_h: np.ndarray,
    calib_data: Dict[str, Any],
    camera_name: str,
    remove_nan: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, CameraConfig]:
    camera_config = get_calibration_config(calib_data, camera_name)

    R = camera_config.extrinsic[:3, :3]
    t = camera_config.extrinsic[:3, 3]
    cam_SE3_egovehicle = SE3(rotation=R, translation=t)

    points_egovehicle = lidar_points_h.T[:, :3]
    uv_cam = cam_SE3_egovehicle.transform_point_cloud(points_egovehicle)

    return proj_cam_to_uv(uv_cam, camera_config)
Code Example #25
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_constructor() -> None:
    """Test making an arbitrary SE2 transformation for a pedestrian cuboid."""
    # x, y, z of cuboid center
    t = np.array([-34.7128603513203, 5.29461762417753, 0.10328996181488])

    # quaternion has order (w,x,y,z)
    q = np.array([0.700322174885275, 0.0, 0.0, -0.713826905743933])
    R = quat2rotmat(q)

    dst_se3_src = SE3(rotation=R.copy(), translation=t.copy())

    T_mat_gt = np.eye(4)
    T_mat_gt[:3, :3] = R
    T_mat_gt[:3, 3] = t

    assert np.allclose(dst_se3_src.rotation, R)
    assert np.allclose(dst_se3_src.translation, t)
    assert np.allclose(dst_se3_src.transform_matrix, T_mat_gt)
Code Example #26
    def render_ego_past_traj(
        self,
        axes: Axes,
        log_id: str,
        timestamp_ind: int,
        city_to_egovehicle_se3: SE3,
    ) -> None:

        x = [0]
        y = [0]
        for i in range(timestamp_ind - EGO_TIME_STEP, timestamp_ind):
            timestamp = self.timestamps[i]
            pose_city_to_ego = self.log_egopose_dict[log_id][timestamp]
            relative_pose = city_to_egovehicle_se3.inverse_transform_point_cloud(
                pose_city_to_ego["translation"])
            x.append(relative_pose[0])
            y.append(relative_pose[1])
        axes.scatter(x, y, s=5, c='b', alpha=1, zorder=2)
Code Example #27
    def get_city_to_egovehicle_se3(self, log_id: str,
                                   timestamp: int) -> Optional[SE3]:
        """
        Args:
            log_id: str, unique ID of vehicle log
            timestamp: int, timestamp of sensor observation, in nanoseconds

        Returns:
            city_to_egovehicle_se3: SE3 transformation to bring egovehicle frame point into city frame.
        """
        pose_fpath = f"{self.data_dir}/{log_id}/poses/city_SE3_egovehicle_{timestamp}.json"
        if not Path(pose_fpath).exists():
            return None
        pose_data = read_json_file(pose_fpath)
        rotation = np.array(pose_data["rotation"])
        translation = np.array(pose_data["translation"])
        city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation),
                                     translation=translation)
        return city_to_egovehicle_se3
Code Example #28
File: test_se3.py  Project: argoai/argoverse-api
def test_SE3_inverse_transform_point_cloud() -> None:
    """Test taking a transformed point cloud and undoing the transformation to recover the original points."""
    transformed_pts = np.array([
        [-33.73214043, 4.2757023, 1.20328996],
        [-33.73214043, 4.2757023, 2.20328996],
        [-33.73214043, 4.2757023, 3.20328996],
        [-33.73214043, 4.2757023, 4.20328996],
    ])
    # x, y, z of cuboid center
    t = np.array([-34.7128603513203, 5.29461762417753, 0.10328996181488])

    # quaternion has order (w,x,y,z)
    q = np.array([0.700322174885275, 0.0, 0.0, -0.713826905743933])
    R = quat2rotmat(q)

    dst_se3_src = SE3(rotation=R, translation=t)
    pts = dst_se3_src.inverse_transform_point_cloud(transformed_pts)
    gt_pts = np.array([[1.0, 1.0, 1.1], [1.0, 1.0, 2.1], [1.0, 1.0, 3.1],
                       [1.0, 1.0, 4.1]])

    assert np.allclose(pts, gt_pts)
Code Example #29
def get_city_SE3_egovehicle_at_sensor_t(sensor_timestamp: int, dataset_dir: str, log_id: str) -> Optional[SE3]:
    """Get transformation from ego-vehicle to city coordinates at a given timestamp.

    Args:
        sensor_timestamp: integer representing timestamp when sensor measurement captured, in nanoseconds
        dataset_dir: path to the directory where the log data is stored
        log_id: string representing unique log identifier

    Returns:
        SE3 for translating ego-vehicle coordinates to city coordinates if found, else None.
    """
    pose_fpath = f"{dataset_dir}/{log_id}/poses/city_SE3_egovehicle_{sensor_timestamp}.json"
    if not Path(pose_fpath).exists():
        logger.error(f"missing pose {sensor_timestamp}")
        return None

    pose_city_to_ego = read_json_file(pose_fpath)
    rotation = np.array(pose_city_to_ego["rotation"])
    translation = np.array(pose_city_to_ego["translation"])
    city_to_egovehicle_se3 = SE3(rotation=quat2rotmat(rotation), translation=translation)
    return city_to_egovehicle_se3
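
A hedged usage sketch: once the pose is loaded, a LiDAR sweep given in the egovehicle frame can be moved into the city frame. The timestamp, path, and log id below are placeholders, not real values.

import numpy as np

city_SE3_egovehicle = get_city_SE3_egovehicle_at_sensor_t(
    sensor_timestamp=0, dataset_dir="/path/to/argoverse", log_id="example-log-id"
)
if city_SE3_egovehicle is not None:
    lidar_pts_egovehicle = np.random.randn(100, 3)  # stand-in for a real (N,3) sweep
    lidar_pts_city = city_SE3_egovehicle.transform_point_cloud(lidar_pts_egovehicle)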
Code Example #30
def get_camera_extrinsic_matrix(config: Dict[str, Any]) -> np.ndarray:
    """Load camera calibration rotation and translation.

    Note that the camera calibration file contains the SE3 for sensor frame to the vehicle frame, i.e.
        pt_egovehicle = egovehicle_SE3_sensor * pt_sensor

    We build egovehicle_SE3_camera from the stored rotation and translation, then
    return its inverse (i.e. camera_SE3_egovehicle) as a 4x4 matrix.

    Args:
       config: Calibration config dict for a single camera (the "vehicle_SE3_camera_" entry is used).

    Returns:
       4x4 extrinsic matrix, i.e. camera_SE3_egovehicle.
    """
    vehicle_SE3_sensor = config["vehicle_SE3_camera_"]
    egovehicle_t_camera = np.array(vehicle_SE3_sensor["translation"])
    egovehicle_q_camera = vehicle_SE3_sensor["rotation"]["coefficients"]
    egovehicle_R_camera = quat2rotmat(egovehicle_q_camera)
    egovehicle_T_camera = SE3(rotation=egovehicle_R_camera, translation=egovehicle_t_camera)

    return egovehicle_T_camera.inverse().transform_matrix
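
A hedged usage sketch of the returned matrix: it is camera_SE3_egovehicle as a 4x4 homogeneous transform, so it maps egovehicle-frame points (in homogeneous coordinates) into the camera frame. The config dict below is hypothetical, following the "vehicle_SE3_camera_" layout produced in example #5.

import numpy as np

# hypothetical per-camera calibration entry (identity rotation, made-up camera mount position)
config = {
    "vehicle_SE3_camera_": {
        "rotation": {"coefficients": [1.0, 0.0, 0.0, 0.0]},  # quaternion in (w,x,y,z) order
        "translation": [1.6, 0.0, 1.4],
    }
}
camera_SE3_egovehicle = get_camera_extrinsic_matrix(config)
pt_egovehicle_h = np.array([10.0, 0.0, 1.0, 1.0])  # hypothetical point 10 m ahead, homogeneous coords
pt_camera = camera_SE3_egovehicle.dot(pt_egovehicle_h)[:3]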