Example #1
    def project_pts_to_image(self, pointcloud: LidarPointCloud,
                             token: str) -> np.ndarray:
        """
        Project lidar points into image.
        :param pointcloud: The LidarPointCloud in nuScenes lidar frame.
        :param token: Unique KITTI token.
        :return: <np.float: 3, N.> X, Y are points in image pixel coordinates. Z is depth in image.
        """

        # Copy and convert pointcloud.
        pc_image = LidarPointCloud(points=pointcloud.points.copy())
        pc_image.rotate(self.kitti_to_nu_lidar_inv)  # Rotate to KITTI lidar.

        # Transform pointcloud to camera frame.
        transforms = self.get_transforms(token, root=self.root)
        pc_image.rotate(transforms['velo_to_cam']['R'])
        pc_image.translate(transforms['velo_to_cam']['T'])

        # Project to image.
        depth = pc_image.points[2, :]
        points_fov = view_points(pc_image.points[:3, :],
                                 transforms['p_combined'],
                                 normalize=True)
        points_fov[2, :] = depth

        return points_fov
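For orientation, a minimal usage sketch that filters the projected points to the image bounds. The `db` instance, the input variables, and the image size are assumptions, not part of the example:

# Hypothetical usage; `db`, `pointcloud`, `token` and the image size are assumptions.
points_fov = db.project_pts_to_image(pointcloud, token)
width, height = 1242, 375  # Typical KITTI camera resolution; adjust per sample.
in_image = (points_fov[2, :] > 0) & \
           (points_fov[0, :] >= 0) & (points_fov[0, :] < width) & \
           (points_fov[1, :] >= 0) & (points_fov[1, :] < height)
points_visible = points_fov[:, in_image]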
Example #2
def from_file(file_name: str):
    # Each nuScenes lidar record holds 5 float32 values: x, y, z, intensity, ring index.
    scan = np.fromfile(file_name, dtype=np.float32)
    points = scan.reshape((-1, 5))
    # Keep the first nbr_dims() (4) columns, then overwrite the intensity
    # channel with the ring index.
    xyzr = points[:, :LidarPointCloud.nbr_dims()]
    ring = points[:, 4]
    xyzr[:, 3] = ring
    return LidarPointCloud(xyzr.T)
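A short usage sketch, assuming a nuScenes-style sweep file (the path is hypothetical). Each record in such a file holds five float32 values, and this variant keeps the ring index in place of the intensity:

# Hypothetical path to a nuScenes lidar sweep.
pc = from_file('/data/sets/nuscenes/samples/LIDAR_TOP/sweep.pcd.bin')
print(pc.points.shape)  # (4, N): rows are x, y, z, ring index.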
Example #3
    @staticmethod
    def get_pointcloud(token: str, root: str = '/data/sets/kitti') -> LidarPointCloud:
        """
        Load up the pointcloud for a sample.
        :param token: KittiDB unique id.
        :param root: Base folder for all KITTI data.
        :return: LidarPointCloud for the sample in the KITTI Lidar frame.
        """
        pc_filename = KittiDB.get_filepath(token, 'velodyne', root=root)

        # The lidar PC is stored in the KITTI LIDAR coord system.
        pc = LidarPointCloud(np.fromfile(pc_filename, dtype=np.float32).reshape(-1, 4).T)

        return pc
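Usage could look as follows; the token string is a hypothetical placeholder for a KittiDB sample id:

# Hypothetical token; the exact naming scheme depends on the KittiDB build.
pc = KittiDB.get_pointcloud('train_000000', root='/data/sets/kitti')
print(pc.points.shape)  # (4, N) in the KITTI lidar frame.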
Example #4
def get_gt(nusc, corners, camera_token, pointsensor_token):
    cam = nusc.get('sample_data', camera_token)
    pointsensor = nusc.get('sample_data', pointsensor_token)
    pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
    im = Image.open(os.path.join(nusc.dataroot, cam['filename']))
    pc = LidarPointCloud(corners)

    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    cs_record = nusc.get('calibrated_sensor', pointsensor['calibrated_sensor_token'])
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
    pc.translate(np.array(cs_record['translation']))

    # Second step: transform to the global frame.
    poserecord = nusc.get('ego_pose', pointsensor['ego_pose_token'])
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
    pc.translate(np.array(poserecord['translation']))

    # Third step: transform into the ego vehicle frame for the timestamp of the image.
    poserecord = nusc.get('ego_pose', cam['ego_pose_token'])
    pc.translate(-np.array(poserecord['translation']))
    pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix.T)

    # Fourth step: transform into the camera.
    cs_record = nusc.get('calibrated_sensor', cam['calibrated_sensor_token'])
    pc.translate(-np.array(cs_record['translation']))
    pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix.T)

    # Fifth step: actually take a "picture" of the point cloud.
    # Grab the depths (camera frame z axis points away from the camera).
    depths = pc.points[2, :]

    # Retrieve the color from the depth.
    coloring = depths

    # Take the actual picture (matrix multiplication with camera-matrix + renormalization).
    points = view_points(pc.points[:3, :], np.array(cs_record['camera_intrinsic']), normalize=True)

    # Remove points that are either outside or behind the camera. Leave a margin of 1 pixel for aesthetic reasons.
    # Also make sure points are at least 1m in front of the camera to avoid seeing the lidar points on the camera
    # casing for non-keyframes which are slightly out of sync.
    # mask = np.ones(depths.shape[0], dtype=bool)
    # mask = np.logical_and(mask, depths > min_dist)
    # mask = np.logical_and(mask, points[0, :] > 1)
    # mask = np.logical_and(mask, points[0, :] < im.size[0] - 1)
    # mask = np.logical_and(mask, points[1, :] > 1)
    # mask = np.logical_and(mask, points[1, :] < im.size[1] - 1)
    # points = points[:, mask]
    # coloring = coloring[mask]

    return points, coloring, im
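Because the masking in the example is commented out, here is a sketch of how the returned values could be filtered to the camera frustum afterwards, mirroring the commented block; `min_dist` and the input variables are assumptions:

# Hypothetical usage; nusc, corners and the tokens come from the caller.
points, coloring, im = get_gt(nusc, corners, camera_token, pointsensor_token)
min_dist = 1.0  # Coloring holds the depths, so this is the depth cutoff.
mask = (coloring > min_dist) & \
       (points[0, :] > 1) & (points[0, :] < im.size[0] - 1) & \
       (points[1, :] > 1) & (points[1, :] < im.size[1] - 1)
points, coloring = points[:, mask], coloring[mask]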
Example #5
def get_pc_from_file_multisweep(nusc,
                                sample_rec,
                                chan: str,
                                ref_chan: str,
                                nsweeps: int = 3,
                                min_distance: float = 1.0,
                                invalid_states=None,
                                dynprop_states=None,
                                ambig_states=None):
    """
    Returns a point cloud of multiple aggregated sweeps.
    Original function can be found at: https://github.com/nutonomy/nuscenes-devkit/blob/ae022aba3b37f07202ea402f8278979097738369/python-sdk/nuscenes/utils/data_classes.py#L56.
    The function is modified to accept additional input parameters, just like the get_pc_from_file function. This enables filtering by invalid_states, dynprop_states and ambig_states.

    Arguments:
        nusc: A NuScenes instance, <NuScenes>.
        sample_rec: The current sample, <dict>.
        chan: The radar/lidar channel from which we track back n sweeps to aggregate the point cloud, <str>.
        ref_chan: The reference channel of the current sample_rec that the point clouds are mapped to, <str>.
        nsweeps: Number of sweeps to aggregate, <int>.
        min_distance: Distance below which points are discarded, <float>.
        invalid_states: Radar states to be kept, <int List>.
        dynprop_states: Radar states to be kept, <int List>. Use [0, 2, 6] for moving objects only.
        ambig_states: Radar states to be kept, <int List>.
    """
    # Initialize an empty point cloud of the appropriate sensor type.
    if chan == 'LIDAR_TOP':
        points = np.zeros((get_number_of_lidar_pc_dimensions(), 0))
        all_pc = LidarPointCloud(points)
    else:
        points = np.zeros((get_number_of_radar_pc_dimensions(), 0))
        all_pc = RadarPointCloud(points)

    all_times = np.zeros((1, 0))

    # Get reference pose and timestamp.
    ref_sd_token = sample_rec['data'][ref_chan]
    ref_sd_rec = nusc.get('sample_data', ref_sd_token)
    ref_pose_rec = nusc.get('ego_pose', ref_sd_rec['ego_pose_token'])
    ref_cs_rec = nusc.get('calibrated_sensor',
                          ref_sd_rec['calibrated_sensor_token'])
    ref_time = 1e-6 * ref_sd_rec['timestamp']

    # Homogeneous transform from ego car frame to reference frame.
    ref_from_car = transform_matrix(ref_cs_rec['translation'],
                                    Quaternion(ref_cs_rec['rotation']),
                                    inverse=True)

    # Homogeneous transformation matrix from global to _current_ ego car frame.
    car_from_global = transform_matrix(ref_pose_rec['translation'],
                                       Quaternion(ref_pose_rec['rotation']),
                                       inverse=True)

    # Aggregate current and previous sweeps.
    sample_data_token = sample_rec['data'][chan]
    current_sd_rec = nusc.get('sample_data', sample_data_token)
    for _ in range(nsweeps):
        # Load up the pointcloud.
        if chan == 'LIDAR_TOP':
            current_pc = LidarPointCloud.from_file(file_name=os.path.join(
                nusc.dataroot, current_sd_rec['filename']))
        else:
            current_pc = RadarPointCloud.from_file(
                file_name=os.path.join(nusc.dataroot,
                                       current_sd_rec['filename']),
                invalid_states=invalid_states,
                dynprop_states=dynprop_states,
                ambig_states=ambig_states)

        # Get past pose.
        current_pose_rec = nusc.get('ego_pose',
                                    current_sd_rec['ego_pose_token'])
        global_from_car = transform_matrix(current_pose_rec['translation'],
                                           Quaternion(
                                               current_pose_rec['rotation']),
                                           inverse=False)

        # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
        current_cs_rec = nusc.get('calibrated_sensor',
                                  current_sd_rec['calibrated_sensor_token'])
        car_from_current = transform_matrix(current_cs_rec['translation'],
                                            Quaternion(
                                                current_cs_rec['rotation']),
                                            inverse=False)

        # Fuse four transformation matrices into one and perform transform.
        trans_matrix = reduce(
            np.dot,
            [ref_from_car, car_from_global, global_from_car, car_from_current])
        current_pc.transform(trans_matrix)

        # Remove close points and add timevector.
        current_pc.remove_close(min_distance)
        time_lag = ref_time - 1e-6 * current_sd_rec[
            'timestamp']  # Positive difference.
        times = time_lag * np.ones((1, current_pc.nbr_points()))
        all_times = np.hstack((all_times, times))

        # Merge with key pc.
        all_pc.points = np.hstack((all_pc.points, current_pc.points))

        # Abort if there are no previous sweeps.
        if current_sd_rec['prev'] == '':
            break
        else:
            current_sd_rec = nusc.get('sample_data', current_sd_rec['prev'])

    return all_pc, all_times
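A call might look like the following; the channel names and sweep count are assumptions, and the radar state filter mirrors the docstring's hint for moving objects:

# Aggregate 5 radar sweeps into the LIDAR_TOP reference frame (choices assumed).
sample_rec = nusc.get('sample', nusc.scene[0]['first_sample_token'])
pc, times = get_pc_from_file_multisweep(nusc, sample_rec,
                                        chan='RADAR_FRONT', ref_chan='LIDAR_TOP',
                                        nsweeps=5, dynprop_states=[0, 2, 6])
print(pc.points.shape, times.shape)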
Example #6
    def __init__(self,
                 scene_id: int,
                 min_distance: float = 1.0,
                 skip_sweep: int = 1):
        super().__init__()

        self.nusc = NuScenes(version="v1.0-mini",
                             dataroot="./data/sets/nuscenes",
                             verbose=True)

        self.scene = self.nusc.scene[scene_id]
        first_sample_token = self.scene["first_sample_token"]
        self.first_sample = self.nusc.get("sample", first_sample_token)

        points = np.zeros((NUM_LIDAR_CHANNELS, 0), dtype=np.float32)
        num_samples = self.scene["nbr_samples"]
        all_pc = [LidarPointCloud(points) for _ in range(num_samples)]

        ref_sd_token = self.first_sample["data"]["LIDAR_TOP"]
        ref_sd_rec = self.nusc.get("sample_data", ref_sd_token)
        ref_pose_rec = self.nusc.get("ego_pose", ref_sd_rec["ego_pose_token"])
        ref_cs_rec = self.nusc.get("calibrated_sensor",
                                   ref_sd_rec["calibrated_sensor_token"])
        ref_time = 1e-6 * ref_sd_rec["timestamp"]

        # Homogeneous transform from ego car frame to reference frame.
        ref_from_car = transform_matrix(ref_cs_rec["translation"],
                                        Quaternion(ref_cs_rec["rotation"]),
                                        inverse=True)

        # Homogeneous transformation matrix from global to _current_ ego car frame.
        car_from_global = transform_matrix(
            ref_pose_rec["translation"],
            Quaternion(ref_pose_rec["rotation"]),
            inverse=True,
        )

        current_sample = self.first_sample

        self.pc = []
        self.ego = []

        for sample_id in range(1):
            sample_data_token = current_sample["data"]["LIDAR_TOP"]
            current_sd_rec = self.nusc.get("sample_data", sample_data_token)

            print("loading sample...", sample_id)

            while current_sd_rec["next"] != "":
                ego = LidarPointCloud(
                    np.array([[0.0], [0.0], [0.0], [1.0]], dtype=np.float32))
                current_pc = from_file(
                    osp.join(self.nusc.dataroot, current_sd_rec["filename"]))
                current_pc.remove_close(min_distance)
                ring = current_pc.points[3].copy()

                # Get past pose.
                current_pose_rec = self.nusc.get(
                    "ego_pose", current_sd_rec["ego_pose_token"])
                global_from_car = transform_matrix(
                    current_pose_rec["translation"],
                    Quaternion(current_pose_rec["rotation"]),
                    inverse=False,
                )

                # Homogeneous transformation matrix from sensor coordinate frame to ego car frame.
                current_cs_rec = self.nusc.get(
                    "calibrated_sensor",
                    current_sd_rec["calibrated_sensor_token"])
                car_from_current = transform_matrix(
                    current_cs_rec["translation"],
                    Quaternion(current_cs_rec["rotation"]),
                    inverse=False,
                )

                # Fuse four transformation matrices into one and perform transform.
                trans_matrix = reduce(
                    np.dot,
                    [
                        ref_from_car, car_from_global, global_from_car,
                        car_from_current
                    ],
                )
                current_pc.transform(trans_matrix)
                ego.transform(trans_matrix)

                time_lag = 1e-6 * current_sd_rec["timestamp"] - ref_time  # Positive difference.
                times = time_lag * np.ones((1, current_pc.nbr_points()))

                # Replace the intensity channel with the per-point time lag.
                current_pc.points[3] = times
                ring_pts = np.concatenate(
                    (current_pc.points, ring.reshape(1, -1)), axis=0)
                ring_pts = torch.from_numpy(ring_pts.transpose())
                rings_idx = [
                    (ring_pts[:, -1] == i).nonzero().squeeze(1).type(
                        torch.LongTensor)
                    for i in range(int(ring_pts[:, -1].max().item()) + 1)
                ]
                all_idx = torch.cat(rings_idx)
                pc_sorted = ring_pts[all_idx]

                self.pc.append(pc_sorted.numpy())
                self.ego.append(ego.points.transpose())
                # print(sample_id, ego.points.transpose())
                # print(pc_sorted[:100])  # Debug output.

                for _ in range(skip_sweep):
                    if current_sd_rec["next"] == "":
                        break
                    current_sd_rec = self.nusc.get("sample_data",
                                                   current_sd_rec["next"])

            if current_sample["next"] == "":
                break
            else:
                current_sample = self.nusc.get("sample",
                                               current_sample["next"])
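Assuming the surrounding class is a scene-level dataset (the class name `SceneDataset` is hypothetical), instantiation would follow the signature above:

# Hypothetical class name; the constructor arguments are the ones shown above.
dataset = SceneDataset(scene_id=0, min_distance=1.0, skip_sweep=1)
print(len(dataset.pc))      # Number of aggregated sweeps.
print(dataset.pc[0].shape)  # (N, 5): x, y, z, time lag, ring index.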
Example #7
def warpPointcloud(pointcloud):
    # Convert an (N, 3) array of xyz points into a (4, N) LidarPointCloud;
    # the fourth (intensity) row is left zero-padded.
    new_shape = np.zeros((4, pointcloud.shape[0]))
    new_shape[:3, :] = pointcloud.T
    return LidarPointCloud(new_shape)
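A quick usage sketch with random data, just to illustrate the expected (N, 3) input:

# (N, 3) xyz input; the intensity row stays zero inside warpPointcloud.
pts = np.random.rand(100, 3)
pc = warpPointcloud(pts)
print(pc.points.shape)  # (4, 100)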