Example #1
def project_and_save(lidar_filepath, output_base_path, calib_data, cameras, db,
                     acc_sweeps, ip_basic):

    log = lidar_filepath.parents[1].stem
    split_dir = lidar_filepath.parents[2]
    lidar_timestamp = lidar_filepath.stem[3:]  # strip the "PC_" filename prefix

    neighbouring_timestamps = get_neighbouring_lidar_timestamps(
        db, log, lidar_timestamp, acc_sweeps)

    pts = load_ply(lidar_filepath)  # point cloud, numpy array Nx3 -> N 3D coords

    points_h = point_cloud_to_homogeneous(pts).T

    for camera_name in cameras:
        # assumes `db` is a SynchronizationDB (see Example #3): find the camera
        # frame captured closest in time to this lidar sweep
        cam_timestamp = db.get_closest_cam_channel_timestamp(
            int(lidar_timestamp), camera_name, log)
        if cam_timestamp is None:
            continue

        uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
            points_h,  # these are recorded at lidar_time
            copy.deepcopy(calib_data),
            camera_name,
            int(cam_timestamp),
            int(lidar_timestamp),
            str(split_dir),
            log,
        )
        if valid_pts_bool is None or uv is None:
            continue
        uv = uv[valid_pts_bool].astype(np.int32)  # Nx2 projected coords in pixels
        uv_cam = uv_cam.T[valid_pts_bool]  # Nx3 projected coords in meters

        img_width, img_height = get_image_dims_for_camera(camera_name)
        img = np.zeros((img_height, img_width))
        img[uv[:, 1], uv[:, 0]] = uv_cam[:, 2]  # image of projected lidar measurements

        lidar_filename = lidar_filepath.stem
        output_dir = output_base_path / camera_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path = output_dir / (lidar_filename + ".npz")
        savez_compressed(str(output_path), img)
    return None
Example #2
def project_and_save(lidar_filepath, output_base_path, calib_data, cameras):

    pts = load_ply(lidar_filepath)  # point cloud, numpy array Nx3 -> N 3D coords

    points_h = point_cloud_to_homogeneous(pts).T

    for camera_name in cameras:
        uv, uv_cam, valid_pts_bool = project_lidar_to_img(points_h, calib_data, camera_name)
        uv = uv[valid_pts_bool].astype(np.int32)  # Nx2 projected coords in pixels
        uv_cam = uv_cam.T[valid_pts_bool]  # Nx3 projected coords in meters

        img_width, img_height = get_image_dims_for_camera(camera_name)
        img = np.zeros((img_height, img_width))
        img[uv[:, 1], uv[:, 0]] = uv_cam[:, 2]  # image of projected lidar measurements

        lidar_filename = lidar_filepath.stem
        output_dir = output_base_path / camera_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path = output_dir / (lidar_filename + ".npz")
        savez_compressed(str(output_path), img)
    return None
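A minimal driver for the four-argument variant directly above, sketched under assumptions: the dataset path, log ID, and camera names are placeholders, and `load_ply`, `point_cloud_to_homogeneous`, and `project_lidar_to_img` come from argoverse-api as in the examples.

import json
from pathlib import Path

# Hypothetical paths and log id -- adjust to your local dataset layout.
log_dir = Path("/data/argoverse/argoverse-tracking/train1/some_log_id")
output_base_path = Path("/tmp/depth_maps")

with open(log_dir / "vehicle_calibration_info.json", "r") as f:
    calib_data = json.load(f)

for lidar_filepath in sorted((log_dir / "lidar").glob("PC_*.ply")):
    project_and_save(lidar_filepath, output_base_path, calib_data,
                     ["ring_front_center", "ring_front_left"])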
Example #3
def draw_ground_pts_in_image(
    sdb: SynchronizationDB,
    lidar_points: np.ndarray,
    city_to_egovehicle_se3: SE3,
    dataset_map: ArgoverseMap,
    log_id: str,
    lidar_timestamp: int,
    city_name: str,
    dataset_dir: str,
    experiment_prefix: str,
    plot_ground: bool = True,
    motion_compensate: bool = False,
    camera: Optional[str] = None,
) -> Union[None, np.ndarray]:
    """Write an image to disk with rendered ground points for every camera.

    Args:
        sdb: instance of SynchronizationDB
        lidar_points: Numpy array of shape (N,3) in egovehicle frame
        city_to_egovehicle_se3: SE3 instance which takes a point in egovehicle frame and brings it into city frame
        dataset_map: Map dataset instance
        log_id: ID of the log
        lidar_timestamp: timestamp of the lidar sweep, in nanoseconds
        city_name: A city's name (e.g. 'MIA' or 'PIT')
        dataset_dir: path to the dataset root directory
        experiment_prefix: prefix for the output visualization directory
        plot_ground: whether to render the ground points (True) or only the non-ground points (False)
        motion_compensate: whether to bring lidar points from world frame @ lidar timestamp, to world frame @ camera
                           timestamp
        camera: camera name; if specified, the rendered image for that camera is returned; if None, all
                camera images are saved to disk and None is returned

    Returns:
        The rendered image (converted to RGB) if `camera` is specified and found, otherwise None.
    """
    # put into city coords, then prune away non-driveable-area points and keep
    # either the ground or the non-ground points, depending on `plot_ground`
    lidar_points = city_to_egovehicle_se3.transform_point_cloud(lidar_points)
    lidar_points = dataset_map.remove_non_driveable_area_points(
        lidar_points, city_name)
    _, not_ground_logicals = dataset_map.remove_ground_surface(
        copy.deepcopy(lidar_points), city_name, return_logicals=True)
    keep = np.logical_not(not_ground_logicals) if plot_ground else not_ground_logicals
    lidar_points = lidar_points[keep]

    # put back into ego-vehicle coords
    lidar_points = city_to_egovehicle_se3.inverse_transform_point_cloud(
        lidar_points)

    calib_fpath = f"{dataset_dir}/{log_id}/vehicle_calibration_info.json"
    calib_data = read_json_file(calib_fpath)

    # green-to-red colormap, repeating every NUM_RANGE_BINS meters
    colors_arr = np.array([
        [color_obj.rgb]
        for color_obj in Color("red").range_to(Color("green"), NUM_RANGE_BINS)
    ]).squeeze()
    colors_arr = np.flipud(colors_arr)  # reverse bin order: green up close, red far away

    for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
        im_dir = f"{dataset_dir}/{log_id}/{camera_name}"

        # find and load the camera frame captured closest in time to this lidar sweep
        cam_timestamp = sdb.get_closest_cam_channel_timestamp(
            lidar_timestamp, camera_name, log_id)
        if cam_timestamp is None:
            continue

        im_fname = f"{camera_name}_{cam_timestamp}.jpg"
        im_fpath = f"{im_dir}/{im_fname}"

        # Swap channel order as OpenCV expects it -- BGR not RGB
        # must make a copy to make memory contiguous
        img = imageio.imread(im_fpath)[:, :, ::-1].copy()
        points_h = point_cloud_to_homogeneous(copy.deepcopy(lidar_points)).T

        if motion_compensate:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
                points_h,  # these are recorded at lidar_time
                copy.deepcopy(calib_data),
                camera_name,
                cam_timestamp,
                lidar_timestamp,
                dataset_dir,
                log_id,
                False,
            )
        else:
            uv, uv_cam, valid_pts_bool = project_lidar_to_img(
                points_h, copy.deepcopy(calib_data), camera_name, False)

        if valid_pts_bool is None or uv is None or uv_cam is None:
            continue

        if valid_pts_bool.sum() == 0:
            continue

        uv = np.round(uv[valid_pts_bool]).astype(np.int32)
        uv_cam = uv_cam.T[valid_pts_bool]
        pt_ranges = np.linalg.norm(uv_cam[:, :3], axis=1)
        rgb_bins = np.round(pt_ranges).astype(np.int32)
        # ranges repeat every NUM_RANGE_BINS meters, so wrap back around the colormap
        rgb_bins = rgb_bins % NUM_RANGE_BINS
        uv_colors = (255 * colors_arr[rgb_bins]).astype(np.int32)

        img = draw_point_cloud_in_img_cv2(img, uv, np.fliplr(uv_colors))

        if not Path(f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
                    ).exists():
            os.makedirs(
                f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}")

        save_dir = f"{experiment_prefix}_ground_viz/{log_id}/{camera_name}"
        cv2.imwrite(f"{save_dir}/{camera_name}_{lidar_timestamp}.jpg", img)
        if camera == camera_name:
            return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return None
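For reference, a hypothetical invocation of `draw_ground_pts_in_image`; the dataset path, log ID, and timestamp are placeholders, while `SynchronizationDB`, `ArgoverseMap`, and `SimpleArgoverseTrackingDataLoader` are the argoverse-api classes already used in these examples.

# Hypothetical invocation -- placeholder path, log id, and timestamp.
dataset_dir = "/data/argoverse/argoverse-tracking/sample"
log_id = "some_log_id"
lidar_timestamp = 315966393019907000  # placeholder sweep timestamp (ns)

sdb = SynchronizationDB(dataset_dir)
avm = ArgoverseMap()
dl = SimpleArgoverseTrackingDataLoader(data_dir=dataset_dir, labels_dir=dataset_dir)

lidar_points = load_ply(f"{dataset_dir}/{log_id}/lidar/PC_{lidar_timestamp}.ply")
city_to_egovehicle_se3 = dl.get_city_to_egovehicle_se3(log_id, lidar_timestamp)
city_name = dl.get_city_name(log_id)

draw_ground_pts_in_image(sdb, lidar_points, city_to_egovehicle_se3, avm, log_id,
                         lidar_timestamp, city_name, dataset_dir, "ground_demo")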
Example #4
def dump_clipped_3d_cuboids_to_images(
    log_ids: Sequence[str],
    max_num_images_to_render: int,
    data_dir: str,
    experiment_prefix: str,
    motion_compensate: bool = True,
) -> List[str]:
    """
    We bring the 3D points into each camera coordinate system, and do the clipping there in 3D.

    Args:
        log_ids: A list of log IDs
        max_num_images_to_render: maximum numbers of images to render.
        data_dir: path to dataset with the latest data
        experiment_prefix: Output directory
        motion_compensate: Whether to motion compensate when projecting

    Returns:
        saved_img_fpaths
    """
    saved_img_fpaths = []
    dl = SimpleArgoverseTrackingDataLoader(data_dir=data_dir,
                                           labels_dir=data_dir)
    avm = ArgoverseMap()

    for log_id in log_ids:
        save_dir = f"{experiment_prefix}_{log_id}"
        os.makedirs(save_dir, exist_ok=True)

        city_name = dl.get_city_name(log_id)
        log_calib_data = dl.get_log_calibration_data(log_id)

        flag_done = False
        for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
            cam_im_fpaths = dl.get_ordered_log_cam_fpaths(log_id, camera_name)
            for i, im_fpath in enumerate(cam_im_fpaths):
                if i % 50 == 0:
                    logging.info("\tOn file %s of camera %s of %s", i,
                                 camera_name, log_id)

                cam_timestamp = Path(im_fpath).stem.split("_")[-1]
                cam_timestamp = int(cam_timestamp)

                # load PLY file path, e.g. 'PC_315978406032859416.ply'
                ply_fpath = dl.get_closest_lidar_fpath(log_id, cam_timestamp)
                if ply_fpath is None:
                    continue
                lidar_pts = load_ply(ply_fpath)
                save_img_fpath = f"{save_dir}/{camera_name}_{cam_timestamp}.jpg"
                if Path(save_img_fpath).exists():
                    saved_img_fpaths += [save_img_fpath]
                    if max_num_images_to_render != -1 and len(
                            saved_img_fpaths) > max_num_images_to_render:
                        flag_done = True
                        break
                    continue

                city_to_egovehicle_se3 = dl.get_city_to_egovehicle_se3(
                    log_id, cam_timestamp)
                if city_to_egovehicle_se3 is None:
                    continue

                lidar_timestamp = Path(ply_fpath).stem.split("_")[-1]
                lidar_timestamp = int(lidar_timestamp)
                labels = dl.get_labels_at_lidar_timestamp(
                    log_id, lidar_timestamp)
                if labels is None:
                    logging.info("\tLabels missing at t=%s", lidar_timestamp)
                    continue

                # Swap channel order as OpenCV expects it -- BGR not RGB
                # must make a copy to make memory contiguous
                img = imageio.imread(im_fpath)[:, :, ::-1].copy()
                camera_config = get_calibration_config(log_calib_data,
                                                       camera_name)
                planes = generate_frustum_planes(
                    camera_config.intrinsic.copy(), camera_name)
                img = plot_lane_centerlines_in_img(
                    lidar_pts,
                    city_to_egovehicle_se3,
                    img,
                    city_name,
                    avm,
                    camera_config,
                    planes,
                )

                for label_idx, label in enumerate(labels):
                    obj_rec = json_label_dict_to_obj_record(label)
                    if obj_rec.occlusion == 100:
                        continue

                    cuboid_vertices = obj_rec.as_3d_bbox()
                    points_h = point_cloud_to_homogeneous(cuboid_vertices).T
                    if motion_compensate:
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            K,
                        ) = project_lidar_to_img_motion_compensated(
                            points_h,  # these are recorded at lidar_time
                            copy.deepcopy(log_calib_data),
                            camera_name,
                            cam_timestamp,
                            lidar_timestamp,
                            data_dir,
                            log_id,
                            return_K=True,
                        )
                    else:
                        # no motion compensation: project into the undistorted image
                        (
                            uv,
                            uv_cam,
                            valid_pts_bool,
                            camera_config,
                        ) = project_lidar_to_undistorted_img(
                            points_h, copy.deepcopy(log_calib_data),
                            camera_name)
                    if valid_pts_bool is None or valid_pts_bool.sum() == 0:
                        continue

                    img = obj_rec.render_clip_frustum_cv2(
                        img,
                        uv_cam.T[:, :3],
                        planes.copy(),
                        copy.deepcopy(camera_config),
                    )

                cv2.imwrite(save_img_fpath, img)
                saved_img_fpaths += [save_img_fpath]
                if max_num_images_to_render != -1 and len(
                        saved_img_fpaths) > max_num_images_to_render:
                    flag_done = True
                    break
            if flag_done:
                break
        category_subdir = "amodal_labels"

        if not Path(f"{experiment_prefix}_{category_subdir}").exists():
            os.makedirs(f"{experiment_prefix}_{category_subdir}")

        for camera_name in RING_CAMERA_LIST + STEREO_CAMERA_LIST:
            # Write the cuboid video -- could also write w/ fps=20,30,40
            if "stereo" in camera_name:
                fps = 5
            else:
                fps = 30
            img_wildcard = f"{save_dir}/{camera_name}_%*.jpg"
            output_fpath = f"{experiment_prefix}_{category_subdir}/{log_id}_{camera_name}_{fps}fps.mp4"
            write_nonsequential_idx_video(img_wildcard, output_fpath, fps)

    return saved_img_fpaths
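A short, hypothetical call for the function above (placeholder path and log ID); per the check in the loop, `max_num_images_to_render=-1` would render every frame.

saved_img_fpaths = dump_clipped_3d_cuboids_to_images(
    log_ids=["some_log_id"],                               # placeholder
    max_num_images_to_render=100,
    data_dir="/data/argoverse/argoverse-tracking/sample",  # placeholder
    experiment_prefix="cuboid_viz",
    motion_compensate=True,
)
print(f"rendered {len(saved_img_fpaths)} images")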
Example #5
def project_and_save(argoverse_tracking_root_dir, camera_list, output_base_dir,
                     sample_paths):
    relative_lidar_path = Path(sample_paths[0])

    split = relative_lidar_path.parents[2].stem
    log = relative_lidar_path.parents[1].stem
    lidar_timestamp = int(relative_lidar_path.stem[3:])

    lidar_filepath = argoverse_tracking_root_dir / relative_lidar_path

    split_dir = argoverse_tracking_root_dir / split

    log_dir = split_dir / log
    with open(str(log_dir / "vehicle_calibration_info.json"), "r") as f:
        calib_data = json.load(f)

    pc = load_ply_ring(str(lidar_filepath))

    # Argoverse stores quaternion coefficients as (w, x, y, z), while scipy's
    # Rotation.from_quat expects (x, y, z, w), so roll the coefficients first.
    def se3_from_calib(extrinsics):
        tf = np.eye(4)
        tf[0:3, 0:3] = Rotation.from_quat(
            np.roll(extrinsics['rotation']['coefficients'], -1)).as_matrix()
        tf[0:3, 3] = extrinsics['translation']
        return tf

    tf_down_lidar = se3_from_calib(calib_data['vehicle_SE3_down_lidar_'])
    tf_up_lidar = se3_from_calib(calib_data['vehicle_SE3_up_lidar_'])

    pc_up, pc_down = separate_pc(pc, tf_up_lidar, tf_down_lidar)
    # keep only returns from laser beams 0-9 of the upper lidar
    beams = list(range(10))
    mask = np.isin(pc_up[:, 4], beams)
    pts = pc_up[mask][:, :3]

    points_h = point_cloud_to_homogeneous(pts).T

    for cam_idx, camera_name in enumerate(camera_list):

        img_rel_path = sample_paths[1 + cam_idx][0]
        closest_cam_timestamp = int(
            Path(img_rel_path).stem[len(camera_name) + 1:])

        uv, uv_cam, valid_pts_bool = project_lidar_to_img_motion_compensated(
            points_h,  # these are recorded at lidar_time
            copy.deepcopy(calib_data),
            camera_name,
            closest_cam_timestamp,
            lidar_timestamp,
            str(split_dir),
            log,
        )

        img_width, img_height = get_image_dims_for_camera(camera_name)
        img = np.zeros((img_height, img_width))

        if valid_pts_bool is None or uv is None:
            print(
                f"uv or valid_pts_bool is None: camera {camera_name} ts {closest_cam_timestamp}, {lidar_filepath}"
            )
        else:
            uv = uv[valid_pts_bool].astype(np.int32)  # Nx2 projected coords in pixels
            uv_cam = uv_cam.T[valid_pts_bool]  # Nx3 projected coords in meters

            img[uv[:, 1], uv[:, 0]] = uv_cam[:, 2]  # image of projected lidar measurements

        lidar_filename = lidar_filepath.stem
        output_dir = output_base_dir / split / log / camera_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_path = output_dir / (lidar_filename + ".npz")
        savez_compressed(str(output_path), img)

    return None
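Every variant above writes the projected depth image with `savez_compressed` and a positional array argument, so NumPy stores it under the default key "arr_0". A minimal read-back sketch, with a placeholder path:

import numpy as np

npz_path = "/tmp/depth_maps/train1/some_log_id/ring_front_center/PC_315966393019907000.npz"  # placeholder
depth = np.load(npz_path)["arr_0"]  # HxW sparse depth image; zeros mean no lidar return
print(depth.shape, (depth > 0).sum())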