def get_lidar_in_image_fov(pc_velo,
                           calib,
                           sensor,
                           xmin,
                           ymin,
                           xmax,
                           ymax,
                           return_more=False,
                           clip_distance=2.0):
    ''' Filter lidar points, keep those that fall inside the image FOV.

    Example:
        imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
            pc_velo, calib, sensor, 0, 0, img_width, img_height,
            return_more=True)
    '''

    # Chain lidar -> global -> camera frame, then project with the intrinsics.
    pts_global = calib.project_lidar_to_global(pc_velo.T)
    pts_3d_cam = calib.project_global_to_cam(pts_global, sensor)
    depths = pts_3d_cam[2, :]
    pts_2d = utils.view_points(pts_3d_cam[:3, :],
                               getattr(calib, sensor),
                               normalize=True)  # (3, n)
    pts_2d = pts_2d.T  # (n, 3)
    pts_2d[:, 2] = depths  # keep the camera-frame depth in the third column

    # Equivalent explicit transform chain (see project_velo_to_image below):
    # pts_2d = project_velo_to_image(pc_velo.copy(), calib, sensor)

    # Keep points whose projection falls inside the image bounds.
    fov_inds = (pts_2d[:, 0] < xmax) & (pts_2d[:, 0] >= xmin) & \
               (pts_2d[:, 1] < ymax) & (pts_2d[:, 1] >= ymin)
    # Also require a minimum camera-frame depth (stored before the projective
    # division) to drop points behind or too close to the camera.
    fov_inds = fov_inds & (pts_2d[:, 2] > clip_distance)
    imgfov_pc_velo = pc_velo[fov_inds, :]
    if return_more:
        return imgfov_pc_velo, pts_2d, fov_inds
    else:
        return imgfov_pc_velo
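

# `utils.view_points` is assumed to be the nuScenes devkit helper of the same
# name: it applies a camera matrix to (3, n) points and, with normalize=True,
# divides by depth to obtain pixel coordinates. Below is a minimal sketch of
# that behavior for a plain 3x3 intrinsics matrix (the devkit version also
# pads smaller matrices to 4x4 homogeneous form); the name
# `view_points_sketch` is illustrative, not part of this module:
import numpy as np


def view_points_sketch(points, view, normalize=True):
    # points: (3, n) points in the camera frame; view: (3, 3) intrinsics.
    pts = np.dot(view, points)  # (3, n): rows are (u*z, v*z, z)
    if normalize:
        pts = pts / pts[2:3, :]  # perspective division -> (u, v, 1)
    return pts
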
def project_velo_to_image(pc_velo, calib, sensor, return_time=False):
    ''' Input: nx3 points in the lidar (velodyne) frame.
        Output: nx3 points as (u, v, depth) on the image plane of `sensor`.
    '''
    # Points live in the point sensor frame. So they need to be transformed via global to the image plane.
    # First step: transform the point-cloud to the ego vehicle frame for the timestamp of the sweep.
    if return_time:
        import time
        start = time.time()
    pts_3d_velo = pc_velo.T
    pts_3d_ego = rotate(pts_3d_velo, calib.lidar2ego_rotation)
    pts_3d_ego = translate(pts_3d_ego, calib.lidar2ego_translation)

    # Second step: transform to the global frame.
    pts_3d_global = rotate(pts_3d_ego, calib.ego2global_rotation)
    pts_3d_global = translate(pts_3d_global, calib.ego2global_translation)

    # Third step: transform into the ego vehicle frame for the timestamp of
    # the image (inverse of ego -> global: un-translate, then un-rotate).
    pts_3d_ego_cam = translate(
        pts_3d_global, -getattr(calib, sensor + '_ego2global_translation'))
    pts_3d_ego_cam = rotate(
        pts_3d_ego_cam, getattr(calib, sensor + '_ego2global_rotation').T)

    # Fourth step: transform into the camera frame (inverse of the
    # cam -> ego extrinsics).
    pts_3d_cam = translate(
        pts_3d_ego_cam, -getattr(calib, sensor + '_cam2ego_translation'))
    pts_3d_cam = rotate(
        pts_3d_cam, getattr(calib, sensor + '_cam2ego_rotation').T)

    # Take the actual picture: multiply by the camera intrinsics and
    # renormalize by depth.
    depths = pts_3d_cam[2, :]
    pts_2d_cam = utils.view_points(pts_3d_cam[:3, :],
                                   getattr(calib, sensor),
                                   normalize=True)  # (3, n)
    pts_2d_cam = pts_2d_cam.T  # (n, 3)
    pts_2d_cam[:, 2] = depths  # keep the camera-frame depth in column 2
    if return_time:
        end = time.time()
        print('Time(project_velo_to_image):', end - start)
        return pts_2d_cam, end - start
    else:
        return pts_2d_cam
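

# The `rotate` and `translate` helpers used above are defined elsewhere in
# this module. Below is a minimal sketch of the assumed behavior, following
# the nuScenes devkit convention of (3, n) column-vector point arrays; the
# names `rotate_sketch`/`translate_sketch` are illustrative, not part of this
# module:
import numpy as np


def rotate_sketch(points, rot_matrix):
    # points: (3, n); rot_matrix: (3, 3). Left-multiply every column.
    return np.dot(rot_matrix, points)


def translate_sketch(points, t):
    # points: (3, n); t: (3,). Add the offset to every column.
    return points + np.asarray(t).reshape(3, 1)

# Sanity check: with identity rotations and zero translations, the transform
# chain in project_velo_to_image reduces to a pure intrinsics projection.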