def show_lidar_with_boxes(pc_velo,
                          objects,
                          calib,
                          sensor,
                          img_fov=False,
                          img_width=None,
                          img_height=None):
    ''' Show all LiDAR points and draw the 3D bounding boxes
        on top of them (everything in the velodyne / LIDAR_TOP coordinate system). '''
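    # Arguments (inferred from how they are used below):
    #   pc_velo : (N, 3) LiDAR points in the LIDAR_TOP frame
    #   objects : KITTI-style label objects in the camera frame ('DontCare' skipped)
    #   calib   : calibration object providing cam <-> global <-> lidar projections
    #   sensor  : camera channel name, e.g. 'CAM_FRONT'
    #   img_fov : if True, keep only the points inside the image field of view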
    import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple
    #from viz_util import draw_lidar
    from viz_util import draw_gt_boxes3d

    view = getattr(calib, sensor)
    print('All point num:', pc_velo.shape[0])
    #fig = mlab.figure(figure=None, bgcolor=(0,0,0),
    #    fgcolor=None, engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, sensor, 0, 0,
                                         img_width, img_height)
        print('FOV point num:', pc_velo.shape[0])
    #draw_lidar(pc_velo, fig=fig)
    #fig = draw_nusc_lidar(pc_velo,pts_scale=0.1,pts_mode='sphere')
    fig = utils.draw_nusc_lidar(pc_velo)
    obj_mean = np.array([0.0, 0.0, 0.0])
    obj_count = 0
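    # Labels are stored in the camera frame, so each box (and its heading arrow)
    # is projected camera -> global -> LiDAR before being drawn over the cloud.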
    for obj in objects:
        if obj.type == 'DontCare': continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(
            obj, np.eye(4))  #(8,2),(8,3)
        box3d_pts_3d_global = calib.project_cam_to_global(
            box3d_pts_3d.T, sensor)  # (3,8)
        box3d_pts_3d_velo = calib.project_global_to_lidar(
            box3d_pts_3d_global).T  #(8,3)
        # box3d_pts_3d_velo = box3d_pts_3d
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(
            obj, np.eye(4))  #(2,2),(2,3)
        ori3d_pts_3d_global = calib.project_cam_to_global(
            ori3d_pts_3d.T, sensor)  #(3,2)
        ori3d_pts_3d_velo = calib.project_global_to_lidar(
            ori3d_pts_3d_global).T  #(2,3)
        # ori3d_pts_3d_velo = ori3d_pts_3d
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)

        mlab.plot3d([x1, x2], [y1, y2], [z1, z2],
                    color=(0.5, 0.5, 0.5),
                    tube_radius=None,
                    line_width=1,
                    figure=fig)

        obj_mean += np.mean(box3d_pts_3d_velo, axis=0)  # box center = mean of its 8 corners
        obj_count += 1
    if obj_count > 0:
        obj_mean = obj_mean / obj_count
        print("mean box center:", obj_mean)
    mlab.show(1)


def demo(data_idx=0, obj_idx=-1):
    sensor = 'CAM_FRONT'
    import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_gt_boxes3d
    dataset = nuscenes2kitti_object(
        os.path.join(ROOT_DIR, 'dataset/nuScenes2KITTI'))

    # Load data from dataset
    objects = dataset.get_label_objects(
        sensor, data_idx)  # objects = [Object3d(line) for line in lines]
    for i, obj in enumerate(objects):
        print('obj %d' % (i))
        obj.print_object()

    calib = dataset.get_calibration(
        data_idx)  # utils.Calibration(calib_filename)
    box2d = objects[obj_idx].box2d
    xmin, ymin, xmax, ymax = box2d
    box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
    uvdepth = np.zeros((1, 3))
    uvdepth[0, 0:2] = box2d_center
    uvdepth[0, 2] = 20  # some random depth
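    # (u, v, depth) of the 2D-box center; the commented-out block below would
    # backproject it to the camera frame to estimate the object's frustum angle.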
    #box2d_center_rect = calib.project_image_to_rect(uvdepth)
    #frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
    #                                box2d_center_rect[0, 0])
    #print('frustum_angle:', frustum_angle)
    img = dataset.get_image(sensor, data_idx)  # HxWxC image (BGR from cv2)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print('Image shape:', img.shape)
    print(dataset.get_lidar(data_idx).shape)
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]  # (N, 3) XYZ points
    # 1.Draw lidar with boxes in LIDAR_TOP coord
    print(' -------- LiDAR points in LIDAR_TOP coordination --------')
    print('pc_velo.shape:', pc_velo.shape)
    print('pc_velo[:10,:]:', pc_velo[:10, :])
    ##view = np.eye(4)
    ##pc_velo[:, :3] = utils.view_points(pc_velo[:, :3].T, view, normalize=False).T
    ##pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_velo)
    show_lidar_with_boxes(pc_velo, objects, calib, sensor, False, img_width,
                          img_height)
    input()

    # 2.Draw frustum lidar with boxes in LIDAR_TOP coord
    print(
        ' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    show_lidar_with_boxes(pc_velo.copy(), objects, calib, sensor, True,
                          img_width, img_height)
    input()

    # 3.Draw 2d and 3d boxes on CAM_FRONT image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib, sensor)
    input()

    print(
        ' -------- render LiDAR points (and 3D boxes) in LIDAR_TOP coordinate --------'
    )
    render_lidar_bev(pc_velo, objects, calib, sensor)
    input()

    # Visualize LiDAR points on images
    print(' -------- LiDAR points projected to image plane --------')
    show_lidar_on_image(pc_velo, img.copy(), calib, sensor, img_width,
                        img_height)  #pc_velo:(n,3)
    input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    for i, obj in enumerate(objects):
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, np.eye(4))
        box3d_pts_3d_global = calib.project_cam_to_global(
            box3d_pts_3d.T, sensor)  # (3,8)
        box3d_pts_3d_velo = calib.project_global_to_lidar(
            box3d_pts_3d_global)  # (3,8)
        box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo.T)
        print('Number of points in 3d box:', box3droi_pc_velo.shape[0])
        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        utils.draw_nusc_lidar(box3droi_pc_velo, fig=fig)
        draw_gt_boxes3d([box3d_pts_3d_velo.T], fig=fig)
        mlab.show(1)
        input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum --------')

    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, sensor, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]  #(n, 3)
    imgfov_pc_global = calib.project_lidar_to_global(imgfov_pc_velo.T)
    imgfov_pc_cam = calib.project_global_to_cam(imgfov_pc_global,
                                                sensor)  #(3,n)
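    # imgfov_pc_cam holds the in-FOV points in the camera frame. The code below
    # builds a UVDepth representation (pixel u, v plus camera-frame depth),
    # backprojects it, and checks that the image <-> camera <-> global <-> lidar
    # round trip reproduces the original points with ~zero mean error.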

    #cameraUVDepth = utils.view_points(imgfov_pc_cam[:3, :], getattr(calib,sensor), normalize=True)#(3,3067)
    #cameraUVDepth = cameraUVDepth#(3067, 3)
    #ipdb.set_trace()
    #cameraUVDepth = np.zeros_like(imgfov_pc_cam)
    #cameraUVDepth[:,0:2] = imgfov_pts_2d[:, 0:2]
    #cameraUVDepth[:,2] = imgfov_pc_cam[:,2]

    # without applying the camera intrinsics:
    # cameraUVDepth = imgfov_pc_cam
    # backprojected_pc_cam = cameraUVDepth

    # with the camera intrinsics applied:
    print('imgfov_pc_cam.shape:', imgfov_pc_cam.shape)
    print('imgfov_pc_cam[:,0:5].T:\n', imgfov_pc_cam[:, 0:5].T)
    cameraUVDepth = calib.project_cam_to_image(imgfov_pc_cam, sensor)  #(3,n)
    cameraUVDepth[2, :] = imgfov_pc_cam[2, :]
    print('cameraUVDepth.shape:', cameraUVDepth.shape)
    print('cameraUVDepth[:,0:5].T:\n', cameraUVDepth[:, 0:5].T)
    backprojected_pc_cam = calib.project_image_to_cam(cameraUVDepth,
                                                      sensor)  #(3,n)
    print('backprojected_pc_cam.shape:', backprojected_pc_cam.shape)
    print('backprojected_pc_cam[:,0:5].T\n:', backprojected_pc_cam[:, 0:5].T)
    print('error:')
    print(np.mean(backprojected_pc_cam - imgfov_pc_cam, axis=1))
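    # Extra sanity check: the worst-case per-coordinate deviation should also be
    # ~0 if the calibration round trip is exact.
    print('max abs error:', np.max(np.abs(backprojected_pc_cam - imgfov_pc_cam)))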
    # Show that the points are exactly the same
    backprojected_pc_global = calib.project_cam_to_global(
        backprojected_pc_cam, sensor)  #(3,n)
    backprojected_pc_velo = calib.project_global_to_lidar(
        backprojected_pc_global).T  #(n,3)
    print('imgfov_pc_velo.shape:', imgfov_pc_velo.shape)
    print(imgfov_pc_velo[0:5, :])
    print('backprojected_pc_velo.shape:', backprojected_pc_velo.shape)
    print(backprojected_pc_velo[0:5, :])
    print('error:')
    print(np.mean(backprojected_pc_velo - imgfov_pc_velo, axis=0))
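    # Render the backprojected cloud so the round trip can also be checked visually.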
    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    utils.draw_nusc_lidar(backprojected_pc_velo, fig=fig)
    input()

    # Only display those points that fall into 2d box
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    xmin,ymin,xmax,ymax = \
        objects[obj_idx].xmin, objects[obj_idx].ymin, objects[obj_idx].xmax, objects[obj_idx].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, sensor, xmin, ymin, xmax, ymax)
    print('2d box FOV point num:', boxfov_pc_velo.shape[0])

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    utils.draw_nusc_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    input()