Code example #1
 def visualize_one_sample(self, old_points, expand_points, gt_box_3d,
                          prop_box_3d, box2d_center_rect):
     import mayavi.mlab as mlab
     from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
     # fig = draw_lidar(pc_in_prop_box, pts_color=(1,1,1))
     fig = draw_lidar(expand_points[:, :3], pts_color=(1, 1, 1))
     fig = draw_lidar(old_points[:, :3], pts_color=(0, 1, 0), fig=fig)
     fig = draw_gt_boxes3d([gt_box_3d], fig, color=(1, 0, 0))
     fig = draw_gt_boxes3d([prop_box_3d],
                           fig,
                           draw_text=False,
                           color=(1, 1, 1))
     # roi_feature_map
     # roi_features_size = 7 * 7 * 32
     # img_roi_features = prop.roi_features[0:roi_features_size].reshape((7, 7, -1))
     # bev_roi_features = prop.roi_features[roi_features_size:].reshape((7, 7, -1))
     # img_roi_features = np.amax(img_roi_features, axis=-1)
     # bev_roi_features = np.amax(bev_roi_features, axis=-1)
     # fig1 = mlab.figure(figure=None, bgcolor=(0,0,0),
     #     fgcolor=None, engine=None, size=(500, 500))
     # fig2 = mlab.figure(figure=None, bgcolor=(0,0,0),
     #     fgcolor=None, engine=None, size=(500, 500))
     # mlab.imshow(img_roi_features, colormap='gist_earth', name='img_roi_features', figure=fig1)
     # mlab.imshow(bev_roi_features, colormap='gist_earth', name='bev_roi_features', figure=fig2)
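     # draw a ray from the origin out to the backprojected 2D box center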
     mlab.plot3d([0, box2d_center_rect[0][0]], [0, box2d_center_rect[0][1]],
                 [0, box2d_center_rect[0][2]],
                 color=(1, 1, 1),
                 tube_radius=None,
                 figure=fig)
     raw_input()
Code example #2
File: rpn_dataset.py  Project: overfitover/PointRCNN
 def viz_frame(self, pc_rect, mask, gt_boxes):
     import mayavi.mlab as mlab
     from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
     fig = draw_lidar(pc_rect)
     fig = draw_lidar(pc_rect[mask == 1], fig=fig, pts_color=(1, 1, 1))
     fig = draw_gt_boxes3d(gt_boxes, fig, draw_text=False, color=(1, 1, 1))
     raw_input()
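
The two examples above share one layering pattern: draw the full cloud once, redraw a highlighted subset into the same fig, add the boxes, then block (raw_input() on Python 2) so the window stays open. Below is a minimal self-contained sketch of that pattern; it assumes viz_util from the frustum-pointnets repository is importable and substitutes random points and a hand-built unit box for real KITTI data.

import numpy as np
import mayavi.mlab as mlab
from viz_util import draw_lidar, draw_gt_boxes3d  # assumed: frustum-pointnets' viz_util

# Stand-in data: an N x 3 cloud and one 8 x 3 corner box (4 top corners, then 4 bottom).
pc = np.random.uniform(-10.0, 10.0, (2048, 3)).astype(np.float32)
mask = np.linalg.norm(pc, axis=1) < 5.0      # pretend segmentation: points near the origin
box = np.array([[1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1],
                [1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1]], np.float32)

fig = draw_lidar(pc)                                        # full cloud
fig = draw_lidar(pc[mask], fig=fig, pts_color=(1, 1, 1))    # overlay the subset in white
fig = draw_gt_boxes3d([box], fig, draw_text=False, color=(1, 1, 1))
mlab.show()                                                 # keep the window up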
Code example #3
def show_corners(points, calib, corners_anchors, ctr_points=None, sv_img_path=None):
    """
    Input:
        points: [N, 3]
        calib: a calib object
        anchors: [N, 8, 3]
    """
    if 'mlab' not in sys.modules: import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    # first project anchors back to the velo location
    points = calib.project_rect_to_velo(points)

    # likewise, project the box corners back to velo
    corners_anchors = np.reshape(corners_anchors, [-1, 3])
    corners_anchors = calib.project_rect_to_velo(corners_anchors)
    corners_anchors = np.reshape(corners_anchors, [-1, 8, 3])

    # now, we get the corners_anchors, then, we only have to draw them
    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(points, fig=fig)
    if ctr_points is not None:
        draw_lidar(ctr_points, fig=fig, pts_scale=0.10, pts_color=(1.0, 0.0, 0.0))
    draw_gt_boxes3d(corners_anchors, fig=fig) 
    if sv_img_path is not None:
        mlab.savefig(sv_img_path, figure=fig)
    else:
        mlab.show(1)
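
The reshape round-trip in this example is the usual way to push a batch of [N, 8, 3] corner arrays through a calibration call that expects a flat [M, 3] matrix. A standalone illustration with an identity stand-in for calib.project_rect_to_velo (the real method applies the rect-to-velodyne transform):

import numpy as np

def fake_project_rect_to_velo(pts):       # stand-in for calib.project_rect_to_velo
    return pts.copy()

corners = np.random.rand(5, 8, 3)         # 5 boxes x 8 corners
flat = corners.reshape(-1, 3)             # [40, 3], the shape the projection expects
flat = fake_project_rect_to_velo(flat)
corners_velo = flat.reshape(-1, 8, 3)     # back to [5, 8, 3] for draw_gt_boxes3d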
Code example #4
def show_lidar_with_boxes(pc_velo, objects, calib,
                          img_fov=False, img_width=None, img_height=None): 
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    if 'mlab' not in sys.modules: import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(('All point num: ', pc_velo.shape[0]))
    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0,
            img_width, img_height)
        print(('FOV point num: ', pc_velo.shape[0]))
    draw_lidar(pc_velo, fig=fig)

    for obj in objects:
        if obj.type=='DontCare':continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P) 
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1,y1,z1 = ori3d_pts_3d_velo[0,:]
        x2,y2,z2 = ori3d_pts_3d_velo[1,:]
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        mlab.plot3d([x1, x2], [y1, y2], [z1,z2], color=(0.5,0.5,0.5),
            tube_radius=None, line_width=1, figure=fig)
    mlab.show(1)
Code example #5
 def visualize_proposals(self, pc_rect, prop_boxes, neg_boxes, gt_boxes,
                         pc_seg):
     import mayavi.mlab as mlab
     from viz_util import draw_lidar, draw_gt_boxes3d
     fig = draw_lidar(pc_rect)
     fig = draw_lidar(pc_rect[pc_seg == 1], fig=fig, pts_color=(1, 1, 1))
     fig = draw_gt_boxes3d(prop_boxes,
                           fig,
                           draw_text=False,
                           color=(1, 0, 0))
     fig = draw_gt_boxes3d(neg_boxes, fig, draw_text=False, color=(0, 1, 0))
     fig = draw_gt_boxes3d(gt_boxes, fig, draw_text=False, color=(1, 1, 1))
     raw_input()
Code example #6
File: rcnn_dataset.py  Project: overfitover/PointRCNN
 def viz_sample(self, sample):
     import mayavi.mlab as mlab
     from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
     pc_rect = sample['pointcloud']
     proposal = ProposalObject(sample['proposal_box'])
     proposal.ry = 0
     proposal.t = np.zeros((3,))
     _, prop_box = utils.compute_box_3d(proposal, sample['calib'])
     fig = draw_lidar(pc_rect)
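     # channel 5 presumably carries a per-point foreground label; points marked 1 are drawn in white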
     mask = pc_rect[:,5] == 1
     fig = draw_lidar(pc_rect[mask], fig=fig, pts_color=(1, 1, 1))
     fig = draw_gt_boxes3d([prop_box], fig, draw_text=False, color=(1, 1, 1))
     raw_input()
Code example #7
def show_lidar(pc_velo, calib, fig, img_fov=False, img_width=None, img_height=None):
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    if 'mlab' not in sys.modules: import mayavi.mlab as mlab
    from viz_util import draw_lidar

    #mlab.clf(fig)
    print(('All point num: ', pc_velo.shape[0]))
    #fig = mlab.figure(figure=None, bgcolor=(0,0,0), fgcolor=None, engine=None, size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0,
            img_width, img_height)
        print(('FOV point num: ', pc_velo.shape[0]))
    draw_lidar(pc_velo, fig=fig)
    mlab.show(30)
Code example #8
def viz_kitti_video():
    video_path = os.path.join(ROOT_DIR, 'dataset/2011_09_26/')
    dataset = kitti_object_video(\
        os.path.join(video_path, '2011_09_26_drive_0023_sync/image_02/data'),
        os.path.join(video_path, '2011_09_26_drive_0023_sync/velodyne_points/data'),
        video_path)
    print(len(dataset))
    for i in range(len(dataset)):
        img = dataset.get_image(0)
        pc = dataset.get_lidar(0)
        Image.fromarray(img).show()
        draw_lidar(pc)
        raw_input()
        pc[:, 0:3] = dataset.get_calibration().project_velo_to_rect(pc[:, 0:3])
        draw_lidar(pc)
        raw_input()
    return
Code example #9
def viz_kitti_video():
    video_path = os.path.join(ROOT_DIR, "dataset/2011_09_26/")
    dataset = kitti_object_video(
        os.path.join(video_path, "2011_09_26_drive_0023_sync/image_02/data"),
        os.path.join(video_path, "2011_09_26_drive_0023_sync/velodyne_points/data"),
        video_path,
    )
    print(len(dataset))
    for i in range(len(dataset)):
        img = dataset.get_image(0)
        pc = dataset.get_lidar(0)
        cv2.imshow("video", img)
        draw_lidar(pc)
        raw_input()
        pc[:, 0:3] = dataset.get_calibration().project_velo_to_rect(pc[:, 0:3])
        draw_lidar(pc)
        raw_input()
    return
Code example #10
def show_lidar_with_depth(data_idx,
                          pc_velo,
                          objects,
                          calib,
                          fig,
                          objects_pred=None,
                          depth=None,
                          constraint_box=False,
                          pc_label=False,
                          save=False):
    print(("All point num: ", pc_velo.shape[0]))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pc_label=pc_label)

    color = (0, 1, 0)
    for obj in objects:
        if obj.type == "DontCare":
            continue

        obj.score = norm_score(obj.score)
        if obj.score < threshold:
            continue

        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        print("box3d_pts_3d_velo:")
        print(box3d_pts_3d_velo)

        draw_gt_box3d(box3d_pts_3d_velo, fig, obj)

    mlab.view(
        azimuth=180,
        elevation=65,
        # focalpoint=[12.0909996 , -1.04700089, -2.03249991],
        focalpoint=[0, 0, 0],
        distance=40.0,
        figure=fig)

    # MAKE SURE THAT FLAG IS TRUE
    mlab.savefig(filename=path.join(output_dir, 'pc_%.3d.png' % data_idx),
                 figure=fig)

    # MAKE SURE THAT FLAG IS FALSE
    mlab.show(stop=False)
Code example #11
def test():
    parser = argparse.ArgumentParser()
    parser.add_argument('--avod_config_path',
                    type=str,
                    dest='avod_config_path',
                    required=True,
                    help='avod_config_path')
    parser.add_argument('--sample_idx',
                    type=str,
                    dest='sample_idx',
                    required=True,
                    help='sample id')
    args = parser.parse_args()
    _, _, _, dataset_config = \
        config_builder.get_configs_from_pipeline_file(
            args.avod_config_path, is_training=False)
    dataset = get_dataset(dataset_config, 'val')

    idx = np.argwhere(dataset.sample_names==args.sample_idx).squeeze()
    # print(idx)
    kitti_samples = dataset.load_samples([idx])
    sample = kitti_samples[0]

    label_mask = np.equal(sample[constants.KEY_LABEL_CLASSES], g_type2onehotclass['Car']+1)
    gt_cls = sample[constants.KEY_LABEL_CLASSES][label_mask]
    gt_boxes_3d = sample[constants.KEY_LABEL_BOXES_3D][label_mask]
    gt_boxes_bev = []
    for i in range(len(gt_cls)):
        gt_obj = box_3d_encoder.box_3d_to_object_label(gt_boxes_3d[i], gt_cls[i])
        gt_corner_3d = compute_box_3d(gt_obj)
        gt_boxes_bev.append(gt_corner_3d[:4, [0,2]])
    print(gt_boxes_bev)

    rpn_out = pickle.load(open("rpn_out/%s" % sample[constants.KEY_SAMPLE_NAME], "rb"))
    pos_prop = []
    for prop in rpn_out['proposals_and_scores']:
        corners = compute_box_3d(box_3d_encoder.box_3d_to_object_label(prop[:7]))
        label_idx, iou = find_match_label(corners[:4, [0,2]], gt_boxes_bev)
        if iou > 0.65:
            pos_prop.append(corners)
    pc = sample[constants.KEY_POINT_CLOUD].T
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_gt_boxes3d
    fig = draw_lidar(pc)
    fig = draw_gt_boxes3d(pos_prop, fig, draw_text=False, color=(1, 1, 1))
    input()

    # visualize_rpn_out(sample, rpn_out['proposals_and_scores'])
    prediction = pickle.load(open("%s"%sample[constants.KEY_SAMPLE_NAME], "rb"))
    print(prediction)
    visualize(dataset, sample, prediction)
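
The find_match_label call above pairs each proposal with a ground-truth box by bird's-eye-view IoU; the [:4, [0, 2]] slices keep the four top corners in the x-z plane. The function itself is not shown here; the following is a plausible sketch of that matching using shapely, assuming corners arrive in convex order. It is a hypothetical re-implementation, not the project's own code.

import numpy as np
from shapely.geometry import Polygon

def find_match_label_sketch(prop_corners, gt_corners_list):
    """Return (index, IoU) of the best-overlapping ground-truth BEV box.

    prop_corners: [4, 2] x-z corners of the proposal; gt_corners_list: list of [4, 2].
    """
    prop_poly = Polygon(prop_corners)
    best_idx, best_iou = -1, 0.0
    for i, gt in enumerate(gt_corners_list):
        gt_poly = Polygon(gt)
        inter = prop_poly.intersection(gt_poly).area
        union = prop_poly.union(gt_poly).area
        iou = inter / union if union > 0 else 0.0
        if iou > best_iou:
            best_idx, best_iou = i, iou
    return best_idx, best_iou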
Code example #12
def visualize_rpn_out(sample, proposals_and_scores, rpn_score_threshold=0.1):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_gt_boxes3d
    nms_idxs = nms_on_bev(proposals_and_scores, 0.5)
    proposals_and_scores = proposals_and_scores[nms_idxs]

    proposal_boxes_3d = proposals_and_scores[:, 0:7]
    proposal_scores = proposals_and_scores[:, 7]
    score_mask = proposal_scores > rpn_score_threshold
    # 3D box in the format [x, y, z, l, w, h, ry]
    proposal_boxes_3d = proposal_boxes_3d[score_mask]
    proposal_scores = proposal_scores[score_mask]

    proposal_objs = list(map(lambda pair: ProposalObject(pair[0], pair[1], None, None), zip(proposal_boxes_3d, proposal_scores)))
    proposal_corners = list(map(lambda obj: compute_box_3d(obj), proposal_objs))
    pc = sample[constants.KEY_POINT_CLOUD].T
    fig = draw_lidar(pc)
    fig = draw_gt_boxes3d(proposal_corners, fig, draw_text=False, color=(1, 1, 1))
    input()
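
nms_on_bev is likewise external to this snippet: it keeps the highest-scoring proposals and suppresses lower-scoring ones whose bird's-eye-view footprints overlap past a threshold. A generic greedy sketch over axis-aligned [x1, z1, x2, z2] rectangles follows (the real routine presumably handles rotated boxes):

import numpy as np

def nms_bev_sketch(boxes_xz, scores, iou_threshold=0.5):
    """Greedy NMS over axis-aligned BEV rectangles; illustrative only."""
    order = np.argsort(scores)[::-1]          # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        # intersection of box i with every remaining box
        x1 = np.maximum(boxes_xz[i, 0], boxes_xz[rest, 0])
        z1 = np.maximum(boxes_xz[i, 1], boxes_xz[rest, 1])
        x2 = np.minimum(boxes_xz[i, 2], boxes_xz[rest, 2])
        z2 = np.minimum(boxes_xz[i, 3], boxes_xz[rest, 3])
        inter = np.clip(x2 - x1, 0, None) * np.clip(z2 - z1, 0, None)
        area_i = (boxes_xz[i, 2] - boxes_xz[i, 0]) * (boxes_xz[i, 3] - boxes_xz[i, 1])
        area_r = (boxes_xz[rest, 2] - boxes_xz[rest, 0]) * (boxes_xz[rest, 3] - boxes_xz[rest, 1])
        iou = inter / (area_i + area_r - inter)
        order = rest[iou <= iou_threshold]    # drop everything that overlaps too much
    return np.array(keep, dtype=np.int64)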
Code example #13
def demo():
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))
    data_idx = 38

    # Load data from dataset
    objects = dataset.get_label_objects(data_idx)
    objects[0].print_object()
    img = dataset.get_image(data_idx)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 
    img_height, img_width, img_channel = img.shape
    print(('Image Left shape: ', img.shape))

    img_r = dataset.get_image_r(data_idx)
    img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2RGB) 
    img_height_r, img_width_r, img_channel_r = img_r.shape
    print(('Image Right shape: ', img_r.shape))

    pc_velo = dataset.get_lidar(data_idx)[:,0:3]
    calib = dataset.get_calibration(data_idx)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()

    # Draw 2d and 3d boxes on image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib)
    raw_input()

    # Show all LiDAR points. Draw 3d box in LiDAR point cloud
    print(' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    #raw_input()
    show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)
    raw_input()

    ## Visualize LiDAR points on images
    #print(' -------- LiDAR points projected to image plane --------')
    #show_lidar_on_image(pc_velo, img, calib, img_width, img_height) 
    #raw_input()
    
    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P) 
    box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
    box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
    print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(box3droi_pc_velo, fig=fig)
    draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
    mlab.show(1)
    raw_input()
    
    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(pc_velo,
        calib, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds,:]
    imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)

    cameraUVDepth = np.zeros_like(imgfov_pc_rect)
    cameraUVDepth[:,0:2] = imgfov_pts_2d
    cameraUVDepth[:,2] = imgfov_pc_rect[:,2]

    # Show that the points are exactly the same
    backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
    print(imgfov_pc_velo[0:20])
    print(backprojected_pc_velo[0:20])

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(backprojected_pc_velo, fig=fig)
    raw_input()

    # Only display those points that fall into the left 2d box
    print(' -------- LiDAR points in the left frustum from a 2D box --------')
    xmin,ymin,xmax,ymax = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
    print(('Left 2D box FOV point num: ', boxfov_pc_velo.shape[0]))

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

  
    # Only display those points that fall into the right 2d box
    print(' -------- LiDAR points in the right frustums from a 2D box Method 1--------')
    
    # We need to project xmin,ymin,xmax,ymax to the right image coordinate
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P3)
    xcord   = box3d_pts_2d[:,0]
    ycord   = box3d_pts_2d[:,1]
    xmin_r  = int(min(xcord))
    ymin_r  = int(min(ycord))
    xmax_r  = int(max(xcord))
    ymax_r  = int(max(ycord))
    box2d_r = xmin_r,ymin_r,xmax_r,ymax_r

    boxfov_pc_velo = get_lidar_in_image_fov_right(pc_velo, calib, xmin_r, ymin_r, xmax_r, ymax_r)
    
    print(('Right 2D box FOV point num Method 1: ', boxfov_pc_velo.shape[0]))

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

    # Draw 2d box in right image
    print(' -------- 2D bounding box in right images using Method 1 --------')
    draw_2d_box(img_r, [xmin_r, ymin_r, xmax_r, ymax_r], calib)
    raw_input()
    
    
    # Only display those points that fall into the right 2d box
    print(' -------- LiDAR points in the right frustums from a 2D box Method 2 --------')
    
    xmin_r,ymin_r,xmax_r,ymax_r = objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax

    point = np.zeros((1,3)) 
    point[0,0]= xmin_r
    point[0,1]= ymin_r
    point[0,2]= 30
    point_r = calib.project_rect_to_image_right(calib.project_image_to_rect(point))
    xmin_r = point_r[0,0]
    ymin_r = point_r[0,1]
    
    point[0,0] = xmax_r
    point[0,1] = ymax_r
    point_r = calib.project_rect_to_image_right(calib.project_image_to_rect(point))
    xmax_r = point_r[0,0]
    ymax_r = point_r[0,1] 

    boxfov_pc_velo = get_lidar_in_image_fov_right(pc_velo, calib, xmin_r, ymin_r, xmax_r, ymax_r)


    print(('Right 2D box FOV point num Method 2: ', boxfov_pc_velo.shape[0]))

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()

    # Draw 2d box in right image
    print(' -------- 2D bounding box in right images using Method 2 --------')
    draw_2d_box(img_r, [xmin_r, ymin_r, xmax_r, ymax_r], calib)
    raw_input()
    

    # Displays those points that fall into the intersection of right and left
    print(' -------- LiDAR points in the intersection right and left frustums --------')
    xmin,ymin,xmax,ymax = objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax

    # We need to project xmin,ymin,xmax,ymax to the right image coordinate
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P3)
    xcord   = box3d_pts_2d[:,0]
    ycord   = box3d_pts_2d[:,1]
    xmin_r  = int(min(xcord))
    ymin_r  = int(min(ycord))
    xmax_r  = int(max(xcord))
    ymax_r  = int(max(ycord))
    #box2d_r = xmin_r, ymin_r, xmax_r, ymax_r

    boxfov_pc_velo = get_lidar_in_image_fov_intersect(pc_velo, calib, xmin, ymin, xmax, ymax, xmin_r, ymin_r, xmax_r, ymax_r)
    
    print((' Intersection of frustums point num: ', boxfov_pc_velo.shape[0]))

    fig = mlab.figure(figure=None, bgcolor=(0,0,0),
        fgcolor=None, engine=None, size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()
Code example #14
def demo():
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))
    data_idx = 0

    # Load data from dataset
    objects = dataset.get_label_objects(data_idx)
    objects[0].print_object()
    img = dataset.get_image(data_idx)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image shape: ', img.shape))
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
    calib = dataset.get_calibration(data_idx)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()

    # Draw 2d and 3d boxes on image
    print(' -------- 2D/3D bounding boxes in images --------')
    show_image_with_boxes(img, objects, calib)
    raw_input()

    # Show all LiDAR points. Draw 3d box in LiDAR point cloud
    print(
        ' -------- LiDAR points and 3D boxes in velodyne coordinate --------')
    #show_lidar_with_boxes(pc_velo, objects, calib)
    #raw_input()
    show_lidar_with_boxes(pc_velo, objects, calib, True, img_width, img_height)
    raw_input()

    # Visualize LiDAR points on images
    print(' -------- LiDAR points projected to image plane --------')
    show_lidar_on_image(pc_velo, img, calib, img_width, img_height)
    raw_input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P)
    box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
    box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
    print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    draw_lidar(box3droi_pc_velo, fig=fig)
    draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
    mlab.show(1)
    raw_input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]
    imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)

    cameraUVDepth = np.zeros_like(imgfov_pc_rect)
    cameraUVDepth[:, 0:2] = imgfov_pts_2d
    cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]

    # Show that the points are exactly the same
    backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
    print(imgfov_pc_velo[0:20])
    print(backprojected_pc_velo[0:20])

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    draw_lidar(backprojected_pc_velo, fig=fig)
    raw_input()

    # Only display those points that fall into 2d box
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    xmin,ymin,xmax,ymax = \
        objects[0].xmin, objects[0].ymin, objects[0].xmax, objects[0].ymax
    boxfov_pc_velo = \
        get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
    print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    draw_lidar(boxfov_pc_velo, fig=fig)
    mlab.show(1)
    raw_input()
Code example #15
def main(_):
    tv_utils.set_gpus_to_use()

    # if FLAGS.input_image is None:
    #     logging.error("No input_image was given.")
    #     logging.info(
    #         "Usage: python demo.py --input_image data/test.png "
    #         "[--output_image output_image] [--logdir /path/to/weights] "
    #         "[--gpus GPUs_to_use] ")
    #     exit(1)

    if FLAGS.logdir is None:
        # Download and use weights from the MultiNet Paper
        if 'TV_DIR_RUNS' in os.environ:
            runs_dir = os.path.join(os.environ['TV_DIR_RUNS'], 'KittiSeg')
        else:
            runs_dir = 'RUNS'
        maybe_download_and_extract(runs_dir)
        logdir = os.path.join(runs_dir, default_run)
    else:
        logging.info("Using weights found in {}".format(FLAGS.logdir))
        logdir = FLAGS.logdir

    # Loading hyperparameters from logdir
    hypes = tv_utils.load_hypes_from_logdir(logdir, base_path='hypes')

    logging.info("Hypes loaded successfully.")

    # Loading tv modules (encoder.py, decoder.py, eval.py) from logdir
    modules = tv_utils.load_modules_from_logdir(logdir)
    logging.info("Modules loaded successfully. Starting to build tf graph.")

    # Create tf graph and build module.
    with tf.Graph().as_default():
        # Create placeholder for input
        image_pl = tf.placeholder(tf.float32)
        image = tf.expand_dims(image_pl, 0)

        # build Tensorflow graph using the model from logdir
        prediction = core.build_inference_graph(hypes, modules, image=image)

        logging.info("Graph build successfully.")

        # Create a session for running Ops on the Graph.
        sess = tf.Session()
        saver = tf.train.Saver()

        # Load weights from logdir
        core.load_weights(logdir, sess, saver)

        logging.info("Weights loaded successfully.")

    dataset = kitti_object(
        os.path.join(ROOT_DIR, 'free-space/dataset/KITTI/object'))

    point_pub = rospy.Publisher('cloud', PointCloud2, queue_size=50)
    # picture_pub = rospy.Publisher("kitti_image",newImage,queue_size=50)
    rospy.init_node('point-cloud', anonymous=True)
    # h = std_msgs.msg.Header()
    # h.frame_id="base_link"
    # h.stamp=rospy.Time.now()
    #rate = rospy.Rate(10)
    #point_msg=PointCloud2()

    video_dir = '/home/user/Data/lrx_work/free-space/kitti.avi'
    fps = 10
    num = 4541
    img_size = (1241, 376)
    fourcc = 'mp4v'
    videoWriter = cv2.VideoWriter(video_dir, cv2.VideoWriter_fourcc(*fourcc),
                                  fps, img_size)

    calib = dataset.get_calibration(0)
    for data_idx in range(len(dataset)):
        #objects = dataset.get_label_objects(data_idx)

        # Load and resize input image
        image = dataset.get_image(data_idx)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        #scp.misc.imsave('new.png', image)
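        # note: 'reseize_image' (sic) matches the key spelling used in KittiSeg's hypes files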
        if hypes['jitter']['reseize_image']:
            # Resize input only, if specified in hypes
            image_height = hypes['jitter']['image_height']
            image_width = hypes['jitter']['image_width']
            image = scp.misc.imresize(image,
                                      size=(image_height, image_width),
                                      interp='cubic')
        img_height, img_width, img_channel = image.shape
        print("picture-shape")
        print(len(image))
        print(len(image[0]))
        pc_velo = dataset.get_lidar(data_idx)[:, 0:3]
        print(len(pc_velo))
        velo_len = len(pc_velo)
        #calib = dataset.get_calibration(data_idx)

        # Run KittiSeg model on image
        feed = {image_pl: image}
        softmax = prediction['softmax']
        output = sess.run([softmax], feed_dict=feed)

        # Reshape output from flat vector to 2D Image
        shape = image.shape
        output_image = output[0][:, 1].reshape(shape[0], shape[1])

        # Plot confidences as red-blue overlay
        rb_image = seg.make_overlay(image, output_image)
        # scp.misc.imsave('new0.png', rb_image)

        # Accept all pixel with conf >= 0.5 as positive prediction
        # This creates a `hard` prediction result for class street
        threshold = 0.5
        street_prediction = output_image > threshold

        index = np.where(street_prediction == True)
        chang = len(index[0])
        print(chang)
        # test = np.zeros((velo_len,2),dtype=np.int)
        # for tmp0 in range(chang):
        #     test[tmp0][0]=index[0][tmp0]
        #     test[tmp0][1]=index[1][tmp0]
        print("suoyindayin")
        # if (chang>0):
        #     print(test[0][0])
        #     print(test[0][1])

        pts_2d = calib.project_velo_to_image(pc_velo)
        print(pts_2d.shape)
        # print(pts_2d[1][0])
        # print(pts_2d[1][1])

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        fov_inds = (pts_2d[:,0]<1242) & (pts_2d[:,0]>=0) & \
            (pts_2d[:,1]<370) & (pts_2d[:,1]>=0)
        print(fov_inds.shape)
        # print(fov_inds[1000])
        # print(pc_velo.shape)
        print("okok")
        fov_inds = fov_inds & (pc_velo[:, 0] > 0)
        print(fov_inds.shape)
        imgfov_pts_2d = pts_2d[fov_inds, :]
        imgfov_pc_velo = pc_velo[fov_inds, :]
        pts_2d0 = calib.project_velo_to_image(imgfov_pc_velo)
        fov_inds0 = (pts_2d0[:,0]<len(image[0])) & (pts_2d0[:,0]>=0) & \
            (pts_2d0[:,1]<len(image)) & (pts_2d0[:,1]>=0)
        fov_inds0 = fov_inds0 & (imgfov_pc_velo[:, 0] > 2.0)
        print(fov_inds0.shape)
        print(street_prediction.shape)
        print(pts_2d0.shape)
        # if(chang>0):
        #     print(int(imgfov_pts_2d[5,0]))
        #     print(int(imgfov_pts_2d[5,1]))
        #     print(street_prediction[int(imgfov_pts_2d[5,1]),int(imgfov_pts_2d[5,0])])

        if (chang > 0):
            for i in range(len(fov_inds0)):
                if ((pts_2d0[i, 1] < len(street_prediction)) &
                    (pts_2d0[i, 0] < len(street_prediction[0]))):
                    fov_inds0[i] = fov_inds0[i] & (
                        street_prediction[int(pts_2d0[i, 1]),
                                          int(pts_2d0[i, 0])] == True)
        imgfov_pc_velo0 = imgfov_pc_velo[fov_inds0, :]
        print("number")
        green_image = tv_utils.fast_overlay(image, street_prediction)
        # pub point-cloud topic
        print(imgfov_pc_velo0.shape)
        number = len(imgfov_pc_velo0)

        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = "velodyne"
        points = pc2.create_cloud_xyz32(header, imgfov_pc_velo0)

        # point=Point()

        # for t in range(0,number):
        #     point_x=imgfov_pc_velo0[t][0]
        #     point_y=imgfov_pc_velo0[t][1]
        #     point_z=imgfov_pc_velo0[t][2]
        #     point_msg.points[t].point.x=point_x
        #     point_msg.points[t].point.y=point_y
        #     point_msg.points[t].point.z=point_z

        # point_pub.publish(points)
        # videoWriter.write(green_image)

        # bridge=CvBridge()
        # picture_pub.publish(bridge.cv2_to_imgmsg(green_image,"rgb8"))

        # minx=imgfov_pc_velo0[0][0]
        # miny=imgfov_pc_velo0[0][1]
        # minz=imgfov_pc_velo0[0][2]
        # maxx=imgfov_pc_velo0[0][0]
        # maxy=imgfov_pc_velo0[0][1]
        # maxz=imgfov_pc_velo0[0][2]

        # for t in range(len(imgfov_pc_velo0)):
        #     minx=min(minx,imgfov_pc_velo0[t][0])
        #     miny=min(miny,imgfov_pc_velo0[t][1])
        #     minz=min(minz,imgfov_pc_velo0[t][2])
        #     maxx=max(maxx,imgfov_pc_velo0[t][0])
        #     maxy=max(maxy,imgfov_pc_velo0[t][1])
        #     maxz=max(maxz,imgfov_pc_velo0[t][2])
        # print(minx,miny,minz,maxx,maxy,maxz)
        # width=1024
        # height=1024
        # img_res=np.zeros([width,height,3],dtype=np.uint8)
        # for p in range(len(imgfov_pc_velo0)):
        #     velo_x=5*(int(imgfov_pc_velo0[p][0])+50)
        #     velo_y=5*(int(imgfov_pc_velo0[p][1])+50)
        #     img_res[velo_x][velo_y]=255
        #     scale=25
        #     if((velo_x>scale)&(velo_x+scale<1024)&(velo_y>scale)&(velo_y+scale<1024)):
        #         for q in range(scale):
        #             for m in range(scale):
        #                 img_res[velo_x-q][velo_y-m]=255
        #                 img_res[velo_x-q][velo_y+m]=255
        #                 img_res[velo_x+q][velo_y-m]=255
        #                 img_res[velo_x+q][velo_y+m]=255
        # scp.misc.imsave('res.png',img_res)

        draw_lidar(imgfov_pc_velo0, fig=fig)

        # for obj in objects:
        #     if obj.type == 'DontCare': continue
        #     # Draw 3d bounding box
        #     box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        #     box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        #     # Draw heading arrow
        #     ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        #     ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        #     x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        #     x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        #     draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        #     mlab.plot3d([x1, x2], [y1, y2], [z1, z2], color=(0.5, 0.5, 0.5),
        #                 tube_radius=None, line_width=1, figure=fig)

        #mlab.show(1)

        # Plot the hard prediction as green overlay

        # Save output images to disk.
        if FLAGS.output_image is None:
            output_base_name = image
        else:
            output_base_name = FLAGS.output_image

        # raw_image_name = output_base_name.split('.')[0] + '_raw.png'
        # rb_image_name = output_base_name.split('.')[0] + '_rb.png'
        # green_image_name = output_base_name.split('.')[0] + '_green.png'

        # scp.misc.imsave('1.png', output_image)
        # scp.misc.imsave('2.png', rb_image)
        # scp.misc.imsave('3.png', green_image)
        raw_input()
Code example #16
def show_lidar_with_depth(
    pc_velo,
    objects,
    calib,
    fig,
    img_fov=False,
    img_width=None,
    img_height=None,
    objects_pred=None,
    depth=None,
    cam_img=None,
    constraint_box=False,
    pc_label=False,
    save=False,
):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    if img_fov:
        pc_velo_index = get_lidar_index_in_image_fov(
            pc_velo[:, :3], calib, 0, 0, img_width, img_height
        )
        pc_velo = pc_velo[pc_velo_index, :]
        print(("FOV point num: ", pc_velo.shape))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pc_label=pc_label)

    # Draw depth
    if depth is not None:
        depth_pc_velo = calib.project_depth_to_velo(depth, constraint_box)

        intensity = np.ones((depth_pc_velo.shape[0], 1)) * 0.5
        depth_pc_velo = np.hstack((depth_pc_velo, intensity))
        print("depth_pc_velo:", depth_pc_velo.shape)
        print("depth_pc_velo:", type(depth_pc_velo))
        print(depth_pc_velo[:5])
        draw_lidar(depth_pc_velo, fig=fig, pts_color=(1, 1, 1))

        if save:
            data_idx = 0
            vely_dir = "data/obj/training/depth_pc"
            save_filename = os.path.join(vely_dir, "%06d.bin" % (data_idx))
            print(save_filename)
            # np.save(save_filename+".npy", np.array(depth_pc_velo))
            depth_pc_velo = depth_pc_velo.astype(np.float32)
            depth_pc_velo.tofile(save_filename)

    color = (0, 1, 0)
    for obj in objects:
        if obj.type == "DontCare":
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        print("box3d_pts_3d_velo:")
        print(box3d_pts_3d_velo)

        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)

    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type == "DontCare":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )
    mlab.show(1)
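
calib.project_depth_to_velo above turns a dense depth map into a velodyne-frame cloud. The underlying idea is standard pinhole backprojection followed by the rect-to-velo transform; here is a sketch of the backprojection half, with illustrative KITTI-like intrinsics (the real values live in each frame's calib file):

import numpy as np

def depth_to_rect_sketch(depth, fx=721.5, fy=721.5, cx=609.6, cy=172.9):
    """Backproject an H x W depth map to [H*W, 3] rect-camera points."""
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth.reshape(-1)
    x = (u.reshape(-1) - cx) * z / fx
    y = (v.reshape(-1) - cy) * z / fy
    pts_rect = np.stack([x, y, z], axis=1)
    # the real helper would then apply calib.project_rect_to_velo(pts_rect)
    return pts_rect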
Code example #17
def show_lidar_with_boxes(pc_velo,
                          objects,
                          calib,
                          img_fov=False,
                          img_width=None,
                          img_height=None,
                          objects_pred=None,
                          depth=None,
                          cam_img=None,
                          data_idx=None,
                          pseudo_lidar=None):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    if "mlab" not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    fig = mlab.figure(
        figure=None,
        # bgcolor=(1, 1, 1),  # white background
        bgcolor=(0, 0, 0),  # black background
        fgcolor=None,
        engine=None,
        size=(1000, 500)
        # figure=None
    )

    if img_fov:  # filter pcl out of fov
        pc_velo = get_lidar_in_image_fov(pc_velo[:, 0:3], calib, 0, 0,
                                         img_width, img_height)
        print(pc_velo.shape)
        print(("FOV point num: ", pc_velo.shape[0]))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pts_color=(0, 1, 0))

    # show_box_lidar(objects, calib, data_idx, fig)

    car_obj = []
    color = (1, 0, 0)

    for obj in objects:
        if obj.type == "DontCare":
            print("############## Dont care gt: t: {}, (h: {}, l: {}, w: {})".
                  format(obj.t, obj.h, obj.l, obj.w))
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)

        if obj.type == "Car":
            car_obj.append(box3d_pts_3d)
        draw_gt_boxes3d([box3d_pts_3d_velo],
                        fig=fig,
                        color=color,
                        data_idx=data_idx,
                        type=obj.type,
                        occlu=obj.occlusion)

        # Draw depth from ego-vehicle to objects
        if depth is not None:
            # import pdb; pdb.set_trace()
            depth_pt3d = depth_region_pt3d(depth, obj)
            depth_UVDepth = np.zeros_like(depth_pt3d)
            depth_UVDepth[:, 0] = depth_pt3d[:, 1]
            depth_UVDepth[:, 1] = depth_pt3d[:, 0]
            depth_UVDepth[:, 2] = depth_pt3d[:, 2]
            print("depth_pt3d:", depth_UVDepth)
            dep_pc_velo = calib.project_image_to_velo(depth_UVDepth)
            print("dep_pc_velo:", dep_pc_velo)

            draw_lidar(dep_pc_velo, fig=fig, pts_color=(1, 1, 1))

        # Draw heading arrow
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        mlab.plot3d(
            [x1, x2],
            [y1, y2],
            [z1, z2],
            color=color,
            tube_radius=None,
            line_width=1,
            figure=fig,
        )
    mlab.show()
Code example #18
def show_lidar_with_depth(pc_velo,
                          objects,
                          calib,
                          fig,
                          img_fov=False,
                          img_width=None,
                          img_height=None,
                          objects_pred=None,
                          depth=None,
                          cam_img=None,
                          data_idx=0,
                          constraint_box=False,
                          pc_label=False,
                          save=False,
                          fov_restrict=False):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    if img_fov:
        pc_velo_index = get_lidar_index_in_image_fov(pc_velo[:, :3], calib, 0,
                                                     0, img_width, img_height)
        pc_velo = pc_velo[pc_velo_index, :]
        print(("FOV point num: ", pc_velo.shape))
        pc_velo = pc_velo[pc_velo[:, 0] < 70.4]

    if fov_restrict:
        x_range = [0, 70.4]
        y_range = [-40, 40]
        z_range = [-1, 3]

        pts_x, pts_y, pts_z = pc_velo[:, 0], pc_velo[:, 1], pc_velo[:, 2]
        range_flag = (pts_x >= x_range[0]) & (pts_x <= x_range[1]) \
                    & (pts_y >= y_range[0]) & (pts_y <= y_range[1]) \
                    & (pts_z >= z_range[0]) & (pts_z <= z_range[1])
        pc_velo = pc_velo[range_flag, :]

    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig, pc_label=pc_label, pts_scale=1.0)

    color = (0, 1, 0)
    for obj in objects:
        if obj.type != "Car" or obj.t[2] > 70.4:
            continue
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        print("box3d_pts_3d_velo:")
        print(box3d_pts_3d_velo)

        color_list = [(0, 1, 0), (0, 0, 1)]

        draw_gt_boxes3d([box3d_pts_3d_velo],
                        fig=fig,
                        color=color,
                        label=obj.type,
                        color_list=color_list,
                        draw_text=False)

    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type != "Car":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)

            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(
                obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )

    if (args.save_img):
        mlab.savefig(args.save_dir + 'lidar/' + str(data_idx).zfill(6) +
                     '.png')
        mlab.clf()
    else:
        mlab.show(1)
Code example #19
def extract_frustum_data_rgb_detection(det_filename,
                                       split,
                                       output_filename,
                                       viz=True,
                                       type_whitelist=['Car'],
                                       img_height_threshold=30,
                                       img_width_threshold=20,
                                       lidar_point_threshold=50):
    ''' Extract point clouds in frustums extruded from 2D detection boxes.
        Update: Lidar points and 3d boxes are in *rect camera* coord system
            (as that in 3d box label files)
        
    Input:
        det_filename: string, each line is
            img_path typeid confidence xmin ymin xmax ymax
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        type_whitelist: a list of strings, object types we are interested in.
        img_height_threshold: int, neglect 2D boxes shorter than this.
        img_width_threshold: int, neglect 2D boxes narrower than this.
        lidar_point_threshold: int, neglect frustums with too few points.
    Output:
        None (will write a .pickle file to the disk)
    '''
    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'),
                           split)
    det_id_list, det_type_list, det_box2d_list, det_prob_list = \
        read_det_file(det_filename)
    cache_id = -1
    cache = None

    # print(det_id_list)
    id_list = []
    type_list = []
    box2d_list = []
    prob_list = []
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    frustum_angle_list = []  # angle of 2d box center from pos x-axis

    for det_idx in range(len(det_id_list)):
        data_idx = det_id_list[det_idx]
        print('det idx: %d/%d, data idx: %d' % \
            (det_idx, len(det_id_list), data_idx))
        if cache_id != data_idx:
            calib = dataset.get_calibration(data_idx)  # 3 by 4 matrix
            pc_velo = dataset.get_lidar(data_idx)
            pc_rect = np.zeros_like(pc_velo)
            pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
            pc_rect[:, 3] = pc_velo[:, 3]
            img = dataset.get_image(data_idx)
            img_height, img_width, img_channel = img.shape
            _, pc_image_coord, img_fov_inds = get_lidar_in_image_fov(\
                pc_velo[:,0:3], calib, 0, 0, img_width, img_height, True)
            cache = [calib, pc_rect, pc_image_coord, img_fov_inds]
            cache_id = data_idx
        else:
            calib, pc_rect, pc_image_coord, img_fov_inds = cache

        if det_type_list[det_idx] not in type_whitelist:
            print('WTF not in list')
            print(data_idx)
            continue

        # 2D BOX: Get pts rect backprojected
        xmin, ymin, xmax, ymax = det_box2d_list[det_idx]
        offset = 0
        xmax = min(xmax + offset, img_width)
        xmin = max(xmin - offset, 0)
        ymax = min(ymax + offset, img_height)
        ymin = max(ymin - offset, 0)
        box_fov_inds = (pc_image_coord[:,0]<xmax) & \
            (pc_image_coord[:,0]>=xmin) & \
            (pc_image_coord[:,1]<ymax) & \
            (pc_image_coord[:,1]>=ymin)
        box_fov_inds = box_fov_inds & img_fov_inds
        pc_in_box_fov = pc_rect[box_fov_inds, :]
        # Get frustum angle (according to center pixel in 2D BOX)
        box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
        uvdepth = np.zeros((1, 3))
        uvdepth[0, 0:2] = box2d_center
        uvdepth[0, 2] = 20  # some random depth
        box2d_center_rect = calib.project_image_to_rect(uvdepth)
        frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                        box2d_center_rect[0, 0])

        # Pass objects that are too small
        if ymax-ymin<img_height_threshold or xmax-xmin<img_width_threshold or\
            len(pc_in_box_fov)<lidar_point_threshold:
            print('WTF too small')
            continue

        id_list.append(data_idx)
        type_list.append(det_type_list[det_idx])
        box2d_list.append(det_box2d_list[det_idx])
        prob_list.append(det_prob_list[det_idx])
        input_list.append(pc_in_box_fov)
        frustum_angle_list.append(frustum_angle)

    with open(output_filename, 'wb') as fp:
        pickle.dump(id_list, fp)
        pickle.dump(box2d_list, fp)
        pickle.dump(input_list, fp)
        pickle.dump(type_list, fp)
        pickle.dump(frustum_angle_list, fp)
        pickle.dump(prob_list, fp)
    print(id_list)
    if viz:
        print("Enter viz")
        import mayavi.mlab as mlab
        from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d
        for i in range(10):
            p1 = input_list[i]
            print(id_list[i])
            fig = mlab.figure(figure=None,
                              bgcolor=(0, 0, 0),
                              fgcolor=None,
                              engine=None,
                              size=(500, 500))
            draw_lidar(p1, fig=fig, pts_scale=0.1, pts_mode='point')
            #mlab.points3d(p1[:,0], p1[:,1], p1[:,2], p1[:,1], mode='point',
            #    colormap='gnuplot', scale_factor=1, figure=fig)
            #fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4),
            #   fgcolor=None, engine=None, size=(500, 500))
            #mlab.points3d(p1[:,2], -p1[:,0], -p1[:,1], seg, mode='point',
            #    colormap='gnuplot', scale_factor=1, figure=fig)
            raw_input()
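
A note on the frustum angle computed above: the 2D box center is backprojected at an arbitrary depth (20, "some random depth" in the code) purely to obtain a ray direction, and the angle of that ray in the rect x-z plane is -arctan2(z, x). Frustum PointNets uses this angle to rotate each frustum into a canonical center view; the depth choice cancels out because arctan2 is scale-invariant:

import numpy as np

ray = np.array([3.0, 1.5, 20.0])             # rect-frame point on the center ray
frustum_angle = -np.arctan2(ray[2], ray[0])  # about -81.5 degrees here
assert np.isclose(frustum_angle, -np.arctan2(2 * ray[2], 2 * ray[0]))  # depth-independent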
Code example #20
    cv2.imwrite('messigray.png', img2)
    raw_input()

    # Show LiDAR points that are in the 3d box
    print(' -------- LiDAR points in a 3D bounding box --------')
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(objects[0], calib.P)
    box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
    box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
    print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))

    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    draw_lidar(box3droi_pc_velo, fig=fig)
    draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
    mlab.show(1)
    raw_input()

    # UVDepth Image and its backprojection to point clouds
    print(' -------- LiDAR points in a frustum from a 2D box --------')
    imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
        pc_velo, calib, 0, 0, img_width, img_height, True)
    imgfov_pts_2d = pts_2d[fov_inds, :]
    imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)

    cameraUVDepth = np.zeros_like(imgfov_pc_rect)
    cameraUVDepth[:, 0:2] = imgfov_pts_2d
    cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]
Code example #21
File: show_proposal_pc.py  Project: JenningsL/avod
    # integer data type
    return pick


if __name__ == '__main__':
    frame_id = sys.argv[1]

    pointcloud_file_path = './kitti/velodyne/{0}.bin'.format(frame_id)
    with open(pointcloud_file_path, 'rb') as fid:
        data_array = np.fromfile(fid, np.single)
    points = data_array.reshape(-1, 4)

    calib_filename = os.path.join('./kitti/calib/', '{0}.txt'.format(frame_id))
    calib = kitti_util.Calibration(calib_filename)

    fig = draw_lidar(points)

    # ground truth
    obj_labels = obj_utils.read_labels('./kitti/label_2', int(frame_id))
    gt_boxes = []
    for obj in obj_labels:
        if obj.type not in ['Car']:
            continue
        _, corners = kitti_util.compute_box_3d(obj, calib.P)
        corners_velo = calib.project_rect_to_velo(corners)
        gt_boxes.append(corners_velo)
    fig = draw_gt_boxes3d(gt_boxes, fig, color=(1, 0, 0))

    # proposals
    proposal_objs = load_proposals(frame_id)
    boxes = []
Code example #22
File: kitti_object.py  Project: rui-qian/voxelnet
def show_lidar_with_numpy_boxes(pc_rect,
                                objects,
                                calib,
                                save_figure,
                                save_figure_dir='',
                                img_name='',
                                img_fov=False,
                                img_width=None,
                                img_height=None,
                                color=(1, 1, 1)):
    ''' Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) '''
    if 'mlab' not in sys.modules: import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    pc_velo = calib.project_rect_to_velo(pc_rect)
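    # unlike show_lidar_with_boxes, this variant receives points in rect
    # camera coords (pc_rect), so map them back to the velodyne frame first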

    print(('All point num: ', pc_velo.shape[0]))
    fig = mlab.figure(figure=None,
                      bgcolor=(0.5, 0.5, 0.5),
                      fgcolor=None,
                      engine=None,
                      size=(1600, 1000))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo, calib, 0, 0, img_width,
                                         img_height)
        print(('FOV point num: ', pc_velo.shape[0]))
    draw_lidar(pc_velo, fig=fig)
    for obj in objects:
        # Draw 3d bounding box
        box3d_pts_2d, box3d_pts_3d = utils.compute_numpy_boxes_3d(obj, calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        # Draw heading arrow
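        # (the two projected points run from the box center to a point offset
        # along the ry heading, giving the arrow endpoints plotted below)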
        ori3d_pts_2d, ori3d_pts_3d = utils.compute_numpy_orientation_3d(
            obj, calib.P)
        ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
        x1, y1, z1 = ori3d_pts_3d_velo[0, :]
        x2, y2, z2 = ori3d_pts_3d_velo[1, :]
        draw_gt_boxes3d([box3d_pts_3d_velo],
                        fig=fig,
                        color=color,
                        draw_text=False)
        mlab.plot3d([x1, x2], [y1, y2], [z1, z2],
                    color=(0.8, 0.8, 0.8),
                    tube_radius=None,
                    line_width=1,
                    figure=fig)
    # mlab.show(1)
    # mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance='auto', figure=fig)
    mlab.view(azimuth=180,
              elevation=60,
              focalpoint=[12.0909996, -1.04700089, 5.03249991],
              distance=62.0,
              figure=fig)
    if save_figure:
        if save_figure_dir != '':
            save_figure_dir = os.path.join(root_dir(), save_figure_dir)
            print(save_figure_dir)
        if not os.path.exists(save_figure_dir):
            os.makedirs(save_figure_dir)
            print("done!!!!")
        filename = os.path.join(save_figure_dir, img_name)
        mlab.savefig(filename)
    time.sleep(0.03)
Code example #26
def demo(data_idx=11,
         object_idx=0,
         show_images=True,
         show_lidar=True,
         show_lidar_2d=True,
         show_lidar_box=True,
         show_project=True,
         show_lidar_frustum=True):
    import mayavi.mlab as mlab
    from viz_util import draw_lidar, draw_lidar_simple, draw_gt_boxes3d
    dataset = kitti_object(os.path.join(ROOT_DIR, 'dataset/KITTI/object'))

    # Load data from dataset
    objects = dataset.get_label_objects(
        data_idx)  #objects = [Object3d(line) for line in lines]
    objects[object_idx].print_object()

    calib = dataset.get_calibration(
        data_idx)  #utils.Calibration(calib_filename)
    box2d = objects[object_idx].box2d
    xmin, ymin, xmax, ymax = box2d
    box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
    uvdepth = np.zeros((1, 3))
    uvdepth[0, 0:2] = box2d_center
    uvdepth[0, 2] = 20  # some random depth
    box2d_center_rect = calib.project_image_to_rect(uvdepth)
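    # the assumed depth only scales the ray, not its direction; frustum_angle
    # is the negated angle of the box-center ray from +x in the x-z plane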
    frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                    box2d_center_rect[0, 0])
    print('frustum_angle:', frustum_angle)
    '''
    Type, truncation, occlusion, alpha: Pedestrian, 0, 0, -0.200000
    2d bbox (x0,y0,x1,y1): 712.400000, 143.000000, 810.730000, 307.920000
    3d bbox h,w,l: 1.890000, 0.480000, 1.200000
    3d bbox location, ry: (1.840000, 1.470000, 8.410000), 0.010000
    '''
    img = dataset.get_image(data_idx)  #(370, 1224, 3)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_height, img_width, img_channel = img.shape
    print(('Image shape: ', img.shape))
    pc_velo = dataset.get_lidar(data_idx)[:, 0:3]  #(115384, 3)
    calib = dataset.get_calibration(
        data_idx)  #utils.Calibration(calib_filename)

    ## Draw lidar in rect camera coord
    #print(' -------- LiDAR points in rect camera coordination --------')
    #pc_rect = calib.project_velo_to_rect(pc_velo)
    #fig = draw_lidar_simple(pc_rect)
    #raw_input()
    # Draw 2d and 3d boxes on image
    if show_images:
        print(' -------- 2D/3D bounding boxes in images --------')
        show_image_with_boxes(img, objects, calib)
        raw_input()

    if show_lidar:
        # Show all LiDAR points. Draw 3d box in LiDAR point cloud
        print(
            ' -------- LiDAR points and 3D boxes in velodyne coordinate --------'
        )
        #show_lidar_with_boxes(pc_velo, objects, calib)
        #raw_input()
        show_lidar_with_boxes(pc_velo, objects, calib, True, img_width,
                              img_height)
        raw_input()

    if show_lidar_2d:
        # Visualize LiDAR points on images
        print(' -------- LiDAR points projected to image plane --------')
        show_lidar_on_image(pc_velo,
                            img,
                            calib,
                            img_width,
                            img_height,
                            showtime=True)
        raw_input()

    if show_lidar_box:
        # Show LiDAR points that are in the 3d box
        print(' -------- LiDAR points in a 3D bounding box --------')
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(
            objects[object_idx], calib.P)
        box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
        box3droi_pc_velo, _ = extract_pc_in_box3d(pc_velo, box3d_pts_3d_velo)
        print(('Number of points in 3d box: ', box3droi_pc_velo.shape[0]))

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        draw_lidar(box3droi_pc_velo, fig=fig)
        draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig)
        mlab.show(1)
        raw_input()

    if show_project:
        # UVDepth Image and its backprojection to point clouds
        print(' -------- LiDAR points in a frustum from a 2D box --------')
        imgfov_pc_velo, pts_2d, fov_inds = get_lidar_in_image_fov(
            pc_velo, calib, 0, 0, img_width, img_height, True)
        imgfov_pts_2d = pts_2d[fov_inds, :]
        imgfov_pc_rect = calib.project_velo_to_rect(imgfov_pc_velo)

        cameraUVDepth = np.zeros_like(imgfov_pc_rect)
        cameraUVDepth[:, 0:2] = imgfov_pts_2d
        cameraUVDepth[:, 2] = imgfov_pc_rect[:, 2]

        # Show that the points are exactly the same
        backprojected_pc_velo = calib.project_image_to_velo(cameraUVDepth)
        print(imgfov_pc_velo[0:20])
        print(backprojected_pc_velo[0:20])

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        draw_lidar(backprojected_pc_velo, fig=fig)
        raw_input()

    if show_lidar_frustum:
        # Only display those points that fall into 2d box
        print(' -------- LiDAR points in a frustum from a 2D box --------')
        xmin,ymin,xmax,ymax = \
            objects[object_idx].xmin, objects[object_idx].ymin, objects[object_idx].xmax, objects[object_idx].ymax
        boxfov_pc_velo = \
            get_lidar_in_image_fov(pc_velo, calib, xmin, ymin, xmax, ymax)
        print(('2d box FOV point num: ', boxfov_pc_velo.shape[0]))

        fig = mlab.figure(figure=None,
                          bgcolor=(0, 0, 0),
                          fgcolor=None,
                          engine=None,
                          size=(1000, 500))
        draw_lidar(boxfov_pc_velo, fig=fig)
        mlab.show(1)
        raw_input()
Code example #27
def show_lidar_with_boxes(
    pc_velo,
    objects,
    calib,
    img_fov=False,
    img_width=None,
    img_height=None,
    objects_pred=None,
    depth=None,
    cam_img=None,
):
    """ Show all LiDAR points.
        Draw 3d box in LiDAR point cloud (in velo coord system) """
    if "mlab" not in sys.modules:
        import mayavi.mlab as mlab
    from viz_util import draw_lidar_simple, draw_lidar, draw_gt_boxes3d

    print(("All point num: ", pc_velo.shape[0]))
    fig = mlab.figure(figure=None,
                      bgcolor=(0, 0, 0),
                      fgcolor=None,
                      engine=None,
                      size=(1000, 500))
    if img_fov:
        pc_velo = get_lidar_in_image_fov(pc_velo[:, 0:3], calib, 0, 0,
                                         img_width, img_height)
        print(("FOV point num: ", pc_velo.shape[0]))
    print("pc_velo", pc_velo.shape)
    draw_lidar(pc_velo, fig=fig)
    # pc_velo=pc_velo[:,0:3]

    color = (0, 1, 0)
    for obj in objects:
        if obj.type == "Car":
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)

            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)

            # Draw depth
            if depth is not None:
                # import pdb; pdb.set_trace()
                depth_pt3d = depth_region_pt3d(depth, obj)
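                # depth_pt3d is (row, col, depth); swap the first two columns
                # to (u, v, depth) before back-projecting to the velo frame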
                depth_UVDepth = np.zeros_like(depth_pt3d)
                depth_UVDepth[:, 0] = depth_pt3d[:, 1]
                depth_UVDepth[:, 1] = depth_pt3d[:, 0]
                depth_UVDepth[:, 2] = depth_pt3d[:, 2]
                print("depth_pt3d:", depth_UVDepth)
                dep_pc_velo = calib.project_image_to_velo(depth_UVDepth)
                print("dep_pc_velo:", dep_pc_velo)

                draw_lidar(dep_pc_velo, fig=fig, pts_color=(0, 0, 1))
            #

            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(
                obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )

    if objects_pred is not None:
        color = (1, 0, 0)
        for obj in objects_pred:
            if obj.type != "Car":
                continue
            # Draw 3d bounding box
            box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib.P)
            box3d_pts_3d_velo = calib.project_rect_to_velo(box3d_pts_3d)
            print("box3d_pts_3d_velo:")
            print(box3d_pts_3d_velo)
            draw_gt_boxes3d([box3d_pts_3d_velo], fig=fig, color=color)
            # Draw heading arrow
            ori3d_pts_2d, ori3d_pts_3d = utils.compute_orientation_3d(
                obj, calib.P)
            ori3d_pts_3d_velo = calib.project_rect_to_velo(ori3d_pts_3d)
            x1, y1, z1 = ori3d_pts_3d_velo[0, :]
            x2, y2, z2 = ori3d_pts_3d_velo[1, :]
            mlab.plot3d(
                [x1, x2],
                [y1, y2],
                [z1, z2],
                color=color,
                tube_radius=None,
                line_width=1,
                figure=fig,
            )
Code example #28
def extract_proposal_data(idx_filename,
                          split,
                          output_filename,
                          viz=False,
                          perturb_box3d=False,
                          augmentX=1,
                          type_whitelist=['Car'],
                          kitti_path=os.path.join(ROOT_DIR,
                                                  'dataset/KITTI/object'),
                          pos_neg_ratio=None,
                          source='label'):
    ''' Extract point clouds and corresponding annotations in frustums
        generated from 2D bounding boxes.
        Lidar points and 3d boxes are in the *rect camera* coord system
        (as in the 3d box label files).

    Input:
        idx_filename: string, each line of the file is a sample ID
        split: string, either training or testing
        output_filename: string, the name for output .pickle file
        viz: bool, whether to visualize extracted data
        perturb_box3d: bool, whether to perturb the 3D proposal boxes
            (used for data augmentation in train set)
        augmentX: scalar, how many augmentations to have for each proposal.
        type_whitelist: a list of strings, object types we are interested in.
        kitti_path: string, root directory of the KITTI object dataset.
        pos_neg_ratio: float or None; if set, samples are balanced by type
            via balance() before being written out.
        source: string, 'label' or 'avod', where proposals come from.
    Output:
        None (will write a .pickle file to the disk)
    '''
    # import mayavi.mlab as mlab
    dataset = kitti_object_avod(kitti_path, split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in rect camera coord
    input_list = []  # channel number = 4, xyz,intensity in rect camera coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. Car
    heading_list = []  # ry (along y-axis in rect camera coord), radians of
    # (cont.) clockwise angle from positive x axis in velo coord.
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = []  # angle of 2d box center from pos x-axis
    roi_feature_list = []  # feature map crop for proposal region
    # type_idxs = {key: [] for key in type_whitelist} # idxs of each type
    type_idxs = {key: [] for key in type_whitelist[:3]}  # idxs of each type
    type_idxs['NonCar'] = []
    type_idxs['NonPeople'] = []

    prop_idx = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)  # 3 by 4 matrix
        # ground truth
        objects = dataset.get_label_objects(data_idx)
        # proposal boxes from avod
        if source == 'avod':
            proposals = dataset.get_proposals(data_idx,
                                              rpn_score_threshold=0.1,
                                              nms_iou_thres=0.8)
        else:
            proposals = []

        pc_velo = dataset.get_lidar(data_idx)
        pc_rect = np.zeros_like(pc_velo)
        pc_rect[:, 0:3] = calib.project_velo_to_rect(pc_velo[:, 0:3])
        pc_rect[:, 3] = pc_velo[:, 3]
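        # xyz converted to the rect camera frame; intensity carried over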

        gt_boxes_xy = []
        gt_boxes_3d = []
        objects = [obj for obj in objects if obj.type in type_whitelist]
        for obj in objects:
            _, gt_corners_3d = utils.compute_box_3d(obj, calib.P)
            gt_boxes_xy.append(gt_corners_3d[:4, [0, 2]])
            gt_boxes_3d.append(gt_corners_3d)
            if source == 'label':
                # generate proposal from label to ensure recall
                true_prop = get_proposal_from_label(obj, calib, type_whitelist)
                proposals.append(true_prop)

        pos_proposals_in_frame = []
        neg_proposals_in_frame = []
        for prop_ in proposals:
            prop_corners_image_2d, prop_corners_3d = utils.compute_box_3d(
                prop_, calib.P)
            if prop_corners_image_2d is None:
                print('skip proposal behind camera')
                continue

            prop_box_xy = prop_corners_3d[:4, [0, 2]]
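            # the [:4, [0, 2]] slice keeps one face's x-z coordinates, i.e.
            # the proposal footprint used for bird's-eye-view matching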
            # find corresponding label object
            obj_idx, iou_with_gt = find_match_label(prop_box_xy, gt_boxes_xy)

            if obj_idx == -1:
                # non-object
                obj_type = 'NonObject'
                gt_box_3d = np.zeros((8, 3))
                heading_angle = 0
                box3d_size = np.zeros((1, 3))
                frustum_angle = 0
                neg_proposals_in_frame.append(prop_corners_3d)

                # get points within proposal box
                _, prop_inds = extract_pc_in_box3d(pc_rect, prop_corners_3d)
                pc_in_prop_box = pc_rect[prop_inds, :]
                # shuffle points order
                np.random.shuffle(pc_in_prop_box)
                label = np.zeros((pc_in_prop_box.shape[0]))

                id_list.append(data_idx)
                # box2d_list.append(np.array([xmin,ymin,xmax,ymax]))
                box3d_list.append(gt_box_3d)
                input_list.append(pc_in_prop_box)
                label_list.append(label)
                type_list.append(obj_type)
                heading_list.append(heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)
                roi_feature_list.append(prop_.roi_features)
                # type_idxs[obj_type].append(prop_idx)
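                # bucket negatives by size: a footprint over 2 m in length or
                # width counts as car-like, smaller ones as pedestrian-like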
                if prop_.l > 2 or prop_.w > 2:
                    type_idxs['NonCar'].append(prop_idx)
                else:
                    type_idxs['NonPeople'].append(prop_idx)
                prop_idx += 1
            else:
                # only do augmentation on objects
                for _ in range(augmentX):
                    prop = copy.deepcopy(prop_)
                    if perturb_box3d:
                        prop = random_shift_box3d(prop)
                    prop_corners_image_2d, prop_corners_3d = utils.compute_box_3d(
                        prop, calib.P)
                    if prop_corners_image_2d is None:
                        print('skip proposal behind camera')
                        continue
                    # get points within proposal box
                    _, prop_inds = extract_pc_in_box3d(pc_rect,
                                                       prop_corners_3d)
                    pc_in_prop_box = pc_rect[prop_inds, :]
                    # shuffle points order
                    np.random.shuffle(pc_in_prop_box)
                    # segmentation label
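                    # (1 = point inside the matched ground-truth box, 0 = clutter)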
                    label = np.zeros((pc_in_prop_box.shape[0]))

                    obj = objects[obj_idx]
                    obj_type = obj.type
                    gt_box_3d = gt_boxes_3d[obj_idx]

                    _, inds = extract_pc_in_box3d(pc_in_prop_box, gt_box_3d)
                    label[inds] = 1
                    # Reject object without points
                    if np.sum(label) == 0:
                        print('Reject object without points')
                        continue
                    pos_proposals_in_frame.append(prop_corners_3d)
                    # Get 3D BOX heading
                    heading_angle = obj.ry
                    # Get 3D BOX size
                    box3d_size = np.array([obj.l, obj.w, obj.h])
                    # Get frustum angle
                    xmin, ymin = prop_corners_image_2d.min(0)
                    xmax, ymax = prop_corners_image_2d.max(0)
                    box2d_center = np.array([(xmin + xmax) / 2.0,
                                             (ymin + ymax) / 2.0])
                    uvdepth = np.zeros((1, 3))
                    uvdepth[0, 0:2] = box2d_center
                    uvdepth[0, 2] = 20  # some random depth
                    box2d_center_rect = calib.project_image_to_rect(uvdepth)
                    frustum_angle = -1 * np.arctan2(box2d_center_rect[0, 2],
                                                    box2d_center_rect[0, 0])

                    id_list.append(data_idx)
                    # box2d_list.append(np.array([xmin,ymin,xmax,ymax]))
                    box3d_list.append(gt_box_3d)
                    input_list.append(pc_in_prop_box)
                    label_list.append(label)
                    type_list.append(obj_type)
                    heading_list.append(heading_angle)
                    box3d_size_list.append(box3d_size)
                    frustum_angle_list.append(frustum_angle)
                    roi_feature_list.append(prop.roi_features)
                    type_idxs[obj_type].append(prop_idx)
                    prop_idx += 1
                    # visualize one proposal
                    if viz and False:
                        import mayavi.mlab as mlab
                        from viz_util import draw_lidar, draw_gt_boxes3d
                        fig = draw_lidar(pc_rect)
                        fig = draw_gt_boxes3d([gt_box_3d],
                                              fig,
                                              color=(1, 0, 0))
                        fig = draw_gt_boxes3d([prop_corners_3d],
                                              fig,
                                              draw_text=False,
                                              color=(1, 1, 1))
                        # roi_feature_map
                        roi_features_size = 7 * 7 * 32
                        img_roi_features = prop.roi_features[
                            0:roi_features_size].reshape((7, 7, -1))
                        bev_roi_features = prop.roi_features[
                            roi_features_size:].reshape((7, 7, -1))
                        img_roi_features = np.amax(img_roi_features, axis=-1)
                        bev_roi_features = np.amax(bev_roi_features, axis=-1)
                        fig1 = mlab.figure(figure=None,
                                           bgcolor=(0, 0, 0),
                                           fgcolor=None,
                                           engine=None,
                                           size=(500, 500))
                        fig2 = mlab.figure(figure=None,
                                           bgcolor=(0, 0, 0),
                                           fgcolor=None,
                                           engine=None,
                                           size=(500, 500))
                        mlab.imshow(img_roi_features,
                                    colormap='gist_earth',
                                    name='img_roi_features',
                                    figure=fig1)
                        mlab.imshow(bev_roi_features,
                                    colormap='gist_earth',
                                    name='bev_roi_features',
                                    figure=fig2)
                        # mlab.plot3d([0, box2d_center_rect[0][0]], [0, box2d_center_rect[0][1]], [0, box2d_center_rect[0][2]], color=(1,1,1), tube_radius=None, figure=fig)
                        raw_input()

        print('%d positive proposal in frame %d' %
              (len(pos_proposals_in_frame), data_idx))
        print('%d negative proposal in frame %d' %
              (len(neg_proposals_in_frame), data_idx))
        # draw all proposal in frame
        if viz:
            import mayavi.mlab as mlab
            from viz_util import draw_lidar, draw_gt_boxes3d
            fig = draw_lidar(pc_rect)
            fig = draw_gt_boxes3d(gt_boxes_3d, fig, color=(1, 1, 1))
            fig = draw_gt_boxes3d(pos_proposals_in_frame,
                                  fig,
                                  draw_text=False,
                                  color=(1, 0, 0))
            fig = draw_gt_boxes3d(neg_proposals_in_frame,
                                  fig,
                                  draw_text=False,
                                  color=(0, 1, 0))
            raw_input()

    if pos_neg_ratio is not None:
        keep_idxs = balance(type_idxs, pos_neg_ratio)
    else:
        keep_idxs = [idx for sublist in type_idxs.values() for idx in sublist]
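    # keep_idxs selects the (optionally balanced) samples; shuffle so types
    # are interleaved before every per-sample list is reindexed below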
    random.shuffle(keep_idxs)
    id_list = [id_list[i] for i in keep_idxs]
    box3d_list = [box3d_list[i] for i in keep_idxs]
    input_list = [input_list[i] for i in keep_idxs]
    label_list = [label_list[i] for i in keep_idxs]
    type_list = [type_list[i] for i in keep_idxs]
    heading_list = [heading_list[i] for i in keep_idxs]
    box3d_size_list = [box3d_size_list[i] for i in keep_idxs]
    frustum_angle_list = [frustum_angle_list[i] for i in keep_idxs]
    roi_feature_list = [roi_feature_list[i] for i in keep_idxs]
    print_statics(input_list, label_list, type_list, type_whitelist)

    with open(output_filename, 'wb') as fp:
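        # sequential pickle.dump calls: a reader must pickle.load in exactly
        # this order (note box2d_list stays empty here, its appends are
        # commented out above)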
        pickle.dump(id_list, fp)
        pickle.dump(box2d_list, fp)
        pickle.dump(box3d_list, fp)
        pickle.dump(input_list, fp)
        pickle.dump(label_list, fp)
        pickle.dump(type_list, fp)
        pickle.dump(heading_list, fp)
        pickle.dump(box3d_size_list, fp)
        pickle.dump(frustum_angle_list, fp)
        pickle.dump(roi_feature_list, fp)
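
# A hypothetical driver for the extractor above (illustrative only: the index
# file, split name, output path, type whitelist and ratio are assumptions,
# not values taken from the original project):
#
# if __name__ == '__main__':
#     extract_proposal_data(
#         idx_filename=os.path.join(ROOT_DIR, 'image_sets/train.txt'),
#         split='training',
#         output_filename='frustum_proposals_train.pickle',
#         viz=False,
#         perturb_box3d=True,
#         augmentX=5,
#         type_whitelist=['Car', 'Pedestrian', 'Cyclist'],
#         pos_neg_ratio=0.5,
#         source='avod')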