Example #1
def vis_point_cloud(file_num):
    # Get dataset information
    calib, objects, pc_upright_camera, img, pc_image_coord = load_data(dataset, file_num)

    gt_box3d_list, gt_box2d_list, gt_cls_list = [], [], []
    for obj in objects:
        if obj.classname not in WHITE_LIST: continue
        box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
        box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
        gt_box3d_list.append(box3d_pts_3d)
        gt_box2d_list.append(obj.box2d)
        gt_cls_list.append(obj.classname)

    # Visualize point cloud
    vtk_actors = []
    vtk_gt_boxes = []
    vtk_pc = vis.VtkPointCloud(pc_upright_camera)
    vtk_actors.append(vtk_pc.vtk_actor)

    # Visualize GT 3D boxes
    for box3d in gt_box3d_list:
        vtk_box3D = vis.vtk_box_3D(box3d)
        vtk_gt_boxes.append(vtk_box3D)

    # Visualize GT 2D boxes
    img_filename = os.path.join(IMG_DIR, '%06d.jpg' % file_num)
    vis.display_image(img_filename, gt_box2Ds=gt_box2d_list, gt_col=vis.Color.LightGreen)    

    print('\nTotal number of objects: %d' % len(objects))
    print('Image Size             : %s' % str(img.size))
    print('Point Cloud Size       : %d' % len(pc_upright_camera))

    return vtk_actors, { 'g': vtk_gt_boxes }
Example #2
def evaluate_predictions(predictions, dataset, classes, test_dataset, test_classes):
    ps_list, _, segp_list, center_list, heading_cls_list, heading_res_list, size_cls_list, size_res_list, \
    rot_angle_list, score_list, cls_list, file_num_list, _, _ = predictions
    print('Number of PRED boxes: %d, Number of GT boxes: %d' % (len(segp_list), len(test_dataset)))

    # For detection evaluation
    pred_all = {}
    gt_all = {}
    ovthresh = 0.25

    # A) Get GT boxes
    # print('Constructing GT boxes...')
    for i in range(len(test_dataset)):
        img_id = test_dataset.idx_l[i]
        if img_id in gt_all: continue # Already counted
        gt_all[img_id] = []

        objects = dataset.get_label_objects(img_id)
        calib = dataset.get_calibration(img_id)
        for obj in objects:
            if obj.classname not in test_classes: continue
            box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
            box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
            # Swap the first and last four corners (flip top/bottom face order)
            box3d_pts_3d_flipped = np.copy(box3d_pts_3d)
            box3d_pts_3d_flipped[0:4,:] = box3d_pts_3d[4:,:]
            box3d_pts_3d_flipped[4:,:] = box3d_pts_3d[0:4,:]
            gt_all[img_id].append((obj.classname, box3d_pts_3d_flipped))

    # B) Get PRED boxes
    # print('Constructing PRED boxes...')
    for i in range(len(ps_list)):
        img_id = file_num_list[i] 
        classname = classes[cls_list[i]]
        if classname not in test_classes: raise Exception('Not supposed to have class: %s' % classname)
        center = center_list[i].squeeze()
        rot_angle = rot_angle_list[i]

        # Get heading angle and size
        heading_angle = roi_seg_box3d_dataset.class2angle(heading_cls_list[i], heading_res_list[i], NUM_HEADING_BIN)
        box_size = roi_seg_box3d_dataset.class2size(size_cls_list[i], size_res_list[i]) 
        corners_3d_pred = roi_seg_box3d_dataset.get_3d_box(box_size, heading_angle, center)
        corners_3d_pred = rotate_pc_along_y(corners_3d_pred, -rot_angle)

        if img_id not in pred_all:
            pred_all[img_id] = []
        pred_all[img_id].append((classname, corners_3d_pred, score_list[i]))

    # Compute AP
    rec, prec, ap = eval_det(pred_all, gt_all, ovthresh)
    mean_ap = np.mean([ap[classname] for classname in ap])

    return rec, prec, ap, mean_ap
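Example #2 leans on helpers from roi_seg_box3d_dataset that are not shown. For reference, a minimal sketch of get_3d_box in the frustum-pointnets convention (heading is a rotation about the camera Y/up axis; the exact corner ordering here is an assumption):

import numpy as np

def roty(t):
    # Rotation matrix about the Y (up) axis
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

def get_3d_box(box_size, heading_angle, center):
    # Sketch: build the 8 corners (8,3) of a box of size (l,w,h),
    # rotated by heading_angle around Y and translated to center.
    l, w, h = box_size
    x = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
    y = [h/2, h/2, h/2, h/2, -h/2, -h/2, -h/2, -h/2]
    z = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
    corners = np.dot(roty(heading_angle), np.vstack([x, y, z]))
    return (corners + np.asarray(center).reshape(3, 1)).T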
Example #3
def tf_main_points_in_hull():
    import os
    import numpy as np
    import sys
    import tensorflow as tf  # needed below for the TF1 graph/Session API
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    print(BASE_DIR)
    sys.path.append(os.path.join(BASE_DIR,'../utils'))
    import utils

    def get_2048_points(depth):
        num_point = depth.shape[0]
        pc_in_box_fov = depth[:, :3]
        if num_point > 2048:
            choice = np.random.choice(depth.shape[0], 2048, replace=False)
            pc_in_box_fov = depth[choice, :3]
        return pc_in_box_fov

    pc_in_box_fov = get_2048_points(np.loadtxt('/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt'))
    calib = utils.SUNRGBD_Calibration("/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt.calib")
    objects = utils.read_sunrgbd_label("/Users/jiangyy/projects/VoteNet/utils/test_datas/000001.txt.label")
    box_list = []
    for obj_idx in range(len(objects)):
        obj = objects[obj_idx]
        box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
        box_list.append(box3d_pts_3d)

    tf_points1 = tf.Variable(pc_in_box_fov, dtype=tf.float32)
    tf_boxes1 = tf.Variable(np.array(box_list), dtype=tf.float32)
    tf_points2 = tf.expand_dims(tf_points1, axis=0)
    tf_boxes2 = tf.expand_dims(tf_boxes1, axis=0)
    tf_idx_in_hull = tf_points_in_hull(tf_points2, tf_boxes2)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tf_idx_in_hull_1 = sess.run(tf_idx_in_hull)
        print(tf_idx_in_hull_1.shape)
    tf_idx_in_hull_2 = tf_idx_in_hull_1.sum(axis=2)

    import mayavi.mlab as mlab
    fig = mlab.figure(figure=None, bgcolor=(0.4, 0.4, 0.4), fgcolor=None, engine=None, size=(1000, 500))
    mlab.points3d(pc_in_box_fov[:, 0], pc_in_box_fov[:, 1], pc_in_box_fov[:, 2], tf_idx_in_hull_2[0], mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.2, figure=fig)
    sys.path.append(os.path.join(BASE_DIR, '../mayavi'))
    from viz_util import draw_gt_boxes3d
    draw_gt_boxes3d(box_list, fig, color=(1, 0, 0))
    mlab.orientation_axes()
    mlab.show()
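tf_points_in_hull is the TF counterpart of the usual NumPy convex-hull test. A minimal CPU sketch (assuming SciPy is available) that the op can be checked against:

import numpy as np
from scipy.spatial import Delaunay

def in_hull(p, hull):
    # Boolean mask over points p (N,3): True where a point lies inside
    # the convex hull spanned by `hull` (e.g. the 8 box corners).
    if not isinstance(hull, Delaunay):
        hull = Delaunay(hull)
    return hull.find_simplex(p) >= 0

def extract_pc_in_box3d(pc, box3d):
    # Return the points inside the (8,3) box and their boolean index mask
    inds = in_hull(pc[:, 0:3], box3d)
    return pc[inds, :], inds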
Example #4
def dataset_viz(show_frustum=False):
    sunrgbd = sunrgbd_object(data_dir)
    idxs = np.array(range(1, len(sunrgbd) + 1))
    np.random.shuffle(idxs)
    for idx in range(len(sunrgbd)):
        data_idx = idxs[idx]
        print('--------------------', data_idx)
        pc = sunrgbd.get_depth(data_idx)
        print(pc.shape)

        # Project points to image
        calib = sunrgbd.get_calibration(data_idx)
        uv, d = calib.project_upright_depth_to_image(pc[:, 0:3])
        print(uv)
        print(d)
        raw_input()

        import matplotlib.pyplot as plt
        cmap = plt.cm.get_cmap('hsv', 256)
        cmap = np.array([cmap(i) for i in range(256)])[:, :3] * 255

        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for i in range(uv.shape[0]):
            depth = d[i]
            color = cmap[int(120.0 / depth), :]
            cv2.circle(img, (int(np.round(uv[i, 0])), int(np.round(uv[i, 1]))),
                       2,
                       color=tuple(color),
                       thickness=-1)
        Image.fromarray(img).show()
        raw_input()

        # Load box labels
        objects = sunrgbd.get_label_objects(data_idx)
        print(objects)
        raw_input()

        # Draw 2D boxes on image
        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for i, obj in enumerate(objects):
            cv2.rectangle(img, (int(obj.xmin), int(obj.ymin)),
                          (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
            cv2.putText(img, '%d %s' % (i, obj.classname),
                        (max(int(obj.xmin), 15), max(int(obj.ymin), 15)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
        Image.fromarray(img).show()
        raw_input()

        # Draw 3D boxes on depth points
        box3d = []
        ori3d = []
        for obj in objects:
            corners_3d_image, corners_3d = utils.compute_box_3d(obj, calib)
            ori_3d_image, ori_3d = utils.compute_orientation_3d(obj, calib)
            print('Corners 3D: ', corners_3d)
            box3d.append(corners_3d)
            ori3d.append(ori_3d)
        raw_input()

        bgcolor = (0, 0, 0)
        fig = mlab.figure(figure=None,
                          bgcolor=bgcolor,
                          fgcolor=None,
                          engine=None,
                          size=(1600, 1000))
        mlab.points3d(pc[:, 0],
                      pc[:, 1],
                      pc[:, 2],
                      pc[:, 2],
                      mode='point',
                      colormap='gnuplot',
                      figure=fig)
        mlab.points3d(0,
                      0,
                      0,
                      color=(1, 1, 1),
                      mode='sphere',
                      scale_factor=0.2)
        draw_gt_boxes3d(box3d, fig=fig)
        for i in range(len(ori3d)):
            ori_3d = ori3d[i]
            x1, y1, z1 = ori_3d[0, :]
            x2, y2, z2 = ori_3d[1, :]
            mlab.plot3d([x1, x2], [y1, y2], [z1, z2],
                        color=(0.5, 0.5, 0.5),
                        tube_radius=None,
                        line_width=1,
                        figure=fig)
        mlab.orientation_axes()
        for i, obj in enumerate(objects):
            print('Orientation: ', i,
                  np.arctan2(obj.orientation[1], obj.orientation[0]))
            print('Dimension: ', i, obj.l, obj.w, obj.h)
        raw_input()

        if show_frustum:
            img = sunrgbd.get_image(data_idx)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            for i, obj in enumerate(objects):
                box2d_fov_inds = (uv[:, 0] < obj.xmax) & (
                    uv[:, 0] >= obj.xmin) & (uv[:, 1] <
                                             obj.ymax) & (uv[:, 1] >= obj.ymin)
                box2d_fov_pc = pc[box2d_fov_inds, :]
                img2 = np.copy(img)
                cv2.rectangle(img2, (int(obj.xmin), int(obj.ymin)),
                              (int(obj.xmax), int(obj.ymax)), (0, 255, 0), 2)
                cv2.putText(img2, '%d %s' % (i, obj.classname),
                            (max(int(obj.xmin), 15), max(int(obj.ymin), 15)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0, 0), 2)
                Image.fromarray(img2).show()

                fig = mlab.figure(figure=None,
                                  bgcolor=bgcolor,
                                  fgcolor=None,
                                  engine=None,
                                  size=(1000, 1000))
                mlab.points3d(box2d_fov_pc[:, 0],
                              box2d_fov_pc[:, 1],
                              box2d_fov_pc[:, 2],
                              box2d_fov_pc[:, 2],
                              mode='point',
                              colormap='gnuplot',
                              figure=fig)
                raw_input()
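draw_gt_boxes3d comes from the repo's mayavi viz_util; in essence it draws the 12 edges of each (8,3) corner array. A reduced sketch, assuming corners 0-3 form one face and 4-7 the opposite face:

import mayavi.mlab as mlab

def draw_boxes3d(boxes3d, fig, color=(1, 1, 1), line_width=1):
    # Sketch: draw each box as 12 line segments (two faces plus verticals)
    for b in boxes3d:  # b is an (8,3) array of corners
        for k in range(4):
            for i, j in ((k, (k + 1) % 4),          # edge of one face
                         (k + 4, (k + 1) % 4 + 4),  # edge of the opposite face
                         (k, k + 4)):               # connecting edge
                mlab.plot3d([b[i, 0], b[j, 0]], [b[i, 1], b[j, 1]],
                            [b[i, 2], b[j, 2]], color=color,
                            tube_radius=None, line_width=line_width, figure=fig)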
Example #5
def extract_roi_seg(idx_filename,
                    split,
                    output_filename,
                    viz,
                    perturb_box2d=False,
                    augmentX=1,
                    type_whitelist=[
                        'bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                        'dresser', 'night_stand', 'bookshelf', 'bathtub'
                    ]):
    dataset = sunrgbd_object(
        '/home/haianyt/frustum-pointnets/sunrgbd/sunrgbd_data/matlab/SUNRGBDtoolbox/mysunrgbd',
        split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = []  # int number
    box2d_list = []  # [xmin,ymin,xmax,ymax]
    box3d_list = []  # (8,3) array in upright depth coord
    input_list = []  # channel number = 6, xyz,rgb in upright depth coord
    label_list = []  # 1 for roi object, 0 for clutter
    type_list = []  # string e.g. bed
    heading_list = [
    ]  # face of object angle, radius of clockwise angle from positive x axis in upright camera coord
    box3d_size_list = []  # array of l,w,h
    frustum_angle_list = [
    ]  # angle of 2d box center from pos x-axis (clockwise)

    pos_cnt = 0
    all_cnt = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)
        objects = dataset.get_label_objects(data_idx)
        pc_upright_depth = dataset.get_depth(data_idx)
        pc_upright_camera = np.zeros_like(pc_upright_depth)
        pc_upright_camera[:,
                          0:3] = calib.project_upright_depth_to_upright_camera(
                              pc_upright_depth[:, 0:3])
        pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
        if viz:
            mlab.points3d(pc_upright_camera[:, 0],
                          pc_upright_camera[:, 1],
                          pc_upright_camera[:, 2],
                          pc_upright_camera[:, 1],
                          mode='point')
            mlab.orientation_axes()
            raw_input()
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        pc_image_coord, _ = calib.project_upright_depth_to_image(
            pc_upright_depth)
        #print('PC image coord: ', pc_image_coord)

        for obj_idx in range(len(objects)):
            obj = objects[obj_idx]
            if obj.classname not in type_whitelist: continue

            print("object pass the type_whitelist augmentX = ", augmentX)

            # 2D BOX: Get pts rect backprojected
            box2d = obj.box2d
            for _ in range(augmentX):
                # Augment data by box2d perturbation
                if perturb_box2d:
                    xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
                    print(xmin, ymin, xmax, ymax)
                else:
                    xmin, ymin, xmax, ymax = box2d
                box_fov_inds = (pc_image_coord[:, 0] <
                                xmax) & (pc_image_coord[:, 0] >= xmin) & (
                                    pc_image_coord[:, 1] <
                                    ymax) & (pc_image_coord[:, 1] >= ymin)
                pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
                # Get frustum angle (according to center pixel in 2D BOX)
                box2d_center = np.array([(xmin + xmax) / 2.0,
                                         (ymin + ymax) / 2.0])
                uvdepth = np.zeros((1, 3))
                uvdepth[0, 0:2] = box2d_center
                uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
                box2d_center_upright_camera = calib.project_image_to_upright_camerea(
                    uvdepth)
                print('UVdepth, center in upright camera: ', uvdepth,
                      box2d_center_upright_camera)
                frustum_angle = -1 * np.arctan2(
                    box2d_center_upright_camera[0, 2],
                    box2d_center_upright_camera[0, 0]
                )  # angle as to positive x-axis as in the Zoox paper
                print('Frustum angle: ', frustum_angle)

                # 3D BOX: Get pts velo in 3d box
                box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
                box3d_pts_3d = calib.project_upright_depth_to_upright_camera(
                    box3d_pts_3d)
                _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                print(len(inds))
                label = np.zeros((pc_in_box_fov.shape[0]))
                label[inds] = 1
                # Get 3D BOX heading
                print('Orientation: ', obj.orientation)
                print('Heading angle: ', obj.heading_angle)
                # Get 3D BOX size
                box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])
                print('Box3d size: ', box3d_size)
                print('Type: ', obj.classname)
                print('Num of point: ', pc_in_box_fov.shape[0])

                # Subsample points..
                num_point = pc_in_box_fov.shape[0]
                if num_point > 2048:
                    choice = np.random.choice(pc_in_box_fov.shape[0],
                                              2048,
                                              replace=False)
                    pc_in_box_fov = pc_in_box_fov[choice, :]
                    label = label[choice]
                # Reject object with too few points
                if np.sum(label) < 5:
                    continue

                id_list.append(data_idx)
                box2d_list.append(np.array([xmin, ymin, xmax, ymax]))
                box3d_list.append(box3d_pts_3d)
                input_list.append(pc_in_box_fov)
                label_list.append(label)
                type_list.append(obj.classname)
                heading_list.append(obj.heading_angle)
                box3d_size_list.append(box3d_size)
                frustum_angle_list.append(frustum_angle)

                # collect statistics
                pos_cnt += np.sum(label)
                all_cnt += pc_in_box_fov.shape[0]

                # VISUALIZATION
                if viz:
                    img2 = np.copy(img)
                    cv2.rectangle(img2, (int(obj.xmin), int(obj.ymin)),
                                  (int(obj.xmax), int(obj.ymax)), (0, 255, 0),
                                  2)
                    utils.draw_projected_box3d(img2, box3d_pts_2d)
                    Image.fromarray(img2).show()
                    p1 = input_list[-1]
                    seg = label_list[-1]
                    fig = mlab.figure(figure=None,
                                      bgcolor=(0.4, 0.4, 0.4),
                                      fgcolor=None,
                                      engine=None,
                                      size=(500, 500))
                    mlab.points3d(p1[:, 0],
                                  p1[:, 1],
                                  p1[:, 2],
                                  seg,
                                  mode='point',
                                  colormap='gnuplot',
                                  scale_factor=1,
                                  figure=fig)
                    mlab.points3d(0,
                                  0,
                                  0,
                                  color=(1, 1, 1),
                                  mode='sphere',
                                  scale_factor=0.2)
                    draw_gt_boxes3d([box3d_pts_3d], fig=fig)
                    mlab.orientation_axes()
                    raw_input()

    print('Average pos ratio: ', pos_cnt / float(all_cnt))
    print('Average npoints: ', float(all_cnt) / len(id_list))

    utils.save_zipped_pickle([
        id_list, box2d_list, box3d_list, input_list, label_list, type_list,
        heading_list, box3d_size_list, frustum_angle_list
    ], output_filename)
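The perturb_box2d branch calls random_shift_box2d, which jitters the 2D box center and scale so one labeled object yields several slightly different frustums. A plausible minimal implementation (the 10% shift ratio is an assumption):

import numpy as np

def random_shift_box2d(box2d, shift_ratio=0.1):
    # Sketch: shift the box center by up to shift_ratio of its width/height
    # and rescale width/height by up to +-shift_ratio.
    r = shift_ratio
    xmin, ymin, xmax, ymax = box2d
    h, w = ymax - ymin, xmax - xmin
    cx, cy = (xmin + xmax) / 2.0, (ymin + ymax) / 2.0
    cx2 = cx + w * r * (np.random.random() * 2 - 1)
    cy2 = cy + h * r * (np.random.random() * 2 - 1)
    h2 = h * (1 + r * (np.random.random() * 2 - 1))
    w2 = w * (1 + r * (np.random.random() * 2 - 1))
    return np.array([cx2 - w2 / 2.0, cy2 - h2 / 2.0,
                     cx2 + w2 / 2.0, cy2 + h2 / 2.0])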
Example #6
def process_object(calib,
                   obj,
                   pc_upright_camera,
                   img,
                   pc_image_coord,
                   perturb_box2d=False,
                   num_points=2048):
    # Augment data by box2d perturbation
    box2d = obj.box2d
    if perturb_box2d:
        xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
        # print(xmin,ymin,xmax,ymax)
    else:
        xmin, ymin, xmax, ymax = box2d
    box2d = np.array([xmin, ymin, xmax, ymax])
    box_fov_inds = (pc_image_coord[:, 0] < xmax) & (
        pc_image_coord[:, 0] >= xmin) & (pc_image_coord[:, 1] <
                                         ymax) & (pc_image_coord[:, 1] >= ymin)
    pc_in_box_fov = pc_upright_camera[box_fov_inds, :]

    # Get frustum angle (according to center pixel in 2D BOX)
    box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
    uvdepth = np.zeros((1, 3))
    uvdepth[0, 0:2] = box2d_center
    uvdepth[0, 2] = 20  # arbitrary depth; only the ray direction matters
    box2d_center_upright_camera = calib.project_image_to_upright_camerea(
        uvdepth)
    #print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera)
    frustum_angle = -1 * np.arctan2(
        box2d_center_upright_camera[0, 2], box2d_center_upright_camera[
            0, 0])  # angle as to positive x-axis as in the Zoox paper
    #print('Frustum angle: ', frustum_angle)

    # 3D BOX: Get pts velo in 3d box
    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
    _, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
    # print(len(inds))
    label = np.zeros((pc_in_box_fov.shape[0]))
    label[inds] = 1

    # Get cropped image
    cropped_image = img[int(ymin):int(ymax), int(xmin):int(xmax), :]
    h, w, _ = img.shape
    img_dims = [h, w]

    # Get 3D BOX heading
    # print('Orientation: ', obj.orientation)
    # print('Heading angle: ', obj.heading_angle)

    # Get 3D BOX size
    box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])
    # print('Box3d size: ', box3d_size)
    # print('Type: ', obj.classname)
    # print('Num of point: ', pc_in_box_fov.shape[0])

    # Subsample points..
    num_point = pc_in_box_fov.shape[0]
    if num_point > num_points:
        choice = np.random.choice(pc_in_box_fov.shape[0],
                                  num_points,
                                  replace=False)
        pc_in_box_fov = pc_in_box_fov[choice, :]
        label = label[choice]

    return box2d, box3d_pts_3d, cropped_image, pc_in_box_fov, label, box3d_size, frustum_angle, img_dims
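Several examples undo the frustum normalization with rotate_pc_along_y. Since only the frustum angle around the up axis is involved, the rotation touches just the X and Z columns; a minimal sketch:

import numpy as np

def rotate_pc_along_y(pc, rot_angle):
    # Sketch: rotate points (N,3+) by rot_angle about the Y (up) axis;
    # only the X and Z coordinates change. Modifies pc in place.
    c, s = np.cos(rot_angle), np.sin(rot_angle)
    rotmat = np.array([[c, -s], [s, c]])
    pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], rotmat.T)
    return pc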
Example #7
def dataset_viz(show_frustum=False):  
    sunrgbd = sunrgbd_object(data_dir)
    idxs = np.array(range(1,len(sunrgbd)+1))
    np.random.shuffle(idxs)
    for idx in range(len(sunrgbd)):
        data_idx = idxs[idx]
        print('--------------------', data_idx)
        pc = sunrgbd.get_depth(data_idx)
        print(pc.shape)
        
        # Project points to image
        calib = sunrgbd.get_calibration(data_idx)
        uv,d = calib.project_upright_depth_to_image(pc[:,0:3])
        print(uv)
        print(d)
        raw_input()
        
        import matplotlib.pyplot as plt
        cmap = plt.cm.get_cmap('hsv', 256)
        cmap = np.array([cmap(i) for i in range(256)])[:,:3]*255
        
        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 
        for i in range(uv.shape[0]):
            depth = d[i]
            color = cmap[int(120.0/depth),:]
            cv2.circle(img, (int(np.round(uv[i,0])), int(np.round(uv[i,1]))), 2, color=tuple(color), thickness=-1)
        Image.fromarray(img).show() 
        raw_input()
        
        # Load box labels
        objects = sunrgbd.get_label_objects(data_idx)
        print(objects)
        raw_input()
        
        # Draw 2D boxes on image
        img = sunrgbd.get_image(data_idx)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 
        for i,obj in enumerate(objects):
            cv2.rectangle(img, (int(obj.xmin),int(obj.ymin)), (int(obj.xmax),int(obj.ymax)), (0,255,0), 2)
            cv2.putText(img, '%d %s'%(i,obj.classname), (max(int(obj.xmin),15), max(int(obj.ymin),15)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
        Image.fromarray(img).show()
        raw_input()
        
        # Draw 3D boxes on depth points
        box3d = []
        ori3d = []
        for obj in objects:
            corners_3d_image, corners_3d = utils.compute_box_3d(obj, calib)
            ori_3d_image, ori_3d = utils.compute_orientation_3d(obj, calib)
            print('Corners 3D: ', corners_3d)
            box3d.append(corners_3d)
            ori3d.append(ori_3d)
        raw_input()
        
        bgcolor=(0,0,0)
        fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1600, 1000))
        mlab.points3d(pc[:,0], pc[:,1], pc[:,2], pc[:,2], mode='point', colormap='gnuplot', figure=fig)
        mlab.points3d(0, 0, 0, color=(1,1,1), mode='sphere', scale_factor=0.2)
        draw_gt_boxes3d(box3d, fig=fig)
        for i in range(len(ori3d)):
            ori_3d = ori3d[i]
            x1,y1,z1 = ori_3d[0,:]
            x2,y2,z2 = ori_3d[1,:]
            mlab.plot3d([x1, x2], [y1, y2], [z1,z2], color=(0.5,0.5,0.5), tube_radius=None, line_width=1, figure=fig)
        mlab.orientation_axes()
        for i,obj in enumerate(objects):
            print('Orientation: ', i, np.arctan2(obj.orientation[1], obj.orientation[0]))
            print('Dimension: ', i, obj.l, obj.w, obj.h)
        raw_input()
       
        if show_frustum: 
            img = sunrgbd.get_image(data_idx)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 
            for i,obj in enumerate(objects):
                box2d_fov_inds = (uv[:,0]<obj.xmax) & (uv[:,0]>=obj.xmin) & (uv[:,1]<obj.ymax) & (uv[:,1]>=obj.ymin)
                box2d_fov_pc = pc[box2d_fov_inds, :]
                img2 = np.copy(img)
                cv2.rectangle(img2, (int(obj.xmin),int(obj.ymin)), (int(obj.xmax),int(obj.ymax)), (0,255,0), 2)
                cv2.putText(img2, '%d %s'%(i,obj.classname), (max(int(obj.xmin),15), max(int(obj.ymin),15)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)
                Image.fromarray(img2).show()
                
                fig = mlab.figure(figure=None, bgcolor=bgcolor, fgcolor=None, engine=None, size=(1000, 1000))
                mlab.points3d(box2d_fov_pc[:,0], box2d_fov_pc[:,1], box2d_fov_pc[:,2], box2d_fov_pc[:,2], mode='point', colormap='gnuplot', figure=fig)
                raw_input()
Example #8
def extract_roi_seg(idx_filename, split, output_filename, viz, perturb_box2d=False, augmentX=1, type_whitelist=['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']):
    dataset = sunrgbd_object('/home/rqi/Data/mysunrgbd', split)
    data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]

    id_list = [] # int number
    box2d_list = [] # [xmin,ymin,xmax,ymax]
    box3d_list = [] # (8,3) array in upright depth coord
    input_list = [] # channel number = 6, xyz,rgb in upright depth coord
    label_list = [] # 1 for roi object, 0 for clutter
    type_list = [] # string e.g. bed
    heading_list = [] # face of object angle, radius of clockwise angle from positive x axis in upright camera coord
    box3d_size_list = [] # array of l,w,h
    frustum_angle_list = [] # angle of 2d box center from pos x-axis (clockwise)

    pos_cnt = 0
    all_cnt = 0
    for data_idx in data_idx_list:
        print('------------- ', data_idx)
        calib = dataset.get_calibration(data_idx)
        objects = dataset.get_label_objects(data_idx)
        pc_upright_depth = dataset.get_depth(data_idx)
        pc_upright_camera = np.zeros_like(pc_upright_depth)
        pc_upright_camera[:,0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:,0:3])
        pc_upright_camera[:,3:] = pc_upright_depth[:,3:]
        if viz:
            mlab.points3d(pc_upright_camera[:,0], pc_upright_camera[:,1], pc_upright_camera[:,2], pc_upright_camera[:,1], mode='point')
            mlab.orientation_axes()
            raw_input()
        img = dataset.get_image(data_idx)
        img_height, img_width, img_channel = img.shape
        pc_image_coord,_ = calib.project_upright_depth_to_image(pc_upright_depth)
        #print('PC image coord: ', pc_image_coord)

        for obj_idx in range(len(objects)):
            obj = objects[obj_idx]
            if obj.classname not in type_whitelist: continue

            # 2D BOX: Get pts rect backprojected 
            box2d = obj.box2d
            for _ in range(augmentX):
                try:
                    # Augment data by box2d perturbation
                    if perturb_box2d:
                        xmin,ymin,xmax,ymax = random_shift_box2d(box2d)
                        print(xmin,ymin,xmax,ymax)
                    else:
                        xmin,ymin,xmax,ymax = box2d
                    box_fov_inds = (pc_image_coord[:,0]<xmax) & (pc_image_coord[:,0]>=xmin) & (pc_image_coord[:,1]<ymax) & (pc_image_coord[:,1]>=ymin)
                    pc_in_box_fov = pc_upright_camera[box_fov_inds,:]
                    # Get frustum angle (according to center pixel in 2D BOX)
                    box2d_center = np.array([(xmin+xmax)/2.0, (ymin+ymax)/2.0])
                    uvdepth = np.zeros((1,3))
                    uvdepth[0,0:2] = box2d_center
                    uvdepth[0,2] = 20 # arbitrary depth; only the ray direction matters
                    box2d_center_upright_camera = calib.project_image_to_upright_camerea(uvdepth)
                    print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera)
                    frustum_angle = -1 * np.arctan2(box2d_center_upright_camera[0,2], box2d_center_upright_camera[0,0]) # angle as to positive x-axis as in the Zoox paper
                    print('Frustum angle: ', frustum_angle)
                    # 3D BOX: Get pts velo in 3d box
                    box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib) 
                    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
                    _,inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
                    print(len(inds))
                    label = np.zeros((pc_in_box_fov.shape[0]))
                    label[inds] = 1
                    # Get 3D BOX heading
                    print('Orientation: ', obj.orientation)
                    print('Heading angle: ', obj.heading_angle)
                    # Get 3D BOX size
                    box3d_size = np.array([2*obj.l,2*obj.w,2*obj.h])
                    print('Box3d size: ', box3d_size)
                    print('Type: ', obj.classname)
                    print('Num of point: ', pc_in_box_fov.shape[0])
                    
                    # Subsample points..
                    num_point = pc_in_box_fov.shape[0]
                    if num_point > 2048:
                        choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
                        pc_in_box_fov = pc_in_box_fov[choice,:]
                        label = label[choice]
                    # Reject object with too few points
                    if np.sum(label) < 5:
                        continue

                    id_list.append(data_idx)
                    box2d_list.append(np.array([xmin,ymin,xmax,ymax]))
                    box3d_list.append(box3d_pts_3d)
                    input_list.append(pc_in_box_fov)
                    label_list.append(label)
                    type_list.append(obj.classname)
                    heading_list.append(obj.heading_angle)
                    box3d_size_list.append(box3d_size)
                    frustum_angle_list.append(frustum_angle)
    
                    # collect statistics
                    pos_cnt += np.sum(label)
                    all_cnt += pc_in_box_fov.shape[0]
       
                    # VISUALIZATION
                    if viz:
                        img2 = np.copy(img)
                        cv2.rectangle(img2, (int(obj.xmin),int(obj.ymin)), (int(obj.xmax),int(obj.ymax)), (0,255,0), 2)
                        utils.draw_projected_box3d(img2, box3d_pts_2d)
                        Image.fromarray(img2).show()
                        p1 = input_list[-1]
                        seg = label_list[-1] 
                        fig = mlab.figure(figure=None, bgcolor=(0.4,0.4,0.4), fgcolor=None, engine=None, size=(500, 500))
                        mlab.points3d(p1[:,0], p1[:,1], p1[:,2], seg, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
                        mlab.points3d(0, 0, 0, color=(1,1,1), mode='sphere', scale_factor=0.2)
                        draw_gt_boxes3d([box3d_pts_3d], fig=fig)
                        mlab.orientation_axes()
                        raw_input()
                except Exception:
                    # Skip objects that fail processing rather than aborting the extraction
                    continue

    print('Average pos ratio: ', pos_cnt/float(all_cnt))
    print('Average npoints: ', float(all_cnt)/len(id_list))

    utils.save_zipped_pickle([id_list,box2d_list,box3d_list,input_list,label_list,type_list,heading_list,box3d_size_list,frustum_angle_list],output_filename)
Example #9
def vis_predictions3D(pred_files, gt_file, number_to_show=10, filenums=None):
    
    from roi_seg_box3d_dataset import class2type
    
    idx = 0
    COLS = number_to_show
    ap_infos = {}
    classes, file_nums, mean_box_ious, mean_seg_ious, box_ious, seg_ious = [], [], [], [], [], []
    vtk_pcs_with_col, vtk_pcs_wo_col, vtk_imgs, vtk_gt_boxes, vtk_pred_boxes, vtk_texts = [], [], [], [], [], []
    choices = []

    test_dataset = ROISegBoxDataset(WHITE_LIST, npoints=2048, 
                                    split='val', rotate_to_center=True, 
                                    overwritten_data_path=gt_file, 
                                    from_rgb_detection=False)

    for n, pred_file in enumerate(pred_files):
        # Lists of different items from predictions
        predictions = load_zipped_pickle(pred_file)
        ps_l, seg_gt_l, seg_pred_l, center_l, heading_cls_l, heading_res_l, size_cls_l, size_res_l, rot_angle_l, \
            score_l, cls_type_l, file_num_l, box2d_l, box3d_l = predictions
        if n == 0:
            # Choosing equal number of objects per class to display
            cls_types = []
            options = {}
            for i, cls_type in enumerate(cls_type_l):
                if not class2type[cls_type] in WHITE_LIST: continue
                if options.get(cls_type) is None:
                    options[cls_type] = [i]
                    cls_types.append(cls_type)
                else:
                    options[cls_type].append(i)

            # Make use of array_split to divide into fairly equal groups
            arr = np.array_split([1] * number_to_show, len(options.keys()))
            random.shuffle(arr)
            for i, group in enumerate(arr):
                cls_type = cls_types[i]
                choice_list = np.random.choice(options[cls_type], len(group), replace=False)
                choices.extend(choice_list)
            print('Number of objects in whitelist: %d' % len(options))

        # Compute overall statistics
        if not FLAGS.rgb_detection:
            print('==== Computing overall statistics for %s ====' % pred_file)
            from evaluate import evaluate_predictions, get_ap_info
            rec, prec, ap, mean_ap = evaluate_predictions(predictions, dataset, CLASSES, 
                                                          test_dataset, WHITE_LIST)
            ap['Mean AP'] = mean_ap
            for classname in ap.keys():
                if ap_infos.get(classname) is None: ap_infos[classname] = []
                ap_infos[classname].append('%11s: [%.1f]' % (classname, 100. * ap[classname]))

            box_iou_sum, seg_iou_sum = 0, 0
            for i in range(len(ps_l)):
                seg_gt = seg_gt_l[i]
                box3d = box3d_l[i]
                seg_pred = seg_pred_l[i]
                center = center_l[i]
                heading_cls = heading_cls_l[i]
                heading_res = heading_res_l[i]
                size_cls = size_cls_l[i]
                size_res = size_res_l[i]
                rot_angle = rot_angle_l[i]

                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
                box_size = class2size(size_cls, size_res) 
                pred_box3d = get_3d_box(box_size, heading_angle, center)

                # Box IOU
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                # Seg IOU
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)

                box_iou_sum += box_iou3d
                seg_iou_sum += seg_iou
            mean_box_iou = box_iou_sum / len(ps_l)
            mean_seg_iou = seg_iou_sum / len(ps_l)
            mean_box_ious.append(mean_box_iou)
            mean_seg_ious.append(mean_seg_iou)
             
        for i in choices:
            row, col = idx // COLS, idx % COLS
            idx += 1
            ps = ps_l[i]
            seg_pred = seg_pred_l[i]
            center = center_l[i]
            heading_cls = heading_cls_l[i]
            heading_res = heading_res_l[i]
            size_cls = size_cls_l[i]
            size_res = size_res_l[i]
            rot_angle = rot_angle_l[i]
            score = score_l[i]
            cls_type = cls_type_l[i]
            file_num = file_num_l[i]
            seg_gt = seg_gt_l[i] if not FLAGS.rgb_detection else []
            box2d = box2d_l[i]
            box3d = box3d_l[i] if not FLAGS.rgb_detection else []

            # Visualize point cloud (with and without color)
            vtk_pc_wo_col = vis.VtkPointCloud(ps)
            vtk_pc = vis.VtkPointCloud(ps, gt_points=seg_gt, pred_points=seg_pred)
            vis.vtk_transform_actor(vtk_pc_wo_col.vtk_actor, translate=(SEP*col,SEP*row,0))
            vis.vtk_transform_actor(vtk_pc.vtk_actor, translate=(SEP*col,SEP*row,0))
            vtk_pcs_wo_col.append(vtk_pc_wo_col.vtk_actor)
            vtk_pcs_with_col.append(vtk_pc.vtk_actor)

            # Visualize GT 3D box
            if FLAGS.rgb_detection:
                objects = dataset.get_label_objects(file_num)
                calib = dataset.get_calibration(file_num)
                for obj in objects:
                    if obj.classname not in WHITE_LIST: continue
                    box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
                    box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
                    box3d_pts_3d = rotate_pc_along_y(np.copy(box3d_pts_3d), rot_angle)
                    vtk_box3D = vis.vtk_box_3D(box3d_pts_3d, color=vis.Color.LightGreen)
                    vis.vtk_transform_actor(vtk_box3D, translate=(SEP*col,SEP*row,0))
                    vtk_gt_boxes.append(vtk_box3D)
            else:
                gt_box3d = rotate_pc_along_y(np.copy(box3d), rot_angle)
                vtk_gt_box3D = vis.vtk_box_3D(gt_box3d, color=vis.Color.LightGreen)
                vis.vtk_transform_actor(vtk_gt_box3D, translate=(SEP*col,SEP*row,0))
                vtk_gt_boxes.append(vtk_gt_box3D)

            # Visualize Pred 3D box
            heading_angle = class2angle(heading_cls, heading_res, NUM_HEADING_BIN)
            box_size = class2size(size_cls, size_res) 
            pred_box3d = get_3d_box(box_size, heading_angle, center)
            vtk_pred_box3D = vis.vtk_box_3D(pred_box3d, color=vis.Color.White)
            vis.vtk_transform_actor(vtk_pred_box3D, translate=(SEP*col,SEP*row,0))
            vtk_pred_boxes.append(vtk_pred_box3D)

            # Visualize Images
            box2d_col = vis.Color.LightGreen if not FLAGS.rgb_detection else vis.Color.Orange
            img_filename = os.path.join(IMG_DIR, '%06d.jpg' % file_num)
            vtk_img = vis.vtk_image(img_filename, box2Ds_list=[[box2d]], box2Ds_cols=[box2d_col])
            vis.vtk_transform_actor(vtk_img, scale=(IMG_SCALE,IMG_SCALE,IMG_SCALE), 
                                    rot=(0,180,180), translate=(-2+SEP*col,2+SEP*row,10))
            vtk_imgs.append(vtk_img)

            # Other information
            classes.append(class2type[cls_type].capitalize())
            file_nums.append(str(file_num))
            if not FLAGS.rgb_detection:
                shift_arr = np.array([4,5,6,7,0,1,2,3])
                box_iou3d, _ = box3d_iou(gt_box3d[shift_arr,:], pred_box3d)
                box_ious.append(box_iou3d)
                seg_iou = get_seg_iou(seg_gt, seg_pred, 2)
                seg_ious.append(seg_iou)

    # Visualize overall statistics
    vtk_texts.extend(vis.vtk_text([('Model: %s' % pred_file.split('/')[-1]) for pred_file in pred_files], arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,2.5,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Box IOU:'] * len(pred_files), arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,3,2)))
    vtk_texts.extend(vis.vtk_text(mean_box_ious, arr_type='float', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-10,3,2)))
    vtk_texts.extend(vis.vtk_text(['Mean Seg IOU:'] * len(pred_files), arr_type='text', sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(mean_seg_ious, arr_type='float', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-10,3.5,2)))
    for i, (cls_name, ap_info) in enumerate(ap_infos.items()):
        vtk_texts.extend(vis.vtk_text(ap_info, arr_type='text', color=True, sep=SEP, cols=1, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-14.5,4+i*0.5,2)))

    # Visualize text information
    vtk_texts.extend(vis.vtk_text(['Class:'] * len(classes), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,3,2)))
    vtk_texts.extend(vis.vtk_text(classes, arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0.5,3,2)))
    vtk_texts.extend(vis.vtk_text(['File:'] * len(file_nums), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,3.5,2)))
    vtk_texts.extend(vis.vtk_text(file_nums, arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0.25,3.5,2)))
    if not FLAGS.rgb_detection:
        vtk_texts.extend(vis.vtk_text(['Box:'] * len(box_ious), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,4,2)))
        vtk_texts.extend(vis.vtk_text(box_ious, arr_type='float', color=True, sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0,4,2)))
        vtk_texts.extend(vis.vtk_text(['Seg:'] * len(seg_ious), arr_type='text', sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(-1.5,4.5,2)))
        vtk_texts.extend(vis.vtk_text(seg_ious, arr_type='float', color=True, sep=SEP, cols=COLS, scale=TEXT_SCALE, rot=TEXT_ROT, translate=(0,4.5,2)))

    key_to_actors_to_hide = { 'g': vtk_gt_boxes, 'p': vtk_pred_boxes, 'i': vtk_imgs, 'c': vtk_pcs_wo_col, 't': vtk_texts }
    return vtk_pcs_with_col, key_to_actors_to_hide
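get_seg_iou is not shown in these examples; for a two-class foreground/background mask it presumably reduces to a per-class intersection-over-union averaged over the classes. A sketch, not necessarily the repo's exact helper:

import numpy as np

def get_seg_iou(seg_gt, seg_pred, num_classes):
    # Sketch: mean IoU over classes, skipping classes absent from both masks
    ious = []
    for c in range(num_classes):
        inter = np.sum((seg_gt == c) & (seg_pred == c))
        union = np.sum((seg_gt == c) | (seg_pred == c))
        if union > 0:
            ious.append(inter / float(union))
    return float(np.mean(ious))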
Example #10
box3d_map = {}
box2d_map = {}
type_map = {}
for i in range(len(segp_list)):
    print " ---- %d/%d"%(i,len(segp_list))
    if score_list[i]<0.5: continue
    img_id = TEST_DATASET.id_list[i] 
    box2d = TEST_DATASET.box2d_list[i]

    objects = dataset.get_label_objects(img_id)
    calib = dataset.get_calibration(img_id)
    if img_id not in box3d_map:
        box3d_map[img_id] = []
        box2d_map[img_id] = []
        type_map[img_id] = []
    for obj in objects:
        if obj.classname not in ['bed','table','sofa','chair','toilet','desk','dresser','night_stand','bookshelf','bathtub']: continue
        box3d_pts_2d, box3d_pts_3d = compute_box_3d(obj, calib)
        box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
        box3d_map[img_id].append(box3d_pts_3d)
        box2d_map[img_id].append(obj.box2d)
        type_map[img_id].append(obj.classname)

    ps = ps_list[i]
    segp = segp_list[i].squeeze()
    center = center_list[i].squeeze()
    ret = TEST_DATASET[i]
    if FLAGS.from_rgb_detection:
        rot_angle = ret[1]
    else:
        rot_angle = ret[7]

    # Get heading angle and size
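The snippet is cut off after this comment. Judging from Example #2, the decoding that presumably follows turns the predicted heading/size class and residual into box corners and undoes the frustum rotation (the *_list names below are assumptions):

    heading_angle = class2angle(heading_cls_list[i], heading_res_list[i], NUM_HEADING_BIN)  # assumed names
    box_size = class2size(size_cls_list[i], size_res_list[i])  # assumed names
    corners_3d_pred = get_3d_box(box_size, heading_angle, center)
    corners_3d_pred = rotate_pc_along_y(corners_3d_pred, -rot_angle)  # undo frustum rotation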